# -*- coding: utf-8 -*-
from datetime import datetime
from datetime import timedelta
import pymysql
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.metrics import auc
from multiprocessing import Pool


def get_date():
    # Build a 31-day data window ending yesterday, with the last two days
    # held out as the validation and test dates.
    now = datetime.now()
    date = datetime(now.year, now.month, now.day)
    data_start_date = (date - timedelta(days=31)).strftime("%Y-%m-%d")
    data_end_date = (date - timedelta(days=1)).strftime("%Y-%m-%d")
    validation_date = (date - timedelta(days=2)).strftime("%Y-%m-%d")
    # The validation and test dates must be exactly one day apart,
    # otherwise splitting the dataset will raise an error.
    test_date = data_end_date
    print("data_start_date,data_end_date,validation_date,test_date:")
    print(data_start_date, data_end_date, validation_date, test_date)
    return data_start_date, data_end_date, validation_date, test_date


def get_roc_curve(y, pred, pos_label):
    """
    Compute the ROC curve and AUC for a binary classification problem.
    """
    fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=pos_label)
    AUC = metrics.auc(fpr, tpr)
    print(AUC)
    return AUC
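
# Hedged usage sketch for get_roc_curve; the labels and scores below are made-up
# illustration data, not values used elsewhere in this project:
#   y_true = [0, 0, 1, 1]
#   y_score = [0.1, 0.4, 0.35, 0.8]
#   get_roc_curve(y_true, y_score, pos_label=1)   # prints an AUC of 0.75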


# Fetch data from a TiDB table and convert it into a DataFrame.
def con_sql(sql):
    db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
    try:
        cursor = db.cursor()
        cursor.execute(sql)
        result = cursor.fetchall()
        df = pd.DataFrame(list(result)).dropna()
    finally:
        db.close()
    return df
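
# Hedged usage sketch for con_sql; the SQL statement and column names below are
# placeholder assumptions, not the actual tables used by this project:
#   df = con_sql("select col_a, col_b, col_c from some_table limit 1000")
#   df = df.rename(columns={0: "col_a", 1: "col_b", 2: "col_c"})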


# FFM format converter that parallelises the transform with a multiprocessing pool:
class multiFFMFormatPandas:
    def __init__(self):
        self.field_index_ = None
        self.feature_index_ = None
        self.y = None

    def fit(self, df, y=None):
        self.y = y
        df_ffm = df[df.columns.difference([self.y])]
        if self.field_index_ is None:
            # Map every column except the label to a field index.
            self.field_index_ = {col: i for i, col in enumerate(df_ffm)}

        if self.feature_index_ is not None:
            # Continue numbering after the largest existing feature index.
            last_idx = max(list(self.feature_index_.values())) + 1

        if self.feature_index_ is None:
            self.feature_index_ = dict()
            last_idx = 0

        # Build the feature index over every column except the label y.
        for col in df_ffm.columns:
            vals = df[col].unique()
            for val in vals:
                if pd.isnull(val):
                    continue
                name = '{}_{}'.format(col, val)
                if name not in self.feature_index_:
                    self.feature_index_[name] = last_idx
                    last_idx += 1
            # Also reserve an index for the bare column name (used for numeric columns).
            self.feature_index_[col] = last_idx
            last_idx += 1

        return self

    def fit_transform(self, df, y=None, n=50000, processes=4):
        # n is the maximum number of rows handled per chunk, processes is the number of worker processes.
        self.fit(df, y)
        return self.transform(df, n, processes)

    def transform_row_(self, row, t):
        # Convert one row to a libffm-style line: "<label> field:feature:value ...".
        ffm = []
        if self.y is not None:
            ffm.append(str(row.loc[row.index == self.y].iloc[0]))
        if self.y is None:
            ffm.append(str(0))

        for col, val in row.loc[row.index != self.y].to_dict().items():
            col_type = t[col]
            name = '{}_{}'.format(col, val)
            if col_type.kind == 'O':
                # Categorical (object) column: one-hot style "field:feature:1".
                ffm.append('{}:{}:1'.format(self.field_index_[col], self.feature_index_[name]))
            elif col_type.kind == 'i':
                # Integer column: keep the raw value as "field:feature:value".
                ffm.append('{}:{}:{}'.format(self.field_index_[col], self.feature_index_[col], val))
        return ' '.join(ffm)

    def transform(self, df, n=1500, processes=2):
        # n is the maximum number of rows per chunk, processes is the number of worker processes.
        t = df.dtypes.to_dict()
        data_list = self.data_split_line(df, n)

        # Create the worker pool and submit one chunk per task.
        pool = Pool(processes)
        print("total number of chunks: " + str(len(data_list)))
        for i in range(len(data_list)):
            data_list[i] = pool.apply_async(self.pool_function, (data_list[i], t,))

        result_map = {}
        for i in data_list:
            result_map.update(i.get())
        pool.close()
        pool.join()

        return pd.Series(result_map)

    # Plain single-process conversion; no multiprocessing required.
    def native_transform(self, df):
        t = df.dtypes.to_dict()
        return pd.Series({idx: self.transform_row_(row, t) for idx, row in df.iterrows()})

    # Worker function executed in each process: converts one chunk of rows.
    def pool_function(self, df, t):
        return {idx: self.transform_row_(row, t) for idx, row in df.iterrows()}

    # Splitting helper: takes a DataFrame and a chunk size (step) and returns a list
    # of DataFrames, each holding at most `step` rows.
    def data_split_line(self, data, step):
        data_list = []
        x = 0
        while True:
            if x + step < len(data):
                data_list.append(data.iloc[x:x + step])
                # iloc slicing is end-exclusive, so the next chunk starts right at x + step.
                x = x + step
            else:
                data_list.append(data.iloc[x:len(data)])
                break

        '''
        # Generator-based alternative; local tests showed it was not faster.
        x = 0
        while True:
            if x + step < len(data):
                yield data.iloc[x:x + step]
                x = x + step
            else:
                yield data.iloc[x:len(data)]
                break
        '''

        return data_list


    # This method is not part of the original class; it was added to check whether
    # a given feature (e.g. a user) appeared in the training data.
    def is_feature_index_exist(self, name):
        return name in self.feature_index_
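
# Hedged usage sketch for the multiprocess converter above. The toy DataFrame and
# column names below are illustrative assumptions, not this project's real schema:
#   df = pd.DataFrame({"y": [0, 1], "city": ["beijing", "shanghai"], "age": [18, 19]})
#   ffm = multiFFMFormatPandas()
#   ffm_series = ffm.fit_transform(df, y="y", n=1, processes=2)
#   # Each element is a string like "<label> field:feature:value field:feature:value".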

# Single-process FFM format conversion class:
class FFMFormatPandas:
    def __init__(self):
        self.field_index_ = None
        self.feature_index_ = None
        self.y = None

    def fit(self, df, y=None):
        self.y = y
        df_ffm = df[df.columns.difference([self.y])]
        if self.field_index_ is None:
            # Map every column name except y to a field index, e.g. field_index_ = {'name': 0, 'age': 1}.
            self.field_index_ = {col: i for i, col in enumerate(df_ffm)}

        if self.feature_index_ is not None:
            # Continue numbering after the largest existing feature index.
            last_idx = max(list(self.feature_index_.values())) + 1

        if self.feature_index_ is None:
            self.feature_index_ = dict()
            last_idx = 0

        # Build the feature index over every column except the label y.
        for col in df_ffm.columns:
            vals = df[col].unique()
            for val in vals:
                if pd.isnull(val):
                    continue
                name = '{}_{}'.format(col, val)
                if name not in self.feature_index_:
                    # e.g. feature_index_ = {'name_tom': 0, 'name_lily': 1, 'name': 2, 'age_18': 3, 'age_19': 4, 'age': 5}
                    self.feature_index_[name] = last_idx
                    last_idx += 1
            # Also reserve an index for the bare column name (used for numeric columns).
            self.feature_index_[col] = last_idx
            last_idx += 1
        return self

    def fit_transform(self, df, y=None):
        self.fit(df, y)
        return self.transform(df)

    def transform_row_(self, row, t):
        # Convert one row to a libffm-style line: "<label> field:feature:value ...".
        ffm = []
        if self.y is not None:
            ffm.append(str(row.loc[row.index == self.y].iloc[0]))
        if self.y is None:
            ffm.append(str(0))

        for col, val in row.loc[row.index != self.y].to_dict().items():
            col_type = t[col]
            name = '{}_{}'.format(col, val)
            if col_type.kind == 'O':
                # Categorical (object) column: one-hot style "field:feature:1".
                ffm.append('{}:{}:1'.format(self.field_index_[col], self.feature_index_[name]))
            elif col_type.kind == 'i':
                # Integer column: keep the raw value as "field:feature:value".
                ffm.append('{}:{}:{}'.format(self.field_index_[col], self.feature_index_[col], val))
        return ' '.join(ffm)

    def transform(self, df):
        t = df.dtypes.to_dict()
        return pd.Series({idx: self.transform_row_(row, t) for idx, row in df.iterrows()})

    # This method is not part of the original class; it was added to check whether
    # a given feature (e.g. a user) appeared in the training data.
    def is_feature_index_exist(self, name):
        return name in self.feature_index_
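

# Hedged, self-contained demo of the single-process converter. The toy data and
# column names here are illustrative assumptions, not this project's real training schema.
if __name__ == "__main__":
    demo_df = pd.DataFrame({
        "y": [0, 1, 1],
        "city": ["beijing", "shanghai", "beijing"],
        "age": [18, 19, 18],
    })
    converter = FFMFormatPandas()
    ffm_lines = converter.fit_transform(demo_df, y="y")
    for line in ffm_lines:
        # Each line is "<label> field:feature:value ..." in libffm text format.
        print(line)
    # Check whether a feature value seen at prediction time existed in the training data.
    print(converter.is_feature_index_exist("city_guangzhou"))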