utils.py
# -*- coding: utf-8 -*-
from datetime import datetime
from datetime import timedelta
import pymysql
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.metrics import auc
from multiprocessing import Pool
import os
import signal
from config import *
import socket


def judge_online():
    # The IP below is the local development machine's IP; any other host is treated as the online environment.
    # Returns (flag, path): flag is True when running online, and path is the matching data directory.
    if socket.gethostbyname(socket.gethostname()) == '172.30.8.160':
        flag = False
        path = LOCAL_DIRCTORY
    else:
        flag = True
        path = DIRECTORY_PATH
    return flag, path


def get_date():
    now = datetime.now()
    year = now.year
    month = now.month
    day = now.day
    date = datetime(year, month, day)
    data_start_date = "2018-07-15"
    # data_end_date = "2018-09-02"
    # validation_date = "2018-09-01"
    # data_start_date = (date - timedelta(days=3)).strftime("%Y-%m-%d")
    data_end_date = (date - timedelta(days=1)).strftime("%Y-%m-%d")
    validation_date = (date - timedelta(days=2)).strftime("%Y-%m-%d")
    # The validation date and the test date must be exactly one day apart,
    # otherwise splitting the dataset will raise an error.
    test_date = data_end_date
    print("data_start_date, data_end_date, validation_date, test_date:")
    print(data_start_date, data_end_date, validation_date, test_date)
    return data_start_date, data_end_date, validation_date, test_date
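# Example (illustrative): if run on 2018-09-03, get_date() returns
# ("2018-07-15", "2018-09-02", "2018-09-01", "2018-09-02").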


def get_roc_curve(y, pred, pos_label):
    """
    Compute the ROC curve and AUC for a binary classification problem.
    """
    fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=pos_label)
    AUC = metrics.auc(fpr, tpr)
    print(AUC)
    return AUC
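

# Illustrative sketch (not from the original pipeline): calling get_roc_curve
# on a few hand-made labels and scores; the values and pos_label=1 below are
# placeholders for demonstration only.
def _demo_get_roc_curve():
    y_true = [0, 0, 1, 1]
    y_score = [0.1, 0.4, 0.35, 0.8]
    get_roc_curve(y_true, y_score, pos_label=1)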


# Fetch data from a TiDB table, convert it to a DataFrame and drop rows containing nulls.
def con_sql(db, sql):
    cursor = db.cursor()
    try:
        cursor.execute(sql)
        result = cursor.fetchall()
        df = pd.DataFrame(list(result)).dropna()
    except Exception as e:
        print("An exception occurred:", e)
        df = pd.DataFrame()
    finally:
        db.close()
    return df
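

# Illustrative sketch only (not the project's real connection settings): how
# con_sql might be called. The host, credentials, database name and SQL
# statement below are all placeholders.
def _demo_con_sql():
    db = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                         password="secret", database="test", charset="utf8")
    sql = "select * from some_table limit 10"
    df = con_sql(db, sql)
    print(df.head())
# Note that con_sql closes the connection in its finally block, so a new
# connection has to be created for every query.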


# Same as con_sql above, except that rows containing nulls are kept instead of dropped.
def sql_df(db, sql):
    cursor = db.cursor()
    try:
        cursor.execute(sql)
        result = cursor.fetchall()
        df = pd.DataFrame(list(result))
    except Exception as e:
        print("An exception occurred:", e)
        df = pd.DataFrame()
    finally:
        db.close()
    return df


def move_file():
    # Move every file from the train sub-directory up into DIRECTORY_PATH.
    for eachFile in os.listdir(DIRECTORY_PATH + "train"):
        os.rename(DIRECTORY_PATH + "train" + "/" + eachFile, DIRECTORY_PATH + eachFile)
    print("Files moved to the target directory successfully")


def restart_process():
    out = os.popen("ps aux | grep diaryUpdateOnlineOffline.py").read()
    killed = False
    for line in out.splitlines():
        if 'python diaryUpdateOnlineOffline.py' in line:
            pid = int(line.split()[1])
            # Some processes are very short-lived and may exit at any moment,
            # so the kill must be wrapped in a try/except.
            try:
                os.kill(pid, signal.SIGKILL)
                killed = True
                print("Killed the running python diaryUpdateOnlineOffline.py process")
            except OSError:
                print("No such process!")
    # Start (or restart) the script exactly once, whether or not an old instance was found.
    os.popen('python diaryUpdateOnlineOffline.py')
    if killed:
        print("diaryUpdateOnlineOffline.py restarted successfully")
    else:
        print("diaryUpdateOnlineOffline.py started successfully")


# Multi-process FFM conversion class:
class multiFFMFormatPandas:
    def __init__(self):
        self.field_index_ = None
        self.feature_index_ = None
        self.y = None

    def fit(self, df, y=None):
        self.y = y
        df_ffm = df[df.columns.difference([self.y])]
        if self.field_index_ is None:
            self.field_index_ = {col: i for i, col in enumerate(df_ffm)}

        if self.feature_index_ is not None:
            # Continue numbering after the largest feature index already assigned.
            last_idx = max(list(self.feature_index_.values())) + 1

        if self.feature_index_ is None:
            self.feature_index_ = dict()
            last_idx = 0

        for col in df.columns:
            vals = df[col].unique()
            for val in vals:
                if pd.isnull(val):
                    continue
                name = '{}_{}'.format(col, val)
                if name not in self.feature_index_:
                    self.feature_index_[name] = last_idx
                    last_idx += 1
            self.feature_index_[col] = last_idx
            last_idx += 1

        return self

    def fit_transform(self, df, y=None, n=50000, processes=4):
        # n is the maximum number of rows handled by each process; processes is the number of worker processes.
        self.fit(df, y)
        return self.transform(df, n, processes)

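    # transform_row_ emits one libffm-style line per DataFrame row:
    #     "<label> <field>:<feature>:<value> ..."
    # Field and feature indices are written 1-based (hence the +1 offsets below);
    # categorical (object) columns get the value 1, integer columns keep their raw value.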
    def transform_row_(self, row, t):
        ffm = []
        if self.y is not None:
            ffm.append(str(row.loc[row.index == self.y][0]))
        if self.y is None:
            ffm.append(str(0))

        for col, val in row.loc[row.index != self.y].to_dict().items():
            col_type = t[col]
            name = '{}_{}'.format(col, val)
            if col_type.kind == 'O':
                ffm.append('{}:{}:1'.format(self.field_index_[col]+1, self.feature_index_[name]+1))
            elif col_type.kind == 'i':
                ffm.append('{}:{}:{}'.format(self.field_index_[col]+1, self.feature_index_[col]+1, val))
        return ' '.join(ffm)

    def transform(self, df, n=1500, processes=2):
        # n is the maximum number of rows handled by each process; processes is the number of worker processes.
        t = df.dtypes.to_dict()
        data_list = self.data_split_line(df, n)

        # Set up the process pool.
        pool = Pool(processes)
        print("Total number of chunks: " + str(len(data_list)))
        for i in range(len(data_list)):
            data_list[i] = pool.apply_async(self.pool_function, (data_list[i], t,))

        result_map = {}
        for i in data_list:
            result_map.update(i.get())
        pool.close()
        pool.join()

        return pd.Series(result_map)

    # Worker function executed inside each pool process.
    def pool_function(self, df, t):
        return {idx: self.transform_row_(row, t) for idx, row in df.iterrows()}

    # Split a DataFrame into chunks of at most `step` rows each and return them as a list.
    def data_split_line(self, data, step):
        data_list = []
        x = 0
        while True:
            if x + step < len(data):
                data_list.append(data.iloc[x:x + step])
                x = x + step
            else:
                data_list.append(data.iloc[x:len(data)])
                break

        '''
        # Generator-based alternative; it did not perform well in local tests.
        x = 0
        while True:
            if x + step < len(data):
                yield data.iloc[x:x + step]
                x = x + step
            else:
                yield data.iloc[x:len(data)]
                break
        '''

        return data_list
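
    # Example (illustrative): with a 10-row DataFrame and step=4, data_split_line
    # returns three chunks covering df.iloc[0:4], df.iloc[4:8] and df.iloc[8:10].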

    # Single-process conversion method; does not need multiprocessing.
    def native_transform(self, df):
        t = df.dtypes.to_dict()
        return pd.Series({idx: self.transform_row_(row, t) for idx, row in df.iterrows()})


    # The method below is not part of the original class; it was added to check
    # whether a feature (e.g. a user) was present in the training data.
    def is_feature_index_exist(self, name):
        return name in self.feature_index_

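
# Minimal usage sketch (illustrative only): the column names and values below
# are made up. Because transform() uses a multiprocessing Pool, call this from
# under an `if __name__ == "__main__":` guard on platforms that use the
# "spawn" start method (macOS/Windows).
def _demo_multi_ffm_format():
    df = pd.DataFrame({
        "label": [1, 0, 1],
        "city": ["beijing", "shanghai", "beijing"],
        "level": [3, 1, 2],
    })
    formatter = multiFFMFormatPandas()
    ffm_series = formatter.fit_transform(df, y="label", n=2, processes=2)
    # formatter.native_transform(df) is the single-process alternative.
    print(ffm_series)
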
# FFM format conversion class (earlier single-process version, kept commented out for reference):
# class FFMFormatPandas:
#     def __init__(self):
#         self.field_index_ = None
#         self.feature_index_ = None
#         self.y = None
#
#     def fit(self, df, y=None):
#         self.y = y
#         df_ffm = df[df.columns.difference([self.y])]
#         if self.field_index_ is None:
#             self.field_index_ = {col: i for i, col in enumerate(df_ffm)}
#
#         if self.feature_index_ is not None:
#             last_idx = max(list(self.feature_index_.values()))
#
#         if self.feature_index_ is None:
#             self.feature_index_ = dict()
#             last_idx = 0
#
#         for col in df.columns:
#             vals = df[col].unique()
#             for val in vals:
#                 if pd.isnull(val):
#                     continue
#                 name = '{}_{}'.format(col, val)
#                 if name not in self.feature_index_:
#                     self.feature_index_[name] = last_idx
#                     last_idx += 1
#             self.feature_index_[col] = last_idx
#             last_idx += 1
#         return self
#
#     def fit_transform(self, df, y=None):
#         self.fit(df, y)
#         return self.transform(df)
#
#     def transform_row_(self, row, t):
#         ffm = []
#         if self.y is not None:
#             ffm.append(str(row.loc[row.index == self.y][0]))
#         if self.y is None:
#             ffm.append(str(0))
#
#         for col, val in row.loc[row.index != self.y].to_dict().items():
#             col_type = t[col]
#             name = '{}_{}'.format(col, val)
#             if col_type.kind == 'O':
#                 ffm.append('{}:{}:1'.format(self.field_index_[col], self.feature_index_[name]))
#             elif col_type.kind == 'i':
#                 ffm.append('{}:{}:{}'.format(self.field_index_[col], self.feature_index_[col], val))
#         return ' '.join(ffm)
#
#     def transform(self, df):
#         t = df.dtypes.to_dict()
#         return pd.Series({idx: self.transform_row_(row, t) for idx, row in df.iterrows()})
#
#     The method below is not part of the original class; it was added to check whether the user exists in the training dataset.
#     def is_feature_index_exist(self, name):
#         if name in self.feature_index_:
#             return True
#         else:
#             return False