Commit 94c4bc5c authored by 高雅喆

Merge branch 'master' of git.wanmeizhensuo.com:ML/ffm-baseline

use python ffm_encoder
parents 4ad04627 9e1531b4
@@ -3,11 +3,11 @@ PYTHON_PATH=/home/gaoyazhe/miniconda3/bin/python
MODEL_PATH=/srv/apps/ffm-baseline/eda/esmm
DATA_PATH=/home/gaoyazhe/data
echo "start timestamp"
echo "start time"
current=$(date "+%Y-%m-%d %H:%M:%S")
timeStamp=$(date -d "$current" +%s)
currentTimeStamp=$((timeStamp*1000+`date "+%N"`/1000000))
echo $currentTimeStamp
echo $current
echo "rm leave tfrecord"
rm ${DATA_PATH}/tr/*
@@ -40,22 +40,20 @@ rm ${DATA_PATH}/va/va_*
rm ${DATA_PATH}/native/native_*
rm ${DATA_PATH}/nearby/nearby_*
echo "data transform timestamp"
echo "data transform time"
current=$(date "+%Y-%m-%d %H:%M:%S")
timeStamp=$(date -d "$current" +%s)
currentTimeStamp=$((timeStamp*1000+`date "+%N"`/1000000))
echo $currentTimeStamp
echo $current
echo "train..."
${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/DeepCvrMTL.py --ctr_task_wgt=0.3 --learning_rate=0.0001 --deep_layers=256,128 --dropout=0.8,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=354332 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir="${DATA_PATH}" --task_type="train"
echo "train timestamp"
echo "train time"
current=$(date "+%Y-%m-%d %H:%M:%S")
timeStamp=$(date -d "$current" +%s)
currentTimeStamp=$((timeStamp*1000+`date "+%N"`/1000000))
echo $currentTimeStamp
${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/send_mail.py
echo $current
echo "infer native..."
${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/DeepCvrMTL.py --ctr_task_wgt=0.3 --learning_rate=0.0001 --deep_layers=256,128 --dropout=0.8,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=354332 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir="${DATA_PATH}/native" --task_type="infer" > ${DATA_PATH}/infer.log
@@ -67,8 +65,10 @@ ${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/DeepCvrMTL.py --ctr_task_wgt=0.3 --le
echo "sort and 2sql"
${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/sort_and_2sql.py
echo "infer and sort and 2sql timestamp"
echo "infer and sort and 2sql time"
current=$(date "+%Y-%m-%d %H:%M:%S")
timeStamp=$(date -d "$current" +%s)
currentTimeStamp=$((timeStamp*1000+`date "+%N"`/1000000))
echo $currentTimeStamp
\ No newline at end of file
echo $current
${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/send_mail.py
\ No newline at end of file
def merge_sort(lst):
    # recursively split the list, then merge the two sorted halves
    if len(lst) <= 1:
        return lst
    middle = int(len(lst) / 2)
    left = merge_sort(lst[:middle])
    right = merge_sort(lst[middle:])
    merged = []
    while left and right:
        merged.append(left.pop(0) if left[0] <= right[0] else right.pop(0))
    # one side is exhausted; append whatever remains of the other
    merged.extend(right if right else left)
    return merged

data_lst = [6, 202, 100, 301, 38, 8, 1]
print(merge_sort(data_lst))  # [1, 6, 8, 38, 100, 202, 301]
\ No newline at end of file
@@ -140,16 +140,23 @@ def get_data():
db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select max(stat_date) from esmm_train_data"
validate_date = con_sql(db, sql)[0].values.tolist()[0]
print("validate_date:"+validate_date)
print("validate_date:" + validate_date)
temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
start = (temp - datetime.timedelta(days=14)).strftime("%Y-%m-%d")
start = (temp - datetime.timedelta(days=15)).strftime("%Y-%m-%d")
print(start)
db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select device_id,y,z,stat_date,ucity_id,cid_id,clevel1_id,ccity_name from esmm_train_data " \
"where stat_date >= '{}'".format(start)
df = con_sql(db,sql)
df = df.rename(columns={0:"device_id",1: "y",2:"z",3:"stat_date",4:"ucity_id",5:"cid_id",
6:"clevel1_id",7:"ccity_name"})
sql = "select e.device_id,e.y,e.z,e.stat_date,e.ucity_id,e.cid_id,e.clevel1_id,e.ccity_name," \
"u.device_type,u.manufacturer,u.channel," \
"home.jingxuan,home.zhibo,home.nose,home.eyes,home.weizheng,home.teeth,home.lunkuo," \
"home.meifu,home.xizhi,home.zhifang,home.longxiong,home.simi,home.maofa,home.gongli,home.korea " \
"from esmm_train_data e left join user_feature u on e.device_id = u.device_id " \
"left join home_tab_click home on e.device_id = home.device_id " \
"where e.stat_date >= '{}'".format(start)
df = con_sql(db, sql)
df = df.rename(columns={0: "device_id", 1: "y", 2: "z", 3: "stat_date", 4: "ucity_id", 5: "cid_id",
6: "clevel1_id", 7: "ccity_name"})
print("esmm data ok")
print(df.head(2))
ucity_id = list(set(df["ucity_id"].values.tolist()))
cid = list(set(df["cid_id"].values.tolist()))
df["clevel1_id"] = df["clevel1_id"].astype("str")
@@ -158,16 +165,16 @@ def get_data():
df["z"] = df["z"].astype("str")
df["y"] = df["stat_date"].str.cat([df["device_id"].values.tolist(),df["ucity_id"].values.tolist(), df["cid_id"].values.tolist(),
df["y"].values.tolist(),df["z"].values.tolist()], sep=",")
df = df.drop("z", axis=1)
df = pd.merge(df,get_statistics(),how='left',on = "device_id").fillna(0)
df = df.drop("device_id", axis=1)
df = df.drop(["z","device_id"], axis=1).fillna(0.0)
print(df.head(2))
print("fields:{}".format(df.shape[1]-1))
print("features:{}".format(len(cid)))
return df,validate_date,ucity_id,cid
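The str.cat call above folds device_id, ucity_id, cid_id and both labels into the single y column so they survive the FFM encoding and can be split back out on "," in transform(). A minimal sketch of that packing with made-up sample values (the columns are passed as Series here instead of .values.tolist() as in the diff; the result is the same):

import pandas as pd

sample = pd.DataFrame({"stat_date": ["2019-03-14"], "device_id": ["dev1"], "ucity_id": ["beijing"],
                       "cid_id": ["6001"], "y": ["1"], "z": ["0"]})
# concatenate the bookkeeping columns onto stat_date, comma-separated
sample["y"] = sample["stat_date"].str.cat(
    [sample["device_id"], sample["ucity_id"], sample["cid_id"], sample["y"], sample["z"]], sep=",")
print(sample["y"].iloc[0])  # 2019-03-14,dev1,beijing,6001,1,0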
def transform(a,validate_date):
model = multiFFMFormatPandas()
df = model.fit_transform(a, y="y", n=160000, processes=22)
df = model.fit_transform(a, y="y", n=160000, processes=26)
df = pd.DataFrame(df)
df["stat_date"] = df[0].apply(lambda x: x.split(",")[0])
df["device_id"] = df[0].apply(lambda x: x.split(",")[1])
@@ -187,51 +194,30 @@ def transform(a,validate_date):
test = test.drop("stat_date",axis=1)
# print("train shape")
# print(train.shape)
train.to_csv(path + "train.csv", sep="\t", index=False)
test.to_csv(path + "test.csv", sep="\t", index=False)
# train.to_csv(path + "train.csv", sep="\t", index=False)
# test.to_csv(path + "test.csv", sep="\t", index=False)
return model
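multiFFMFormatPandas itself is not shown in this diff; as a rough, hypothetical sketch of the libffm-style rows such an encoder typically emits (the label followed by one field:feature_index:value triple per column), with every name below illustrative only:

import pandas as pd

def ffm_encode(df, label_col="y"):
    # one field id per non-label column, one global index per (column, value) pair
    fields = [c for c in df.columns if c != label_col]
    feature_index = {}

    def encode_row(row):
        parts = [str(row[label_col])]
        for field_id, col in enumerate(fields):
            idx = feature_index.setdefault((col, row[col]), len(feature_index) + 1)
            parts.append("{}:{}:1".format(field_id, idx))
        return " ".join(parts)

    return df.apply(encode_row, axis=1)

demo = pd.DataFrame({"y": ["1", "0"], "ucity_id": ["beijing", "shanghai"], "clevel1_id": ["7", "7"]})
print(ffm_encode(demo).tolist())  # ['1 0:1:1 1:2:1', '0 0:3:1 1:2:1']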
def get_user_feature():
db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select max(stat_date) from esmm_train_data"
validate_date = con_sql(db, sql)[0].values.tolist()[0]
print("validate_date:" + validate_date)
temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
start = (temp - datetime.timedelta(days=2)).strftime("%Y-%m-%d")
db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select e.device_id,e.y,e.z,e.stat_date,e.ucity_id,e.cid_id,e.clevel1_id,e.ccity_name," \
"u.device_type,u.manufacturer,u.channel,home.total" \
"from (esmm_train_data e left join user_feature u on e.device_id = u.device_id) " \
"left join home_tab_click home on e.device_id = home.device_id" \
"where e.stat_date >= '{}'".format(start)
df = con_sql(db, sql)
df = df.rename(columns={0: "device_id", 1: "y", 2: "z", 3: "stat_date", 4: "ucity_id", 5: "cid_id",
6: "clevel1_id", 7: "ccity_name"})
print(df.head(2))
def get_statistics():
db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select device_id,total,精选,直播,鼻部,眼部,微整,牙齿,轮廓,美肤抗衰," \
"吸脂,脂肪填充,隆胸,私密,毛发管理,公立,韩国 from home_tab_click"
df = con_sql(db, sql)
df = df.rename(columns={0:"device_id",1:"total"})
for i in df.columns.difference(["device_id","total"]):
df[i] = df[i]/df["total"]
df[i] = df[i].apply(lambda x: format(x,".4f"))
df[i] = df[i].astype("float")
df = df.drop("total", axis=1)
return df
def get_predict_set(ucity_id, cid,model):
db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select device_id,y,z,stat_date,ucity_id,cid_id,clevel1_id,ccity_name,label from esmm_pre_data"
sql = "select e.device_id,e.y,e.z,e.stat_date,e.ucity_id,e.cid_id,e.clevel1_id,e.ccity_name," \
"u.device_type,u.manufacturer,u.channel," \
"home.jingxuan,home.zhibo,home.nose,home.eyes,home.weizheng,home.teeth,home.lunkuo," \
"home.meifu,home.xizhi,home.zhifang,home.longxiong,home.simi,home.maofa,home.gongli,home.korea,e.label " \
"from esmm_pre_data e left join user_feature u on e.device_id = u.device_id " \
"left join home_tab_click home on e.device_id = home.device_id"
df = con_sql(db, sql)
df = df.rename(columns={0: "device_id", 1: "y", 2: "z", 3: "stat_date", 4: "ucity_id", 5: "cid_id",
6: "clevel1_id", 7: "ccity_name",8:"label"})
6: "clevel1_id", 7: "ccity_name",26:"label"})
print("before filter:")
print(df.shape)
df = df[df["cid_id"].isin(cid)]
print("after cid filter:")
print(df.shape)
df = df[df["ucity_id"].isin(ucity_id)]
print("after ucity filter:")
print(df.shape)
df["clevel1_id"] = df["clevel1_id"].astype("str")
df["cid_id"] = df["cid_id"].astype("str")
@@ -241,11 +227,7 @@ def get_predict_set(ucity_id, cid,model):
df["y"] = df["label"].str.cat(
[df["device_id"].values.tolist(), df["ucity_id"].values.tolist(), df["cid_id"].values.tolist(),
df["y"].values.tolist(), df["z"].values.tolist()], sep=",")
df = df.drop(["z","label"], axis=1)
df = pd.merge(df, get_statistics(), how='left',on = "device_id").fillna(0)
df = df.drop("device_id", axis=1)
print("df ok")
print(df.shape)
df = df.drop(["z","label","device_id"], axis=1).fillna(0.0)
print(df.head(2))
df = model.transform(df,n=160000, processes=22)
df = pd.DataFrame(df)
@@ -276,15 +258,13 @@ def get_predict_set(ucity_id, cid,model):
if __name__ == "__main__":
get_user_feature()
path = "/home/gmuser/ffm/"
a = time.time()
# df, validate_date, ucity_id, cid = get_data()
# model = transform(df, validate_date)
# get_predict_set(ucity_id, cid,model)
# b = time.time()
# print("cost(分钟)")
# print((b-a)/60)
df, validate_date, ucity_id, cid = get_data()
model = transform(df, validate_date)
get_predict_set(ucity_id, cid,model)
b = time.time()
print("cost(分钟)")
print((b-a)/60)