Commit 5f70f44f authored by 张彦钊

Modify the esmm production project on the zhao branch

parent f9c7c687
@@ -40,44 +40,64 @@ def get_data():
     start = (temp - datetime.timedelta(days=300)).strftime("%Y-%m-%d")
     print(start)
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
-    sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name," \
-          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,cut.time,dl.app_list " \
+    sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
+          "u.channel,c.top,e.device_id,cut.time,dl.app_list,e.diary_service_id,feat.level3_ids,feat.level2 " \
          "from {} e left join user_feature u on e.device_id = u.device_id " \
          "left join cid_type_top c on e.device_id = c.device_id " \
          "left join cid_time_cut cut on e.cid_id = cut.cid " \
          "left join device_app_list dl on e.device_id = dl.device_id " \
          "left join diary_feat feat on e.cid_id = feat.diary_id " \
-          "where e.stat_date >= '{}'".format(train_data_set,start)
+          "where e.stat_date >= '{}'".format(train_data_set, start)
     df = con_sql(db, sql)
     # print(df.shape)
     df = df.rename(columns={0: "y", 1: "z", 2: "stat_date", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
                             6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
-                            11: "time",12:"app_list"})
-    # print(df.head(2)
+                            11: "time", 12: "app_list", 13: "service_id", 14: "level3_ids", 15: "level2"})
+    print("esmm data ok")
+    db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
+    sql = "select level2_id,treatment_method,price_min,price_max,treatment_time,maintain_time,recover_time " \
+          "from train_Knowledge_network_data"
+    knowledge = con_sql(db, sql)
+    knowledge = knowledge.rename(columns={0: "level2", 1: "method", 2: "min", 3: "max",
+                                          4: "treatment_time", 5: "maintain_time", 6: "recover_time"})
+    knowledge["level2"] = knowledge["level2"].astype("str")
+    df = pd.merge(df, knowledge, on='level2', how='left')
+    df = df.drop("level2", axis=1)
+    service_id = tuple(df["service_id"].unique())
+    db = pymysql.connect(host='rdsfewzdmf0jfjp9un8xj.mysql.rds.aliyuncs.com', port=3306, user='work',
+                         passwd='BJQaT9VzDcuPBqkd', db='zhengxing')
+    sql = "select s.id,d.hospital_id from api_service s left join api_doctor d on s.doctor_id = d.id " \
+          "where s.id in {}".format(service_id)
+    hospital = con_sql(db, sql)
+    hospital = hospital.rename(columns={0: "service_id", 1: "hospital_id"})
+    # print(hospital.head())
+    # print("hospital")
+    # print(hospital.count())
+    hospital["service_id"] = hospital["service_id"].astype("str")
+    df = pd.merge(df, hospital, on='service_id', how='left')
+    df = df.drop("service_id", axis=1)
+    print(df.count())
     print("before")
     print(df.shape)
+    df = df.drop_duplicates()
     df = df.drop_duplicates(["ucity_id", "clevel2_id", "ccity_name", "device_type", "manufacturer",
-                             "channel", "top", "time", "stat_date","app_list"])
+                             "channel", "top", "time", "stat_date", "app_list", "hospital_id", "level3_ids"])
     print("after")
     print(df.shape)
-    app_list_number,app_list_map = multi_hot(df,"app_list",1)
-    level2_number,level2_map = multi_hot(df,"clevel2_id",1+app_list_number)
-    # df["app_list"] = df["app_list"].fillna("lost_na")
-    # app_list_value = [i.split(",") for i in df["app_list"].unique()]
-    # app_list_unique = []
-    # for i in app_list_value:
-    # app_list_unique.extend(i)
-    # app_list_unique = list(set(app_list_unique))
-    # app_list_map = dict(zip(app_list_unique, list(range(1, len(app_list_unique) + 1))))
-    # df["app_list"] = df["app_list"].apply(app_list_func,args=(app_list_map,))
+    app_list_number, app_list_map = multi_hot(df, "app_list", 2)
+    level2_number, level2_map = multi_hot(df, "clevel2_id", 2 + app_list_number)
+    level3_number, level3_map = multi_hot(df, "level3_ids", 2 + app_list_number + level2_number)
     unique_values = []
     features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
-                "channel", "top", "time", "stat_date"]
+                "channel", "top", "time", "stat_date", "hospital_id",
+                "method", "min", "max", "treatment_time", "maintain_time", "recover_time"]
     for i in features:
         df[i] = df[i].astype("str")
         df[i] = df[i].fillna("lost")
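The multi_hot helper is not part of this commit, so the sketch below is only a plausible reconstruction inferred from how its outputs are used: it returns the number of distinct tokens in a comma-separated column plus a token-to-id map whose ids start at the offset passed in. The offsets chain, which is why level3_ids starts at 2 + app_list_number + level2_number and the one-hot value_map later starts after all three ranges.

    import pandas as pd

    def multi_hot(df, column, begin_index):
        # Hypothetical reconstruction: gather every token that appears in the
        # comma-separated column and assign consecutive integer ids starting at
        # begin_index, so successive multi-hot features occupy disjoint id ranges.
        df[column] = df[column].fillna("lost_na")
        tokens = set()
        for value in df[column].unique():
            tokens.update(str(value).split(","))
        token_map = dict(zip(sorted(tokens), range(begin_index, begin_index + len(tokens))))
        return len(tokens), token_map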
@@ -85,14 +105,16 @@ def get_data():
         df[i] = df[i] + i
         unique_values.extend(list(df[i].unique()))
-    temp = list(range(1+app_list_number+level2_number, 1 + app_list_number+level2_number + len(unique_values)))
-    value_map = dict(zip(unique_values,temp))
+    temp = list(range(2 + app_list_number + level2_number + level3_number,
+                      2 + app_list_number + level2_number + level3_number + len(unique_values)))
+    value_map = dict(zip(unique_values, temp))
     df = df.drop("device_id", axis=1)
-    train = df[df["stat_date"] != validate_date+"stat_date"]
-    test = df[df["stat_date"] == validate_date+"stat_date"]
+    train = df[df["stat_date"] != validate_date + "stat_date"]
+    test = df[df["stat_date"] == validate_date + "stat_date"]
     for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "time", "stat_date"]:
+              "channel", "top", "time", "stat_date", "hospital_id",
+              "method", "min", "max", "treatment_time", "maintain_time", "recover_time"]:
         train[i] = train[i].map(value_map)
         test[i] = test[i].map(value_map)
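The loop above appends the column name to every value before the shared value_map is built; a toy illustration of why (identical raw values in different columns would otherwise collide in the one id space). The counts used for start are made up.

    import pandas as pd

    toy = pd.DataFrame({"ucity_id": ["beijing", "shanghai"],
                        "ccity_name": ["beijing", "chengdu"]})
    unique_values = []
    for col in ["ucity_id", "ccity_name"]:
        # same trick as above: value + column name keeps "beijing" as a user city
        # distinct from "beijing" as a content city
        toy[col] = toy[col].astype("str") + col
        unique_values.extend(list(toy[col].unique()))

    start = 2 + 10 + 5 + 7   # hypothetical: 2 reserved ids + app_list + level2 + level3 counts
    value_map = dict(zip(unique_values, range(start, start + len(unique_values))))
    print(value_map)         # {'beijingucity_id': 24, 'shanghaiucity_id': 25, 'beijingccity_name': 26, ...}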
@@ -101,10 +123,10 @@ def get_data():
     print("test shape")
     print(test.shape)
-    write_csv(train, "tr",100000)
-    write_csv(test, "va",80000)
-    return validate_date,value_map,app_list_map,level2_map
+    write_csv(train, "tr", 100000)
+    write_csv(test, "va", 80000)
+    return validate_date, value_map, app_list_map, level2_map, level3_map


 def app_list_func(x,l):

@@ -129,10 +151,11 @@ def write_csv(df,name,n):
         temp.to_csv(path + name+ "/{}_{}.csv".format(name,i), index=False)


-def get_predict(date,value_map,app_list_map,level2_map):
+def get_predict(date,value_map,app_list_map,level2_map,level3_map):
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
     sql = "select e.y,e.z,e.label,e.ucity_id,feat.level2_ids,e.ccity_name," \
-          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,e.cid_id,cut.time,dl.app_list " \
+          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,e.cid_id,cut.time," \
+          "dl.app_list,e.hospital_id,feat.level3_ids,feat.level2 " \
          "from esmm_pre_data e left join user_feature u on e.device_id = u.device_id " \
          "left join cid_type_top c on e.device_id = c.device_id " \
          "left join cid_time_cut cut on e.cid_id = cut.cid " \
@@ -140,22 +163,39 @@ def get_predict(date,value_map,app_list_map,level2_map):
          "left join diary_feat feat on e.cid_id = feat.diary_id"
     df = con_sql(db, sql)
     df = df.rename(columns={0: "y", 1: "z", 2: "label", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
-                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top",
-                            10: "device_id", 11: "cid_id", 12: "time",13:"app_list"})
+                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
+                            11: "cid_id", 12: "time", 13: "app_list", 14: "hospital_id", 15: "level3_ids",
+                            16: "level2"})
+    db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
+    sql = "select level2_id,treatment_method,price_min,price_max,treatment_time,maintain_time,recover_time " \
+          "from train_Knowledge_network_data"
+    knowledge = con_sql(db, sql)
+    knowledge = knowledge.rename(columns={0: "level2", 1: "method", 2: "min", 3: "max",
+                                          4: "treatment_time", 5: "maintain_time", 6: "recover_time"})
+    knowledge["level2"] = knowledge["level2"].astype("str")
+    df = pd.merge(df, knowledge, on='level2', how='left')
+    df = df.drop("level2", axis=1)
+    df = df.drop_duplicates(["ucity_id", "clevel2_id", "ccity_name", "device_type", "manufacturer",
+                             "channel", "top", "time", "app_list", "hospital_id", "level3_ids"])
     df["stat_date"] = date
     print(df.head(6))
     df["app_list"] = df["app_list"].fillna("lost_na")
-    df["app_list"] = df["app_list"].apply(app_list_func,args=(app_list_map,))
+    df["app_list"] = df["app_list"].apply(app_list_func, args=(app_list_map,))
     df["clevel2_id"] = df["clevel2_id"].fillna("lost_na")
     df["clevel2_id"] = df["clevel2_id"].apply(app_list_func, args=(level2_map,))
+    df["level3_ids"] = df["level3_ids"].fillna("lost_na")
+    df["level3_ids"] = df["level3_ids"].apply(app_list_func, args=(level3_map,))
     # print("predict shape")
     # print(df.shape)
     df["uid"] = df["device_id"]
     df["city"] = df["ucity_id"]
     features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
-                "channel", "top", "time", "stat_date"]
+                "channel", "top", "time", "stat_date", "hospital_id",
+                "method", "min", "max", "treatment_time", "maintain_time", "recover_time"]
     for i in features:
         df[i] = df[i].astype("str")
         df[i] = df[i].fillna("lost")
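app_list_func is also outside this diff. Since gen_tfrecords later splits its output on "," and casts the pieces to int, it presumably maps each comma-separated token through the given id dictionary and re-joins the ids; a hypothetical sketch (the fallback of 0 for unseen tokens is an assumption):

    def app_list_func(x, l):
        # Hypothetical reconstruction, consistent with how the result is consumed
        # downstream: map every token through the id dictionary l and return a
        # comma-joined string of integer ids, using 0 for tokens not in the map.
        tokens = str(x).split(",")
        ids = [str(l.get(token, 0)) for token in tokens]
        return ",".join(ids)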
@@ -167,7 +207,8 @@ def get_predict(date,value_map,app_list_map,level2_map):
     nearby_pre = nearby_pre.drop("label", axis=1)
     for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "time", "stat_date"]:
+              "channel", "top", "time", "stat_date", "hospital_id",
+              "method", "min", "max", "treatment_time", "maintain_time", "recover_time"]:
         native_pre[i] = native_pre[i].map(value_map)
         # TODO: categories not covered by value_map come out as NaN; fill with 0 for now, refine later
         native_pre[i] = native_pre[i].fillna(0)
@@ -176,23 +217,23 @@ def get_predict(date,value_map,app_list_map,level2_map):
         # TODO: categories not covered by value_map come out as NaN; fill with 0 for now, refine later
         nearby_pre[i] = nearby_pre[i].fillna(0)
     print("native")
     print(native_pre.shape)
-    native_pre[["uid","city","cid_id"]].to_csv(path+"native.csv",index=False)
-    write_csv(native_pre, "native",200000)
+    native_pre[["uid", "city", "cid_id"]].to_csv(path + "native.csv", index=False)
+    write_csv(native_pre, "native", 200000)
     print("nearby")
     print(nearby_pre.shape)
-    nearby_pre[["uid","city","cid_id"]].to_csv(path+"nearby.csv",index=False)
+    nearby_pre[["uid", "city", "cid_id"]].to_csv(path + "nearby.csv", index=False)
     write_csv(nearby_pre, "nearby", 160000)


 if __name__ == '__main__':
     train_data_set = "esmm_train_data"
     path = "/data/esmm/"
-    date,value,app_list,level2 = get_data()
-    get_predict(date, value,app_list,level2)
+    date, value, app_list, level2, level3 = get_data()
+    get_predict(date, value, app_list, level2, level3)
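Both new joins (the knowledge table on level2 and the hospital lookup on service_id) cast the lookup table's key column to str before pd.merge so it matches the string-typed key on df. A toy illustration of that alignment step, with made-up values:

    import pandas as pd

    df = pd.DataFrame({"service_id": ["101", "102"]})                           # keys already strings on the left
    hospital = pd.DataFrame({"service_id": [101, 102], "hospital_id": [7, 8]})  # integer ids from the lookup query

    hospital["service_id"] = hospital["service_id"].astype("str")               # align dtypes before joining
    merged = pd.merge(df, hospital, on="service_id", how="left")
    print(merged)   # hospital_id is filled in for both rows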
@@ -32,15 +32,15 @@ rm ${DATA_PATH}/nearby/nearby_*

 echo "train..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=15 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train

 echo "infer native..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/native_infer.log
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=15 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/native_infer.log

 echo "infer nearby..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/nearby_infer.log
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=15 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/nearby_infer.log

 echo "sort and 2sql"
 ${PYTHON_PATH} ${MODEL_PATH}/to_database.py > ${DATA_PATH}/insert_database.log
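--field_size grows from 8 to 15 because the fixed-length ids vector written per example now carries one entry per feature in the expanded feats list, and tf.FixedLenFeature([FLAGS.field_size], tf.int64) in train.py must match that length exactly. A quick sanity check:

    feats = ["ucity_id", "ccity_name", "device_type", "manufacturer",
             "channel", "top", "time", "stat_date", "hospital_id",
             "method", "min", "max", "treatment_time", "maintain_time", "recover_time"]
    assert len(feats) == 15   # must equal --field_size passed above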
@@ -21,6 +21,7 @@ tf.app.flags.DEFINE_string("input_dir", "./", "input dir")
 tf.app.flags.DEFINE_string("output_dir", "./", "output dir")
 tf.app.flags.DEFINE_integer("threads", 16, "threads num")

 def gen_tfrecords(in_file):
     basename = os.path.basename(in_file) + ".tfrecord"
     out_file = os.path.join(FLAGS.output_dir, basename)
@@ -29,18 +30,21 @@ def gen_tfrecords(in_file):
     for i in range(df.shape[0]):
         feats = ["ucity_id", "ccity_name", "device_type", "manufacturer",
-                 "channel", "top", "time", "stat_date"]
+                 "channel", "top", "time", "stat_date", "hospital_id",
+                 "method", "min", "max", "treatment_time", "maintain_time", "recover_time"]
         id = np.array([])
         for j in feats:
             id = np.append(id,df[j][i])
         app_list = np.array(str(df["app_list"][i]).split(","))
         level2_list = np.array(str(df["clevel2_id"][i]).split(","))
+        level3_list = np.array(str(df["level3_ids"][i]).split(","))
         features = tf.train.Features(feature={
             "y": tf.train.Feature(float_list=tf.train.FloatList(value=[df["y"][i]])),
             "z": tf.train.Feature(float_list=tf.train.FloatList(value=[df["z"][i]])),
             "ids": tf.train.Feature(int64_list=tf.train.Int64List(value=id.astype(np.int))),
-            "app_list":tf.train.Feature(int64_list=tf.train.Int64List(value=app_list.astype(np.int))),
-            "level2_list": tf.train.Feature(int64_list=tf.train.Int64List(value=level2_list.astype(np.int)))
+            "app_list": tf.train.Feature(int64_list=tf.train.Int64List(value=app_list.astype(np.int))),
+            "level2_list": tf.train.Feature(int64_list=tf.train.Int64List(value=level2_list.astype(np.int))),
+            "level3_list": tf.train.Feature(int64_list=tf.train.Int64List(value=level3_list.astype(np.int)))
         })
         example = tf.train.Example(features = features)
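A self-contained sketch of the record layout the loop above produces, with made-up id values (TF 1.x style, matching this repo): one fixed block of 15 ids plus three variable-length id lists, all drawn from the same shared id space.

    import numpy as np
    import tensorflow as tf

    ids = np.arange(15, dtype=np.int64)                # one id per entry in feats
    app_list = np.array([3, 8, 41], dtype=np.int64)    # variable-length token ids
    level2_list = np.array([120], dtype=np.int64)
    level3_list = np.array([301, 305], dtype=np.int64)

    example = tf.train.Example(features=tf.train.Features(feature={
        "y": tf.train.Feature(float_list=tf.train.FloatList(value=[1.0])),
        "z": tf.train.Feature(float_list=tf.train.FloatList(value=[0.0])),
        "ids": tf.train.Feature(int64_list=tf.train.Int64List(value=ids)),
        "app_list": tf.train.Feature(int64_list=tf.train.Int64List(value=app_list)),
        "level2_list": tf.train.Feature(int64_list=tf.train.Int64List(value=level2_list)),
        "level3_list": tf.train.Feature(int64_list=tf.train.Int64List(value=level3_list)),
    }))
    serialized = example.SerializeToString()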
...
@@ -55,7 +55,8 @@ def input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):
         "z": tf.FixedLenFeature([], tf.float32),
         "ids": tf.FixedLenFeature([FLAGS.field_size], tf.int64),
         "app_list": tf.VarLenFeature(tf.int64),
-        "level2_list": tf.VarLenFeature(tf.int64)
+        "level2_list": tf.VarLenFeature(tf.int64),
+        "level3_list": tf.VarLenFeature(tf.int64)
     }
     parsed = tf.parse_single_example(record, features)
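The fixed-length ids block parses back to a dense tensor of shape [field_size], while each VarLenFeature parses to a tf.SparseTensor, which is exactly the sp_ids input that tf.nn.embedding_lookup_sparse consumes in model_fn below. Continuing the sketch above (TF 1.x API, reusing its serialized string):

    features = {
        "y": tf.FixedLenFeature([], tf.float32),
        "z": tf.FixedLenFeature([], tf.float32),
        "ids": tf.FixedLenFeature([15], tf.int64),   # 15 == --field_size
        "app_list": tf.VarLenFeature(tf.int64),
        "level2_list": tf.VarLenFeature(tf.int64),
        "level3_list": tf.VarLenFeature(tf.int64),
    }
    parsed = tf.parse_single_example(serialized, features)
    print(type(parsed["ids"]))       # dense Tensor, shape [15]
    print(type(parsed["app_list"]))  # SparseTensor holding however many ids the example carried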
@@ -103,6 +104,7 @@ def model_fn(features, labels, mode, params):
     feat_ids = features['ids']
     app_list = features['app_list']
     level2_list = features['level2_list']
+    level3_list = features['level3_list']
     if FLAGS.task_type != "infer":
         y = labels['y']
@@ -113,10 +115,11 @@ def model_fn(features, labels, mode, params):
     embedding_id = tf.nn.embedding_lookup(Feat_Emb,feat_ids)
     app_id = tf.nn.embedding_lookup_sparse(Feat_Emb, sp_ids=app_list, sp_weights=None, combiner="sum")
     level2 = tf.nn.embedding_lookup_sparse(Feat_Emb, sp_ids=level2_list, sp_weights=None, combiner="sum")
+    level3 = tf.nn.embedding_lookup_sparse(Feat_Emb, sp_ids=level3_list, sp_weights=None, combiner="sum")
     # x_concat = tf.reshape(embedding_id,shape=[-1, common_dims])  # None * (F * K)
-    x_concat = tf.concat([tf.reshape(embedding_id,shape=[-1,common_dims]),app_id,level2], axis=1)
+    x_concat = tf.concat([tf.reshape(embedding_id,shape=[-1,common_dims]),app_id,level2,level3], axis=1)
     with tf.name_scope("CVR_Task"):
         if mode == tf.estimator.ModeKeys.TRAIN:
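With the level3 pooling concatenated in, x_concat gains one more embedding-sized block. A back-of-the-envelope width check, assuming common_dims = field_size * embedding_size (its definition is outside this diff) and the hyperparameters from the updated shell script:

    field_size = 15        # --field_size after this commit
    embedding_size = 16    # --embedding_size
    common_dims = field_size * embedding_size            # assumed definition, not shown in the diff
    x_concat_width = common_dims + 3 * embedding_size    # ids block + app_list, level2 and level3 poolings
    print(x_concat_width)  # 288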
...