Commit 5c01f3a6 authored by 张彦钊

Add diary level-3 tags and hospital id to the training set

parent 4bc040e6
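
In short, the training-set query now also selects e.diary_service_id and feat.level3_ids, and each service_id is resolved to a hospital_id through the zhengxing api_service/api_doctor tables before being merged back onto the training frame. A minimal, self-contained pandas sketch of that enrichment step, with made-up sample rows standing in for the two MySQL queries:

import pandas as pd

# Hypothetical training rows; the real frame comes from the jerry_test query below.
df = pd.DataFrame({"y": [0, 1], "z": [0, 0], "service_id": [101, 102]})

# Hypothetical service -> hospital lookup; the real one joins api_service to api_doctor.
hospital = pd.DataFrame({"service_id": [101, 102], "hospital_id": [7, 9]})

# Left-merge so rows without a matching service keep NaN for hospital_id.
df = pd.merge(df, hospital, on="service_id", how="left")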
@@ -40,8 +40,8 @@ def get_data():
start = (temp - datetime.timedelta(days=300)).strftime("%Y-%m-%d")
print(start)
db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name," \
"u.device_type,u.manufacturer,u.channel,c.top,e.device_id,cut.time,dl.app_list " \
sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
"u.channel,c.top,e.device_id,cut.time,dl.app_list,e.diary_service_id,feat.level3_ids " \
"from {} e left join user_feature u on e.device_id = u.device_id " \
"left join cid_type_top c on e.device_id = c.device_id " \
"left join cid_time_cut cut on e.cid_id = cut.cid " \
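
con_sql is not part of this diff; judging from the later call hospital = con_sql(db, sql) and the integer-keyed rename in the next hunk, it presumably executes the query and returns the rows as a DataFrame with positional column labels. A rough sketch under that assumption:

import pandas as pd

def con_sql(db, sql):
    # Assumed helper (not shown in this diff): run the query on an open pymysql
    # connection and return the rows as a DataFrame whose columns are the
    # positional integers 0, 1, 2, ... -- which is why the calling code renames
    # columns by index.
    cursor = db.cursor()
    cursor.execute(sql)
    rows = cursor.fetchall()
    cursor.close()
    return pd.DataFrame(list(rows))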
@@ -52,31 +52,31 @@ def get_data():
# print(df.shape)
df = df.rename(columns={0: "y", 1: "z", 2: "stat_date", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
11: "time",12:"app_list"})
11: "time",12:"app_list",13:"service_id",14:"level3_ids"})
print("esmm data ok")
print(df.shape)
# print(df.head(2)
service_id = df["service_id"].unique()
db = pymysql.connect(host='rdsfewzdmf0jfjp9un8xj.mysql.rds.aliyuncs.com', port=3306, user='work', passwd='BJQaT9VzDcuPBqkd', db='zhengxing')
sql = "select s.id,d.hospital_id from api_service s left join api_doctor d on s.doctor_id = d.id where s.id in {}".format(service_id)
hospital = con_sql(db, sql)
hospital = hospital.rename(columns={0: "service_id", 1: "hospital_id"})
df = pd.merge(df, hospital, on='service_id', how='left')
print("before")
print(df.shape)
print("after")
df = df.drop_duplicates()
df = df.drop_duplicates(["ucity_id", "clevel2_id", "ccity_name", "device_type", "manufacturer",
"channel", "top", "time", "stat_date","app_list"])
"channel", "top", "time", "stat_date","app_list","hospital_id","level3_ids"])
app_list_number,app_list_map = multi_hot(df,"app_list",1)
level2_number,level2_map = multi_hot(df,"clevel2_id",1+app_list_number)
# df["app_list"] = df["app_list"].fillna("lost_na")
# app_list_value = [i.split(",") for i in df["app_list"].unique()]
# app_list_unique = []
# for i in app_list_value:
# app_list_unique.extend(i)
# app_list_unique = list(set(app_list_unique))
# app_list_map = dict(zip(app_list_unique, list(range(1, len(app_list_unique) + 1))))
# df["app_list"] = df["app_list"].apply(app_list_func,args=(app_list_map,))
level3_number,level3_ids = multi_hot(df, "hospital_id", 1 + app_list_number + level2_number)
unique_values = []
features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
"channel", "top", "time", "stat_date"]
"channel", "top", "time", "stat_date","hospital_id"]
for i in features:
df[i] = df[i].astype("str")
df[i] = df[i].fillna("lost")
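
multi_hot is also defined elsewhere in the file; it is now called for "hospital_id" as well as "app_list" and "clevel2_id". The commented-out app_list block above hints at what it does: fill missing values, split the comma-separated column, number every distinct value consecutively starting at the given offset, and re-encode the column with that map. A plausible sketch along those lines (the real signature and details may differ):

import pandas as pd

def multi_hot(df, column, begin_index):
    # Plausible reconstruction of the helper used above (not shown in this diff):
    # every distinct comma-separated value in `column` gets a consecutive integer
    # id starting at `begin_index`; the column is rewritten with those ids, and
    # the block size plus the value -> id map are returned so the next feature
    # block can start right after this one.
    df[column] = df[column].astype("str")  # so numeric ids (e.g. hospital_id) split cleanly
    df[column] = df[column].fillna("lost_na")
    values = set()
    for cell in df[column].unique():
        values.update(cell.split(","))
    value_map = dict(zip(sorted(values), range(begin_index, begin_index + len(values))))
    df[column] = df[column].apply(
        lambda cell: ",".join(str(value_map[v]) for v in cell.split(",")))
    return len(values), value_map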
@@ -84,14 +84,14 @@ def get_data():
df[i] = df[i] + i
unique_values.extend(list(df[i].unique()))
temp = list(range(1+app_list_number+level2_number, 1 + app_list_number+level2_number + len(unique_values)))
temp = list(range(1+app_list_number+level2_number + level3_number, 1 + app_list_number+level2_number + level3_number + len(unique_values)))
value_map = dict(zip(unique_values,temp))
df = df.drop("device_id", axis=1)
train = df[df["stat_date"] != validate_date+"stat_date"]
test = df[df["stat_date"] == validate_date+"stat_date"]
for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
"channel", "top", "time", "stat_date"]:
"channel", "top", "time", "stat_date","hospital_id"]:
train[i] = train[i].map(value_map)
test[i] = test[i].map(value_map)
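
With the extra hospital_id block, the one-hot categorical ids are shifted to start after all three multi-hot blocks, and hospital_id itself joins the columns mapped through value_map. A small worked example with made-up block sizes to show how the id space is laid out:

# Made-up block sizes, purely to illustrate the offset arithmetic above.
app_list_number, level2_number, level3_number = 500, 30, 20

# Example one-hot values: each cell already has its column name appended
# (df[i] = df[i] + i), e.g. "beijing" in ucity_id becomes "beijingucity_id".
unique_values = ["beijingucity_id", "xiaomimanufacturer"]

start = 1 + app_list_number + level2_number + level3_number
value_map = dict(zip(unique_values, range(start, start + len(unique_values))))
print(value_map)  # {'beijingucity_id': 551, 'xiaomimanufacturer': 552}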
@@ -193,5 +193,5 @@ if __name__ == '__main__':
train_data_set = "esmm_train_data"
path = "/data/esmm/"
date,value,app_list,level2 = get_data()
get_predict(date, value,app_list,level2)
# get_predict(date, value,app_list,level2)