Commit 89b82410 authored by 张彦钊

change test file

parent 3387d6ea
@@ -195,51 +195,51 @@ def feature_engineer():
"left join eagle.src_zhengxing_api_doctor doctor on service.doctor_id = doctor.id " \
"where e.stat_date >= '{}'".format(start)
df = spark.sql(sql)
df = df.drop_duplicates(["ucity_id", "level2_ids", "ccity_name", "device_type", "manufacturer",
"channel", "top", "time", "stat_date", "app_list", "hospital_id", "level3_ids",
"tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7"])
df = df.na.fill(dict(zip(features, features)))
rdd = df.select("stat_date", "y", "z", "app_list", "level2_ids", "level3_ids",
"tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7",
"ucity_id", "ccity_name", "device_type", "manufacturer", "channel", "top", "time",
"hospital_id", "treatment_method", "price_min", "price_max", "treatment_time",
"maintain_time", "recover_time").rdd.repartition(200).map(
lambda x: (x[0], float(x[1]), float(x[2]), app_list_func(x[3], app_list_map), app_list_func(x[4], leve2_map),
app_list_func(x[5], leve3_map), app_list_func(x[6], leve2_map), app_list_func(x[7], leve2_map),
app_list_func(x[8], leve2_map), app_list_func(x[9], leve2_map), app_list_func(x[10], leve2_map),
app_list_func(x[11], leve2_map), app_list_func(x[12], leve2_map),
[value_map[x[0]], value_map[x[13]], value_map[x[14]], value_map[x[15]], value_map[x[16]],
value_map[x[17]], value_map[x[18]], value_map[x[19]], value_map[x[20]], value_map[x[21]],
value_map[x[22]], value_map[x[23]], value_map[x[24]], value_map[x[25]], value_map[x[26]]]))\
.zipWithIndex().map(lambda x:(x[0][0],x[0][1],x[0][2],x[0][3],x[0][4],x[0][5],x[0][6],x[0][7],x[0][8],
x[0][9],x[0][10],x[0][11],x[0][12],x[0][13],
x[1]))
rdd.persist(storageLevel=StorageLevel.MEMORY_ONLY_SER)
# TODO: after going live, remove the train filter below, because the most recent day's data should also be used as training data
train = rdd.map(
lambda x: (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9],
x[10], x[11], x[12], x[13],x[14]))
f = time.time()
spark.createDataFrame(train).toDF("y", "z", "app_list", "level2_list", "level3_list",
"tag1_list", "tag2_list", "tag3_list", "tag4_list",
"tag5_list", "tag6_list", "tag7_list", "ids","number") \
.repartition(1).write.format("tfrecords").save(path=path + "test_tr/", mode="overwrite")
h = time.time()
print("train tfrecord done")
print((h - f) / 60)
print("训练集样本总量:")
print(rdd.count())
get_pre_number()
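# note on the zipWithIndex step above: it pairs every row with a stable index
# that is written out as the "number" column, e.g.
#   sc.parallelize(["a", "b"]).zipWithIndex().collect() -> [("a", 0), ("b", 1)]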
# df = spark.sql(sql)
#
# df = df.drop_duplicates(["ucity_id", "level2_ids", "ccity_name", "device_type", "manufacturer",
# "channel", "top", "time", "stat_date", "app_list", "hospital_id", "level3_ids",
# "tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7"])
#
# df = df.na.fill(dict(zip(features, features)))
#
# rdd = df.select("stat_date", "y", "z", "app_list", "level2_ids", "level3_ids",
# "tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7",
# "ucity_id", "ccity_name", "device_type", "manufacturer", "channel", "top", "time",
# "hospital_id", "treatment_method", "price_min", "price_max", "treatment_time",
# "maintain_time", "recover_time").rdd.repartition(200).map(
# lambda x: (x[0], float(x[1]), float(x[2]), app_list_func(x[3], app_list_map), app_list_func(x[4], leve2_map),
# app_list_func(x[5], leve3_map), app_list_func(x[6], leve2_map), app_list_func(x[7], leve2_map),
# app_list_func(x[8], leve2_map), app_list_func(x[9], leve2_map), app_list_func(x[10], leve2_map),
# app_list_func(x[11], leve2_map), app_list_func(x[12], leve2_map),
# [value_map[x[0]], value_map[x[13]], value_map[x[14]], value_map[x[15]], value_map[x[16]],
# value_map[x[17]], value_map[x[18]], value_map[x[19]], value_map[x[20]], value_map[x[21]],
# value_map[x[22]], value_map[x[23]], value_map[x[24]], value_map[x[25]], value_map[x[26]]]))\
# .zipWithIndex().map(lambda x:(x[0][0],x[0][1],x[0][2],x[0][3],x[0][4],x[0][5],x[0][6],x[0][7],x[0][8],
# x[0][9],x[0][10],x[0][11],x[0][12],x[0][13],
# x[1]))
#
#
# rdd.persist(storageLevel= StorageLevel.MEMORY_ONLY_SER)
#
# # TODO: after going live, remove the train filter below, because the most recent day's data should also be used as training data
#
# train = rdd.map(
# lambda x: (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9],
# x[10], x[11], x[12], x[13],x[14]))
# f = time.time()
# spark.createDataFrame(train).toDF("y", "z", "app_list", "level2_list", "level3_list",
# "tag1_list", "tag2_list", "tag3_list", "tag4_list",
# "tag5_list", "tag6_list", "tag7_list", "ids","number") \
# .repartition(1).write.format("tfrecords").save(path=path + "test_tr/", mode="overwrite")
# h = time.time()
# print("train tfrecord done")
# print((h - f) / 60)
#
# print("训练集样本总量:")
# print(rdd.count())
#
# get_pre_number()
# test = rdd.filter(lambda x: x[0] == validate_date).map(
# lambda x: (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9],
@@ -252,7 +252,7 @@ def feature_engineer():
#
# print("va tfrecord done")
rdd.unpersist()
# rdd.unpersist()
return validate_date, value_map, app_list_map, leve2_map, leve3_map
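Note: app_list_func and the vocabulary dicts (app_list_map, leve2_map, leve3_map) are defined elsewhere in this file and do not appear in the diff. Judging from how it is called on comma-separated tag-id columns, a plausible minimal sketch follows; the comma delimiter and the unseen-id fallback are assumptions, not the repository's actual implementation:

def app_list_func(ids, vocab):
    # split a comma-separated id string and map each id through the vocabulary
    # dict; unseen ids fall back to 0 (assumed unknown/padding index)
    if ids is None:
        return [0]
    return [vocab.get(i, 0) for i in str(ids).split(",")]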
@@ -277,8 +277,7 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
"left join jerry_test.order_tag ot on e.device_id = ot.device_id " \
"left join jerry_test.sixin_tag sixin on e.device_id = sixin.device_id " \
"left join jerry_test.cart_tag cart on e.device_id = cart.device_id " \
"left join jerry_test.knowledge k on feat.level2 = k.level2_id" \
" where e.label = 1 limit 60000"
"left join jerry_test.knowledge k on feat.level2 = k.level2_id"
features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
"channel", "top", "time", "hospital_id",
@@ -315,18 +314,18 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
rdd.persist(storageLevel=StorageLevel.MEMORY_ONLY_SER)
# native_pre = spark.createDataFrame(rdd.filter(lambda x:x[0] == 0).map(lambda x:(x[3],x[4],x[5])))\
# .toDF("city","uid","cid_id")
# print("native csv")
# native_pre.toPandas().to_csv(local_path+"native.csv", header=True)
# spark.createDataFrame(rdd.filter(lambda x: x[0] == 0)
# .map(lambda x: (x[1],x[2],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15],x[16],x[17]))) \
# .toDF("y","z","app_list", "level2_list", "level3_list","tag1_list", "tag2_list", "tag3_list", "tag4_list",
# "tag5_list", "tag6_list", "tag7_list", "ids","number").repartition(100).write.format("tfrecords") \
# .save(path=path+"native/", mode="overwrite")
# print("native tfrecord done")
# h = time.time()
# print((h-f)/60)
native_pre = spark.createDataFrame(rdd.filter(lambda x:x[0] == 0).map(lambda x:(x[3],x[4],x[5])))\
.toDF("city","uid","cid_id")
print("native csv")
native_pre.toPandas().to_csv(local_path+"native.csv", header=True)
spark.createDataFrame(rdd.filter(lambda x: x[0] == 0)
.map(lambda x: (x[1],x[2],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15],x[16],x[17]))) \
.toDF("y","z","app_list", "level2_list", "level3_list","tag1_list", "tag2_list", "tag3_list", "tag4_list",
"tag5_list", "tag6_list", "tag7_list", "ids","number").repartition(100).write.format("tfrecords") \
.save(path=path+"test_native/", mode="overwrite")
print("native tfrecord done")
h = time.time()
print((h-f)/60)
nearby_pre = spark.createDataFrame(rdd.filter(lambda x: x[0] == 1).map(lambda x: (x[3], x[4], x[5]))) \
.toDF("city", "uid", "cid_id")
......
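A quick sanity check for the files written above, assuming the same "tfrecords" data source (spark-tensorflow-connector) used for writing is also available for reading:

check = spark.read.format("tfrecords").load(path + "test_native/")
check.printSchema()  # expect y, z, app_list, ..., ids, number
print(check.count())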