Commit 3113c918 authored by 张彦钊

change test file

parent 1fd1f8ce
@@ -173,28 +173,28 @@ def feature_engineer():
16 + apps_number + level2_number + level3_number + len(unique_values)))
value_map = dict(zip(unique_values, temp))
sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
"u.channel,c.top,cut.time,dl.app_list,feat.level3_ids,doctor.hospital_id," \
"wiki.tag as tag1,question.tag as tag2,search.tag as tag3,budan.tag as tag4," \
"ot.tag as tag5,sixin.tag as tag6,cart.tag as tag7," \
"k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time " \
"from jerry_test.esmm_train_data_dwell e left join jerry_test.user_feature u on e.device_id = u.device_id " \
"left join jerry_test.cid_type_top c on e.device_id = c.device_id " \
"left join jerry_test.cid_time_cut cut on e.cid_id = cut.cid " \
"left join jerry_test.device_app_list dl on e.device_id = dl.device_id " \
"left join jerry_test.diary_feat feat on e.cid_id = feat.diary_id " \
"left join jerry_test.knowledge k on feat.level2 = k.level2_id " \
"left join jerry_test.wiki_tag wiki on e.device_id = wiki.device_id " \
"left join jerry_test.question_tag question on e.device_id = question.device_id " \
"left join jerry_test.search_tag search on e.device_id = search.device_id " \
"left join jerry_test.budan_tag budan on e.device_id = budan.device_id " \
"left join jerry_test.order_tag ot on e.device_id = ot.device_id " \
"left join jerry_test.sixin_tag sixin on e.device_id = sixin.device_id " \
"left join jerry_test.cart_tag cart on e.device_id = cart.device_id " \
"left join eagle.src_zhengxing_api_service service on e.diary_service_id = service.id " \
"left join eagle.src_zhengxing_api_doctor doctor on service.doctor_id = doctor.id " \
"where e.stat_date >= '{}'".format(start)
# sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
# "u.channel,c.top,cut.time,dl.app_list,feat.level3_ids,doctor.hospital_id," \
# "wiki.tag as tag1,question.tag as tag2,search.tag as tag3,budan.tag as tag4," \
# "ot.tag as tag5,sixin.tag as tag6,cart.tag as tag7," \
# "k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time " \
# "from jerry_test.esmm_train_data_dwell e left join jerry_test.user_feature u on e.device_id = u.device_id " \
# "left join jerry_test.cid_type_top c on e.device_id = c.device_id " \
# "left join jerry_test.cid_time_cut cut on e.cid_id = cut.cid " \
# "left join jerry_test.device_app_list dl on e.device_id = dl.device_id " \
# "left join jerry_test.diary_feat feat on e.cid_id = feat.diary_id " \
# "left join jerry_test.knowledge k on feat.level2 = k.level2_id " \
# "left join jerry_test.wiki_tag wiki on e.device_id = wiki.device_id " \
# "left join jerry_test.question_tag question on e.device_id = question.device_id " \
# "left join jerry_test.search_tag search on e.device_id = search.device_id " \
# "left join jerry_test.budan_tag budan on e.device_id = budan.device_id " \
# "left join jerry_test.order_tag ot on e.device_id = ot.device_id " \
# "left join jerry_test.sixin_tag sixin on e.device_id = sixin.device_id " \
# "left join jerry_test.cart_tag cart on e.device_id = cart.device_id " \
# "left join eagle.src_zhengxing_api_service service on e.diary_service_id = service.id " \
# "left join eagle.src_zhengxing_api_doctor doctor on service.doctor_id = doctor.id " \
# "where e.stat_date >= '{}'".format(start)
#
# df = spark.sql(sql)
#
# df = df.drop_duplicates(["ucity_id", "level2_ids", "ccity_name", "device_type", "manufacturer",
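Note: near the top of this hunk, `value_map = dict(zip(unique_values, temp))` packs every remaining unique categorical value into a single integer id space, offset past the 16 base feature slots and the app/level2/level3 vocabularies. A minimal self-contained sketch of that step; the vocabulary sizes and sample values below are made up for illustration, only the offset arithmetic mirrors the diff:

```python
# Sketch of the value_map construction above; sizes/values are hypothetical.
apps_number, level2_number, level3_number = 3, 2, 2
unique_values = ["beijing", "shanghai", "ios", "android"]

offset = 16 + apps_number + level2_number + level3_number  # first free id
temp = list(range(offset, offset + len(unique_values)))
value_map = dict(zip(unique_values, temp))
# -> {'beijing': 23, 'shanghai': 24, 'ios': 25, 'android': 26}
```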
@@ -240,7 +240,7 @@ def feature_engineer():
# print(rdd.count())
#
# get_pre_number()
#
# test = rdd.filter(lambda x: x[0] == validate_date).map(
# lambda x: (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9],
# x[10], x[11], x[12], x[13],x[14]))
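For reference, a hedged reconstruction of what the commented-out validation split in this hunk would do if re-enabled: filter the cached rdd to rows whose `stat_date` equals `validate_date`, project the 14 feature fields, and write one TFRecord shard via the spark-tensorflow-connector (`"tfrecords"` format, as used elsewhere in this file). The `toDF` column names are borrowed from the prediction hunk below and are an assumption, not shown for this block in the diff:

```python
# Hedged sketch of the disabled validation write; assumes `rdd`, `spark`,
# `path`, and `validate_date` as defined elsewhere in this script.
test = rdd.filter(lambda x: x[0] == validate_date).map(
    lambda x: (x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9],
               x[10], x[11], x[12], x[13], x[14]))
spark.createDataFrame(test) \
    .toDF("y", "z", "app_list", "level2_list", "level3_list", "tag1_list",
          "tag2_list", "tag3_list", "tag4_list", "tag5_list", "tag6_list",
          "tag7_list", "ids", "number") \
    .repartition(1).write.format("tfrecords").save(path=path + "va/", mode="overwrite")
print("va tfrecord done")
```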
@@ -251,7 +251,7 @@ def feature_engineer():
# .repartition(1).write.format("tfrecords").save(path=path + "va/", mode="overwrite")
#
# print("va tfrecord done")
#
# rdd.unpersist()
return validate_date, value_map, app_list_map, leve2_map, leve3_map
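The return statement above feeds the prediction stage directly; a minimal sketch of the hand-off implied by the two signatures visible in this diff:

```python
# How the two functions in this file connect, per their signatures.
validate_date, value_map, app_list_map, leve2_map, leve3_map = feature_engineer()
get_predict(validate_date, value_map, app_list_map, leve2_map, leve3_map)
```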
@@ -314,29 +314,31 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
rdd.persist(storageLevel=StorageLevel.MEMORY_ONLY_SER)
native_pre = spark.createDataFrame(rdd.filter(lambda x:x[0] == 0).map(lambda x:(x[3],x[4],x[5],x[17])))\
.toDF("city","uid","cid_id","number")
print("native csv")
native_pre.toPandas().to_csv(local_path+"native.csv", header=True)
# native_pre = spark.createDataFrame(rdd.filter(lambda x:x[0] == 0).map(lambda x:(x[3],x[4],x[5],x[17])))\
# .toDF("city","uid","cid_id","number")
# print("native csv")
# native_pre.toPandas().to_csv(local_path+"native.csv", header=True)
spark.createDataFrame(rdd.filter(lambda x: x[0] == 0)
.map(lambda x: (x[1],x[2],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15],x[16],x[17]))) \
.map(lambda x: (x[1],x[2],x[6],x[7],x[8],x[9],x[10],x[11],
x[12],x[13],x[14],x[15],x[16],x[17],x[3],x[4],x[5]))) \
.toDF("y","z","app_list", "level2_list", "level3_list","tag1_list", "tag2_list", "tag3_list", "tag4_list",
"tag5_list", "tag6_list", "tag7_list", "ids","number").repartition(100).write.format("tfrecords") \
"tag5_list", "tag6_list", "tag7_list", "ids","number","city","uid","cid_id").repartition(100).write.format("tfrecords") \
.save(path=path+"test_native/", mode="overwrite")
print("native tfrecord done")
h = time.time()
print((h-f)/60)
nearby_pre = spark.createDataFrame(rdd.filter(lambda x: x[0] == 1).map(lambda x: (x[3], x[4], x[5],x[17]))) \
.toDF("city", "uid", "cid_id","number")
print("nearby csv")
nearby_pre.toPandas().to_csv(local_path + "nearby.csv", header=True)
# nearby_pre = spark.createDataFrame(rdd.filter(lambda x: x[0] == 1).map(lambda x: (x[3], x[4], x[5],x[17]))) \
# .toDF("city", "uid", "cid_id","number")
# print("nearby csv")
# nearby_pre.toPandas().to_csv(local_path + "nearby.csv", header=True)
spark.createDataFrame(rdd.filter(lambda x: x[0] == 1)
.map(
lambda x: (x[1], x[2], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15], x[16],x[17]))) \
lambda x: (x[1], x[2], x[6], x[7], x[8], x[9], x[10], x[11], x[12],
x[13], x[14], x[15], x[16],x[17],x[3],x[4],x[5]))) \
.toDF("y", "z", "app_list", "level2_list", "level3_list", "tag1_list", "tag2_list", "tag3_list", "tag4_list",
"tag5_list", "tag6_list", "tag7_list", "ids","number").repartition(100).write.format("tfrecords") \
"tag5_list", "tag6_list", "tag7_list", "ids","number","city","uid","cid_id").repartition(100).write.format("tfrecords") \
.save(path=path + "test_nearby/", mode="overwrite")
print("nearby tfrecord done")