Commit 1fd1f8ce authored by 张彦钊

change test file

parent 89b82410
@@ -314,8 +314,8 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
     rdd.persist(storageLevel= StorageLevel.MEMORY_ONLY_SER)
-    native_pre = spark.createDataFrame(rdd.filter(lambda x:x[0] == 0).map(lambda x:(x[3],x[4],x[5])))\
-        .toDF("city","uid","cid_id")
+    native_pre = spark.createDataFrame(rdd.filter(lambda x:x[0] == 0).map(lambda x:(x[3],x[4],x[5],x[17])))\
+        .toDF("city","uid","cid_id","number")
     print("native csv")
     native_pre.toPandas().to_csv(local_path+"native.csv", header=True)
     spark.createDataFrame(rdd.filter(lambda x: x[0] == 0)
@@ -327,8 +327,8 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
     h = time.time()
     print((h-f)/60)
-    nearby_pre = spark.createDataFrame(rdd.filter(lambda x: x[0] == 1).map(lambda x: (x[3], x[4], x[5]))) \
-        .toDF("city", "uid", "cid_id")
+    nearby_pre = spark.createDataFrame(rdd.filter(lambda x: x[0] == 1).map(lambda x: (x[3], x[4], x[5],x[17]))) \
+        .toDF("city", "uid", "cid_id","number")
     print("nearby csv")
     nearby_pre.toPandas().to_csv(local_path + "nearby.csv", header=True)
...
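For context, the change follows the usual PySpark pattern in this script: the tuple built in map() and the column names passed to toDF() must have the same length, so selecting the extra field x[17] requires appending the matching "number" column. Below is a minimal, self-contained sketch of that pattern; the SparkSession app name, the sample rows, the simplified field positions, and the local_path value are illustrative assumptions, not taken from the full script.

from pyspark import StorageLevel
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("predict_export_sketch").getOrCreate()

# Illustrative rows: (flag, city, uid, cid_id, number). The real RDD uses
# positions x[0], x[3], x[4], x[5], x[17]; the indices here are simplified.
rows = [
    (0, "beijing", "u1", "c1", 7),
    (1, "shanghai", "u2", "c2", 9),
]
rdd = spark.sparkContext.parallelize(rows)
# The script persists with MEMORY_ONLY_SER; MEMORY_ONLY is used here for
# compatibility across PySpark versions.
rdd.persist(storageLevel=StorageLevel.MEMORY_ONLY)

# flag == 0 -> "native" predictions. The tuple selected in map() and the names
# passed to toDF() must stay in step, otherwise toDF() fails on column count.
native_pre = spark.createDataFrame(
    rdd.filter(lambda x: x[0] == 0).map(lambda x: (x[1], x[2], x[3], x[4]))
).toDF("city", "uid", "cid_id", "number")

# Same export step as the script: collect to pandas and write a local CSV.
local_path = "/tmp/"  # placeholder; the real local_path is defined elsewhere
native_pre.toPandas().to_csv(local_path + "native.csv", header=True)

The same pattern applies to the nearby_pre branch (flag == 1), which is why both hunks add x[17] and "number" together.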