Commit ef64e1b8 authored by 张彦钊

change test file

parent fbf08e2c
@@ -198,12 +198,12 @@ def con_sql(db,sql):
def test():
sql = "select stat_date,cid_id from esmm_train_data e where stat_date = '{}' limit 60".format("2019-04-25")
df = spark.createDataFrame(spark.sql(sql).rdd.map(lambda x:(x[0],x[1])).zipWithIndex()
.map(lambda x:(x[1],x[0][0],x[0][1]))).toDF("ind","k","v")
df.show(6)
# df.write.csv('/recommend/tr', mode='overwrite', header=True)
df = df.toPandas()
# sql = "select stat_date,cid_id from esmm_train_data e where stat_date = '{}' limit 60".format("2019-04-25")
# df = spark.createDataFrame(spark.sql(sql).rdd.map(lambda x:(x[0],x[1])).zipWithIndex()
# .map(lambda x:(x[1],x[0][0],x[0][1]))).toDF("ind","k","v")
# df.show(6)
# # df.write.csv('/recommend/tr', mode='overwrite', header=True)
# df = df.toPandas()
from hdfs import InsecureClient
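
Note on the block commented out above: it builds an indexed DataFrame by having zipWithIndex() pair each (stat_date, cid_id) row with a sequential index, which is then promoted to a leading "ind" column before the frame is converted to pandas. A minimal, self-contained sketch of that pattern follows; the SparkSession setup and the sample rows are illustrative assumptions, not part of the original job.

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("zip_with_index_demo").getOrCreate()

# Sample (stat_date, cid_id) pairs standing in for the esmm_train_data query result.
rows = [("2019-04-25", 101), ("2019-04-25", 102), ("2019-04-25", 103)]
rdd = spark.sparkContext.parallelize(rows)

# zipWithIndex yields (value, index); reorder to (index, stat_date, cid_id).
indexed = rdd.zipWithIndex().map(lambda x: (x[1], x[0][0], x[0][1]))

df = spark.createDataFrame(indexed).toDF("ind", "k", "v")
df.show(3)
df_pd = df.toPandas()
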
@@ -212,11 +212,11 @@ def test():
client = InsecureClient('http://nvwa01:50070')
write_dataframe(client, '/recommend/va/a.csv', df)
# write_dataframe(client, '/recommend/va/a.csv', df)
# df = read_dataframe(client,"/recommend/tr/part-00000-7f54d878-69a3-4a31-8aaa-7688f4e2aa10-c000.csv")
#
# print(df.head())
df = read_dataframe(client,"/recommend/va/a.csv")
print(df.head())
# spark.sql("use online")
# spark.sql("ADD JAR /srv/apps/brickhouse-0.7.1-SNAPSHOT.jar")