Commit 3c206ad4 authored by 张彦钊

change test file

parent 065787bb
@@ -198,20 +198,25 @@ def con_sql(db,sql):
def test():
# sql = "select stat_date,cid_id from esmm_train_data e where stat_date = '{}' limit 60".format("2019-04-25")
# df = spark.createDataFrame(spark.sql(sql).rdd.map(lambda x:(x[0],x[1])).zipWithIndex()
# .map(lambda x:(x[1],x[0][0],x[0][1]))).toDF("ind","k","v")
# df.show(6)
sql = "select stat_date,cid_id from esmm_train_data e where stat_date = '{}' limit 60".format("2019-04-25")
df = spark.createDataFrame(spark.sql(sql).rdd.map(lambda x:(x[0],x[1])).zipWithIndex()
.map(lambda x:(x[1],x[0][0],x[0][1]))).toDF("ind","k","v")
df.show(6)
# df.write.csv('/recommend/tr', mode='overwrite', header=True)
    df = df.toPandas()
    from hdfs import InsecureClient
    from hdfs.ext.dataframe import read_dataframe
    from hdfs.ext.dataframe import write_dataframe
    # WebHDFS client for the namenode; reload the exported part file and copy it out to /recommend/va
    client = InsecureClient('http://nvwa01:50070')
    df = read_dataframe(client, "/recommend/tr/part-00000-7f54d878-69a3-4a31-8aaa-7688f4e2aa10-c000.csv")
    write_dataframe(client, '/recommend/va', df)
    print(df.head())
    # df = read_dataframe(client,"/recommend/tr/part-00000-7f54d878-69a3-4a31-8aaa-7688f4e2aa10-c000.csv")
    #
    # print(df.head())
# spark.sql("use online")
# spark.sql("ADD JAR /srv/apps/brickhouse-0.7.1-SNAPSHOT.jar")
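The HDFS round trip can likewise be exercised on its own with HdfsCLI's dataframe extension, roughly as below. The namenode URL, target path, and sample frame are placeholders rather than values from this repo; write_dataframe and read_dataframe move pandas frames through HDFS as Avro files, so the library needs its avro/dataframe extras installed.

# Hedged sketch of the HdfsCLI round trip; host, path, and data are placeholders.
import pandas as pd
from hdfs import InsecureClient
from hdfs.ext.dataframe import read_dataframe, write_dataframe

client = InsecureClient('http://namenode-host:50070')  # WebHDFS endpoint (placeholder)

df = pd.DataFrame({"ind": [0, 1, 2],
                   "k": ["2019-04-25"] * 3,
                   "v": ["cid_1", "cid_2", "cid_3"]})

# write the frame to HDFS as Avro, then read it back and inspect it
write_dataframe(client, '/tmp/zip_index_sample.avro', df)
restored = read_dataframe(client, '/tmp/zip_index_sample.avro')
print(restored.head())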