Commit c8de10cd authored by 张彦钊's avatar 张彦钊

change test file

parent 8029d31e
...@@ -366,25 +366,28 @@ def doris(): ...@@ -366,25 +366,28 @@ def doris():
temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d") temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
start = (temp - datetime.timedelta(days=3)).strftime("%Y-%m-%d") start = (temp - datetime.timedelta(days=3)).strftime("%Y-%m-%d")
print(start) print(start)
sql = "select e.y,e.z,e.stat_date,e.ucity_id,doris.search_tag2,doris.search_tag3 " \ # sql = "select e.y,e.z,e.stat_date,e.ucity_id,doris.search_tag2,doris.search_tag3 " \
"from jerry_test.esmm_train_data_dwell e " \ # "from jerry_test.esmm_train_data_dwell e " \
"left join jerry_test.search_doris doris on e.device_id = doris.device_id " \ # "left join jerry_test.search_doris doris on e.device_id = doris.device_id " \
"and e.stat_date = doris.get_date " \ # "and e.stat_date = doris.get_date " \
"where e.stat_date >= '{}'".format(start) # "where e.stat_date >= '{}'".format(start)
#
df = spark.sql(sql) # df = spark.sql(sql)
print(df.count()) # print(df.count())
sql = "select y,z,stat_date,ucity_id from jerry_test.esmm_train_data_dwell " \ # sql = "select y,z,stat_date,ucity_id from jerry_test.esmm_train_data_dwell " \
"where stat_date >= '{}'".format(start) # "where stat_date >= '{}'".format(start)
#
df = spark.sql(sql) # df = spark.sql(sql)
print(df.count()) # print(df.count())
sql = "select e.y,e.z,e.label,e.ucity_id,doris.search_tag2,doris.search_tag3 " \ sql = "select e.y,e.z,e.label,e.ucity_id,doris.search_tag2,doris.search_tag3 " \
"from jerry_test.esmm_pre_data e " \ "from jerry_test.esmm_pre_data e " \
"left join jerry_test.search_doris doris on e.device_id = doris.device_id " \ "left join jerry_test.search_doris doris on e.device_id = doris.device_id " \
"and e.stat_date = doris.get_date " \ "and e.stat_date = doris.get_date"
"where doris.get_date = '{}'".format(validate_date) df = spark.sql(sql)
print(df.count())
sql = "select y,z from jerry_test.esmm_pre_data"
df = spark.sql(sql) df = spark.sql(sql)
print(df.count()) print(df.count())
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment