Commit 0eacdc06 authored by 张彦钊's avatar 张彦钊

增加日记视频集合是空的判断

No related merge requests found
......@@ -21,9 +21,12 @@ def get_video_id():
result = cursor.fetchall()
df = pd.DataFrame(list(result))
print("videio_id 预览")
print(df.head(2))
video_id = df[0].values.tolist()
print(df.head(1))
db.close()
if df.empty:
return False
else:
video_id = df[0].values.tolist()
return video_id
......@@ -109,9 +112,8 @@ def get_score(queue_arg):
def update_dairy_queue(score_df,predict_score_df,total_video_id):
diary_id = score_df["cid"].values.tolist()
if total_video_id:
video_id = list(set(diary_id)&set(total_video_id))
if len(video_id)>0:
not_video = list(set(diary_id) - set(video_id))
# 为了相加时cid能够匹配,先把cid变成索引
......@@ -135,7 +137,7 @@ def update_dairy_queue(score_df,predict_score_df,total_video_id):
# print("分数合并成功")
return new_queue
# 如果没有视频日记
# 如果取交集后没有视频日记
else:
score_df = score_df.set_index(["cid"])
predict_score_df = predict_score_df.set_index(["cid"])
......@@ -143,6 +145,15 @@ def update_dairy_queue(score_df,predict_score_df,total_video_id):
score_df = score_df.sort_values(by="score", ascending=False)
# print("分数合并成功1")
return score_df.index.tolist()
# 如果total_video_id是空
else:
score_df = score_df.set_index(["cid"])
predict_score_df = predict_score_df.set_index(["cid"])
score_df["score"] = score_df["score"] + predict_score_df["score"]
score_df = score_df.sort_values(by="score", ascending=False)
# print("分数合并成功1")
return score_df.index.tolist()
def update_sql_dairy_queue(queue_name, diary_id,device_id, city_id):
......
......@@ -6,12 +6,12 @@ import time
def fetch_data(start_date, end_date):
# 获取点击表里的device_id
sql = "select distinct device_id from data_feed_click"
sql = "select distinct device_id from data_feed_click2"
click_device_id = con_sql(sql)[0].values.tolist()
print("成功获取点击表里的device_id")
# 获取点击表里的数据
sql = "select cid,device_id,time,stat_date from data_feed_click " \
sql = "select cid,device_id,time,stat_date from data_feed_click2 " \
"where stat_date >= '{0}' and stat_date <= '{1}'".format(start_date, end_date)
click = con_sql(sql)
click = click.rename(columns={0: "cid", 1: "device_id", 2: "time_date", 3: "stat_date"})
......@@ -22,7 +22,7 @@ def fetch_data(start_date, end_date):
click = click.drop("time_date", axis=1)
# 获取曝光表里的数据
sql = "select cid,device_id,time,stat_date from data_feed_exposure " \
sql = "select cid,device_id,time,stat_date from data_feed_exposure2 " \
"where stat_date >= '{0}' and stat_date <= '{1}'".format(start_date, end_date)
start = time.time()
exposure = con_sql(sql)
......
......@@ -48,7 +48,7 @@ def feature_en(data_start_date, data_end_date, validation_date, test_date):
data["hour"] = data["hour"].astype("category")
data["minute"] = data["minute"].astype("category")
# 持久化候选cid
# 持久化候选cid,选预测候选集时用这个过滤
data_set_cid = data["cid"].unique()
cid_df = pd.DataFrame()
cid_df['cid'] = data_set_cid
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment