Commit 342779ca authored by 赵威

update db

parent ef7819be
@@ -267,23 +267,22 @@ def update_tag3_user_portrait(cl_id):
             (len(first_solutions_score.keys()) > 0) or (len(second_solutions_score.keys()) > 0) or \
             (len(first_positions_score.keys()) > 0) or (len(second_positions_score.keys()) > 0) or \
             (len(projects_score.keys()) > 0):
-        # TODO
-        # redis_client.set(key, json.dumps(res))
-        # redis_client.expire(key, 60 * 60 * 24 * 180)
+        redis_client.set(key, json.dumps(res))
+        redis_client.expire(key, 60 * 60 * 24 * 180)
-        # write_user_portrait(cl_id, ",".join(first_solutions_score.keys()), ",".join(second_solutions_score.keys()),
-        #                     ",".join(first_demands_score.keys()), ",".join(second_demands_score.keys()),
-        #                     ",".join(first_positions_score.keys()), ",".join(second_positions_score.keys()),
-        #                     ",".join(projects_score.keys()))
+        write_user_portrait(cl_id, ",".join(first_solutions_score.keys()), ",".join(second_solutions_score.keys()),
+                            ",".join(first_demands_score.keys()), ",".join(second_demands_score.keys()),
+                            ",".join(first_positions_score.keys()), ",".join(second_positions_score.keys()),
+                            ",".join(projects_score.keys()))
-        body = {}
-        for (k, v) in res.items():
-            body[k] = list(v.keys())
+        # body = {}
+        # for (k, v) in res.items():
+        #     body[k] = list(v.keys())
-        body["device_id"] = cl_id
-        body["last_modified"] = datetime.datetime.strftime(datetime.datetime.now(pytz.timezone("Asia/Shanghai")),
-                                                           "%Y-%m-%dT%H:%M:%S.%f")[:-7] + "Z"
-        es_insert_device_info(cl_id, body)
+        # body["device_id"] = cl_id
+        # body["last_modified"] = datetime.datetime.strftime(datetime.datetime.now(pytz.timezone("Asia/Shanghai")),
+        #                                                    "%Y-%m-%dT%H:%M:%S.%f")[:-7] + "Z"
+        # es_insert_device_info(cl_id, body)
         # # write_user_portrait_doris(cl_id, ",".join(first_solutions_score.keys()), ",".join(second_solutions_score.keys()),
         # #                           ",".join(first_demands_score.keys()), ",".join(second_demands_score.keys()),
@@ -315,7 +314,7 @@ def consume_kafka():
     spark = SparkSession.builder.config(conf=sparkConf).enableHiveSupport().getOrCreate()
     spark.sparkContext.setLogLevel("WARN")
     spark.sparkContext.addPyFile("/srv/apps/ffm-baseline_git/eda/smart_rank/tool.py")
-    spark.sparkContext.addPyFile("/srv/apps/ffm-baseline_git/eda/smart_rank/es_tool.py")
+    # spark.sparkContext.addPyFile("/srv/apps/ffm-baseline_git/eda/smart_rank/es_tool.py")
     device_ids_lst_rdd = spark.sparkContext.parallelize(device_ids_lst, numSlices=1000)
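For context, addPyFile ships a single-file module to every executor so worker tasks can import it by name; with the ES write gone, es_tool.py no longer needs to be distributed. A hedged sketch of the pattern follows; the device ids and the helper function are placeholders, since tool.py's real API is not shown in this commit.

```python
# Hedged sketch of the addPyFile pattern used in consume_kafka().
# Device ids and tool.some_helper are placeholders, not this repo's API.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("addpyfile-sketch").getOrCreate()
spark.sparkContext.setLogLevel("WARN")
spark.sparkContext.addPyFile("/srv/apps/ffm-baseline_git/eda/smart_rank/tool.py")

device_ids_lst = ["device-a", "device-b", "device-c"]  # placeholder ids
device_ids_lst_rdd = spark.sparkContext.parallelize(device_ids_lst, numSlices=3)


def handle_partition(ids):
    import tool  # resolvable on workers because addPyFile shipped the file
    for cl_id in ids:
        yield tool.some_helper(cl_id)  # hypothetical function name


print(device_ids_lst_rdd.mapPartitions(handle_partition).collect())
```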