Commit bfb08c9e authored by 赵威

get offline ids

parent cd5fa9bf
@@ -154,3 +154,9 @@ def get_diary_info_from_es(fields=["id"]):
    }
    results = es_scan("diary", q)
    return results


def get_offline_ids(content_type):
    q = {"query": {"bool": {"must": [{"term": {"is_online": False}}]}}}
    results = es_scan(content_type, q)
    return results
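get_offline_ids returns the raw es_scan hits rather than ids, so callers extract the "_id" field themselves, as the answer-side code further down does. A minimal usage sketch, not part of this commit: collect_offline_ids is a hypothetical helper name, and it assumes each hit is a standard Elasticsearch hit dict carrying a numeric "_id".

from utils.es import get_offline_ids

def collect_offline_ids(content_type):
    # Hypothetical helper: gather ids of documents flagged is_online=False,
    # skipping hits whose "_id" is missing or not numeric, mirroring the
    # try/except in get_offline_answer_ids below.
    ids = set()
    for hit in get_offline_ids(content_type):
        try:
            ids.add(int(hit["_id"]))
        except (KeyError, ValueError):
            pass
    return ids

# e.g. offline_diary_ids = collect_offline_ids("diary")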
@@ -10,6 +10,7 @@ from gensim.models import Word2Vec, word2vec
from utils.date import get_ndays_before_no_minus, get_ndays_before_with_format
from utils.files import DATA_PATH, MODEL_PATH
from utils.spark import get_spark
from utils.es import get_offline_ids


answer_click_ids_model_path = os.path.join(MODEL_PATH, "answer_click_ids_item2vec_model")
@@ -123,6 +124,24 @@ def get_answer_click_data(spark, start, end):
    return df


def get_offline_answer_ids():
    count = 0
    res_set = set([])
    for item in get_offline_ids("answer"):
        count += 1
        try:
            print(count)
            id = int(item["_id"])
            res_set.add(id)
        except Exception as e:
            print(e)
            pass
    return res_set
curl "http://172.16.31.17:9200/gm-dbmw-answer-read/_search?pretty&size=100" -d '{"query": {"bool": {"must": [{"term": {"is_online": true}}, {"match_phrase": {"tag_name_analyze": {"query": "草莓鼻", "analyzer": "gm_default_index"}}}]}}}'

def get_device_click_answer_ids_dict(click_df):
    res = defaultdict(list)
    cols = click_df.orderBy("partition_date", ascending=False).collect()
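For reference, the curl command above can be expressed with the same query-dict style that get_offline_ids uses. A sketch, not part of the commit, assuming es_scan is importable from utils.es, where the helpers above call it:

from utils.es import es_scan

q = {
    "query": {
        "bool": {
            "must": [
                {"term": {"is_online": True}},
                {"match_phrase": {"tag_name_analyze": {"query": "草莓鼻", "analyzer": "gm_default_index"}}},
            ]
        }
    }
}
results = es_scan("answer", q)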
@@ -154,24 +173,27 @@ def save_clicked_answer_ids_item2vec():
if __name__ == "__main__":
    begin_time = time.time()

    spark = get_spark("answer_click_ids")
    click_df = get_answer_click_data(spark, get_ndays_before_no_minus(180), get_ndays_before_no_minus(1))
    click_df.show(5, False)
    print(click_df.count())
    res_set = get_offline_answer_ids()
    print(len(res_set))
    # spark = get_spark("answer_click_ids")
    # click_df = get_answer_click_data(spark, get_ndays_before_no_minus(180), get_ndays_before_no_minus(1))
    # click_df.show(5, False)
    # print(click_df.count())

    res_dict = get_device_click_answer_ids_dict(click_df)
    # res_dict = get_device_click_answer_ids_dict(click_df)

    with open(os.path.join(DATA_PATH, "click_answer_ids.csv"), "w") as f:
        for (k, v) in res_dict.items():
            if v:
                f.write("{}|{}\n".format(k, ",".join([str(x) for x in v])))
    print("write data done.")
    # with open(os.path.join(DATA_PATH, "click_answer_ids.csv"), "w") as f:
    #     for (k, v) in res_dict.items():
    #         if v:
    #             f.write("{}|{}\n".format(k, ",".join([str(x) for x in v])))
    # print("write data done.")

    save_clicked_answer_ids_item2vec()
    # save_clicked_answer_ids_item2vec()

    for id in ["986424", "744910", "703622"]:
        print(ANSWER_CLICK_IDS_MODEL.wv.most_similar(id, topn=5))
    # for id in ["986424", "744910", "703622"]:
    #     print(ANSWER_CLICK_IDS_MODEL.wv.most_similar(id, topn=5))

    print("total cost: {:.2f}mins".format((time.time() - begin_time) / 60))
    # print("total cost: {:.2f}mins".format((time.time() - begin_time) / 60))
# spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 16g --executor-memory 1g --executor-cores 1 --num-executors 70 --conf spark.default.parallelism=100 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/tispark-core-2.1-SNAPSHOT-jar-with-dependencies.jar,/srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/strategy_embedding/word_vector/answer.py
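The write step in the main block (currently commented out) stores one device per line as device_id|id1,id2,.... A hedged reader sketch for that format, not part of the commit; read_click_answer_ids is a hypothetical name:

import os

from utils.files import DATA_PATH

def read_click_answer_ids(path=os.path.join(DATA_PATH, "click_answer_ids.csv")):
    # Parse the "device_id|id1,id2,..." lines written by the __main__ block above.
    res = {}
    with open(path) as f:
        for line in f:
            if not line.strip():
                continue
            device_id, ids = line.rstrip("\n").split("|", 1)
            res[device_id] = [int(x) for x in ids.split(",") if x]
    return res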