Commit d475ced6 authored by 高雅喆

更新新用户的轻医美标签词

parent 06eafecd
...@@ -20,39 +20,6 @@ from pyspark.sql.functions import concat_ws ...@@ -20,39 +20,6 @@ from pyspark.sql.functions import concat_ws
from tool import * from tool import *
def send_email(app,id,e):
    """Email a failure notification for a Spark streaming application.

    Sends a plain-text alert and attaches the stringified exception as an
    ``error.txt`` file, delivered via the Tencent Exmail SMTP server over SSL.

    Args:
        app: Spark streaming application name (goes into the subject line).
        id: Application id reported in the message body.
        e: The exception (or any object); stringified into the attachment.

    Note:
        Failures to send are swallowed after printing 'error' — this is a
        best-effort notifier and deliberately never raises.
    """
    # Third-party SMTP service settings.
    # SECURITY NOTE(review): credentials are hard-coded in source control;
    # they should be moved to environment variables or a secrets store.
    mail_host = 'smtp.exmail.qq.com'       # SMTP server
    mail_user = "gaoyazhe@igengmei.com"    # login user
    mail_pass = "VCrKTui99a7ALhiK"         # login password
    sender = 'gaoyazhe@igengmei.com'
    receivers = ['gaoyazhe@igengmei.com']  # recipient address list
    e = str(e)
    msg = MIMEMultipart()
    part = MIMEText('app_id:'+id+':fail', 'plain', 'utf-8')
    msg.attach(part)
    # formataddr pairs the display name with the sender address.
    msg['From'] = formataddr(["gaoyazhe", sender])
    msg['To'] = ";".join(receivers)
    # message['Cc'] = ";".join(cc_reciver)
    msg['Subject'] = 'spark streaming:app_name:'+app
    # Persist the error text to disk, then attach it. Both file handles are
    # managed by `with` (the original leaked the read handle and carried a
    # redundant close() inside the write block).
    with open('error.txt', 'w') as f:
        f.write(e)
    with open('error.txt', 'r') as f:
        part = MIMEApplication(f.read())
    part.add_header('Content-Disposition', 'attachment', filename="error.txt")
    msg.attach(part)
    try:
        smtpObj = smtplib.SMTP_SSL(mail_host, 465)
        try:
            smtpObj.login(mail_user, mail_pass)
            smtpObj.sendmail(sender, receivers, msg.as_string())
        finally:
            # Close the SMTP connection (the original never did).
            smtpObj.quit()
    except smtplib.SMTPException:
        print('error')
def get_hot_search_words_tag(): def get_hot_search_words_tag():
try: try:
hot_search = """ hot_search = """
...@@ -73,7 +40,6 @@ def get_hot_search_words_tag(): ...@@ -73,7 +40,6 @@ def get_hot_search_words_tag():
return [] return []
def get_user_history_order_service_tag(user_id): def get_user_history_order_service_tag(user_id):
try:
if user_id: if user_id:
db_zhengxing = pymysql.connect(host="172.16.30.141", port=3306, user="work", db_zhengxing = pymysql.connect(host="172.16.30.141", port=3306, user="work",
password="BJQaT9VzDcuPBqkd", password="BJQaT9VzDcuPBqkd",
...@@ -109,12 +75,9 @@ def get_user_history_order_service_tag(user_id): ...@@ -109,12 +75,9 @@ def get_user_history_order_service_tag(user_id):
# db_jerry_test.commit() # db_jerry_test.commit()
# db_jerry_test.close() # db_jerry_test.close()
return user_id return user_id
except Exception as e:
return 'pass'
if __name__ == '__main__': if __name__ == '__main__':
try:
db_zhengxing = pymysql.connect(host="172.16.30.141", port=3306, user="work", db_zhengxing = pymysql.connect(host="172.16.30.141", port=3306, user="work",
password="BJQaT9VzDcuPBqkd", password="BJQaT9VzDcuPBqkd",
db="zhengxing", cursorclass=pymysql.cursors.DictCursor) db="zhengxing", cursorclass=pymysql.cursors.DictCursor)
...@@ -147,13 +110,8 @@ if __name__ == '__main__': ...@@ -147,13 +110,8 @@ if __name__ == '__main__':
hot_search_words_portrait_dict = {i["keywords"]: 0.2 for i in hot_search_words} hot_search_words_portrait_dict = {i["keywords"]: 0.2 for i in hot_search_words}
redis_client.hmset(hot_search_words_portrait_portrait_key2, hot_search_words_portrait_dict) redis_client.hmset(hot_search_words_portrait_portrait_key2, hot_search_words_portrait_dict)
hot_search_words = ["明星娱乐", "网红扒一扒", "明星颜值打call", "颜商", "颜值高光时刻", "瘦脸针", "水光针", "光子嫩肤", "热玛吉", "瘦腿针", "超声刀", hot_search_words = ["明星整形", "明星抗衰", "明星颜值", "明星婚恋", "网红整形", "网红抗衰", "网红颜值", "网红婚恋", "审美", "双眼皮", "牙齿矫正", "水光针",
"瘦肩针", "果酸焕肤", "玻尿酸", "小气泡", "隆鼻"]
"热拉提", "微针", "点阵激光", "小气泡", "玻尿酸丰下巴", "埋线双眼皮", "纹眉", "溶脂针瘦脸", "黄金微针", "点痣", "激光祛斑",
"白瓷娃娃",
"除皱针注射", "微针祛痘坑", "玻尿酸", "胶原蛋白", "果酸", "黑脸娃娃", "童颜针", "祛斑", "祛痣", "祛黑头", "祛疤",
"祛痘", "美瞳", "孕睫", "少女针", "面部提升", "嫩肤", "镭射净肤", "红蓝光", "清洁",
"补水", "抗衰", "美白", "冷光美白", "网红抗衰", "网红整形", "网红颜值", "网红婚恋", "明星抗衰", "明星整形", "明星婚恋", "明星颜值"]
hot_search_words_portrait_portrait_key3 = "user:service_coldstart_tags3" hot_search_words_portrait_portrait_key3 = "user:service_coldstart_tags3"
hot_search_words_portrait3_dict = {i: 0.2 for i in hot_search_words} hot_search_words_portrait3_dict = {i: 0.2 for i in hot_search_words}
redis_client.hmset(hot_search_words_portrait_portrait_key3, hot_search_words_portrait3_dict) redis_client.hmset(hot_search_words_portrait_portrait_key3, hot_search_words_portrait3_dict)
...@@ -174,6 +132,3 @@ if __name__ == '__main__': ...@@ -174,6 +132,3 @@ if __name__ == '__main__':
device_ids_lst_rdd = spark.sparkContext.parallelize(device_ids_lst) device_ids_lst_rdd = spark.sparkContext.parallelize(device_ids_lst)
result = device_ids_lst_rdd.repartition(100).map(lambda x: get_user_history_order_service_tag(x)) result = device_ids_lst_rdd.repartition(100).map(lambda x: get_user_history_order_service_tag(x))
result.collect() result.collect()
except Exception as e:
send_email("dist_update_user_history_order_tags", "dist_update_user_history_order_tags", "")
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment