from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import datetime
import json
import smtplib
import time
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr

import numpy as np
import pandas as pd
import pymysql
import redis
from pyspark import SparkConf
from pyspark.sql import SparkSession
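

# Distributed user-portrait update job.
#
# For every device (cl_id) active in the last 30 days, this script reads the
# user's actions from the TiDB table `user_new_tag_log`, turns each action into
# a tag score via a time-decay function chosen by the action's score_type,
# keeps the best entry per tag, and writes the resulting tag list both to the
# gm_kv Redis store (key user:portrait_tags:cl_id:<cl_id>, 30-day TTL) and to
# the TiDB table `user_portrait_tags`. The per-device work is fanned out over
# a Spark RDD; on driver failure a notification mail is sent.
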
def send_email(app, id, e):
    """Send a failure notification mail, with the error text attached."""
    # Third-party SMTP service
    mail_host = 'smtp.exmail.qq.com'       # SMTP server
    mail_user = "gaoyazhe@igengmei.com"    # account
    mail_pass = "VCrKTui99a7ALhiK"         # password
    sender = 'gaoyazhe@igengmei.com'
    receivers = ['gaoyazhe@igengmei.com']  # recipient list
    e = str(e)
    msg = MIMEMultipart()
    part = MIMEText('app_id:' + id + ':fail', 'plain', 'utf-8')
    msg.attach(part)
    msg['From'] = formataddr(["gaoyazhe", sender])
    msg['To'] = ";".join(receivers)
    # msg['Cc'] = ";".join(cc_receivers)
    msg['Subject'] = 'spark streaming:app_name:' + app
    # Dump the error text to a file and attach it to the mail.
    with open('error.txt', 'w') as f:
        f.write(e)
    with open('error.txt', 'r') as f:
        part = MIMEApplication(f.read())
    part.add_header('Content-Disposition', 'attachment', filename="error.txt")
    msg.attach(part)
    try:
        smtp_obj = smtplib.SMTP_SSL(mail_host, 465)
        smtp_obj.login(mail_user, mail_pass)
        smtp_obj.sendmail(sender, receivers, msg.as_string())
    except smtplib.SMTPException:
        print('send_email failed')
def compute_henqiang(x):
    score = 15 - x * ((15 - 0.5) / 180)
    if score > 0.5:
        return score
    else:
        return 0.5


def compute_jiaoqiang(x):
    score = 12 - x * (12 / 180)
    if score > 0.5:
        return score
    else:
        return 0.5


def compute_ruoyixiang(x):
    score = 5 - x * ((5 - 0.5) / 180)
    if score > 0.5:
        return score
    else:
        return 0.5


def compute_validate(x):
    score = 10 - x * ((10 - 0.5) / 180)
    if score > 0.5:
        return score
    else:
        return 0.5


def compute_ai_scan(x):
    score = 2 - x * ((2 - 0.5) / 180)
    if score > 0.5:
        return score
    else:
        return 0.5
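

# The five compute_* functions above all apply a linear time decay over a
# 180-day horizon: an action that is x days old starts from a per-score_type
# weight (henqiang 15, jiaoqiang 12, validate 10, ruoyixiang 5, ai_scan 2) and
# is floored at 0.5. A generic form of the same idea (an illustrative sketch
# only; the job keeps the five explicit functions above) would be:
#
#     def decay_score(days, start, floor=0.5, horizon=180):
#         return max(start - days * ((start - floor) / horizon), floor)
#
# Note that compute_jiaoqiang decays with slope 12/180 rather than
# (12 - 0.5)/180, so it hits the 0.5 floor slightly before day 180.
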
def tag_list2dict(lst, size):
    """Convert [{"tag_id": ..., "tag_score": ...}, ...] into portrait entries,
    keeping at most `size` of them."""
    result = []
    if lst:
        for i in lst:
            tmp = dict()
            tmp["content"] = i["tag_id"]
            # Integer tag_ids are real tags; anything else is a search word.
            if isinstance(i["tag_id"], int):
                tmp["type"] = "tag"
            else:
                tmp["type"] = "search_word"
            tmp["score"] = i["tag_score"]
            result.append(tmp)
    return result[:size]
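

# Example of the structure tag_list2dict produces (values below are made up
# purely for illustration):
#
#     [{"content": 7346, "type": "tag", "score": 14.2},
#      {"content": "some search term", "type": "search_word", "score": 9.8}]
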
def get_user_tag_score(cl_id, size=10):
    try:
        db_jerry_test = pymysql.connect(host='172.16.40.158', port=4000, user='root', passwd='3SYz54LS9#^9sBvC',
                                        db='jerry_test', charset='utf8')
        cur_jerry_test = db_jerry_test.cursor()
        # Fetch all of this user's actions
        sql_user_log = "select time,cl_id,score_type,tag_id,tag_referrer,action from user_new_tag_log " \
                       "where cl_id ='{}'".format(cl_id)
        cur_jerry_test.execute(sql_user_log)
        user_log = cur_jerry_test.fetchall()
        user_log_df = pd.DataFrame(list(user_log))
        if not user_log_df.empty:
            user_log_df.columns = ["time", "cl_id", "score_type", "tag_id", "tag_referrer", "action"]
            # For search actions, the searched keyword (tag_referrer) is used as the tag.
            user_log_df["tag_id"] = np.where(user_log_df["action"] == "do_search",
                                             user_log_df["tag_referrer"], user_log_df["tag_id"])
            user_log_df["days_diff_now"] = round((int(time.time()) - user_log_df["time"]) / (24 * 60 * 60))
            user_log_df["tag_score"] = user_log_df.apply(
                lambda x: compute_henqiang(x.days_diff_now) if x.score_type == "henqiang" else (
                    compute_jiaoqiang(x.days_diff_now) if x.score_type == "jiaoqiang" else (
                        compute_ai_scan(x.days_diff_now) if x.score_type == "ai_scan" else (
                            compute_ruoyixiang(x.days_diff_now) if x.score_type == "ruoyixiang"
                            else compute_validate(x.days_diff_now)))), axis=1)
            # Keep the highest-scoring (and most recent) record per tag.
            finally_score = user_log_df.sort_values(by=["tag_score", "time"], ascending=False)
            finally_score.drop_duplicates(subset="tag_id", inplace=True)
            finally_score_lst = finally_score[["tag_id", "tag_score"]].to_dict('records')
            tag_id_list = tag_list2dict(finally_score_lst, size)
            # Write to gm_kv (Redis), with a 30-day TTL
            gm_kv_cli = redis.Redis(host="172.16.40.135", port=5379, db=2, socket_timeout=2000)
            cl_id_portrait_key = "user:portrait_tags:cl_id:" + str(cl_id)
            tag_id_list_json = json.dumps(tag_id_list)
            gm_kv_cli.set(cl_id_portrait_key, tag_id_list_json)
            gm_kv_cli.expire(cl_id_portrait_key, time=30 * 24 * 60 * 60)
            # Write to TiDB
            stat_date = datetime.datetime.today().strftime('%Y-%m-%d')
            replace_sql = """replace into user_portrait_tags (stat_date, cl_id, tag_list) values("{stat_date}","{cl_id}","{tag_list}")""" \
                .format(stat_date=stat_date, cl_id=cl_id, tag_list=tag_id_list)
            cur_jerry_test.execute(replace_sql)
            db_jerry_test.commit()
            db_jerry_test.close()
            return "success"
        db_jerry_test.close()
    except Exception as e:
        return 'pass'
if __name__ == '__main__':
    try:
        db_jerry_test = pymysql.connect(host='172.16.40.170', port=4000, user='root', passwd='3SYz54LS9#^9sBvC',
                                        db='jerry_test', charset='utf8')
        cur_jerry_test = db_jerry_test.cursor()
        # Get the device ids of users active in the last 30 days
        # (use "select distinct cl_id from user_new_tag_log" to cover all users)
        sql_device_ids = "select distinct cl_id from user_new_tag_log " \
                         "where time > UNIX_TIMESTAMP(DATE_SUB(NOW(), INTERVAL 30 day))"
        cur_jerry_test.execute(sql_device_ids)
        device_ids_lst = [i[0] for i in cur_jerry_test.fetchall()]
        db_jerry_test.close()
        # Fan the per-device work out over a Spark RDD
        sparkConf = SparkConf().set("spark.hive.mapred.supports.subdirectories", "true") \
            .set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", "true") \
            .set("spark.tispark.plan.allow_index_double_read", "false") \
            .set("spark.tispark.plan.allow_index_read", "true") \
            .set("spark.sql.extensions", "org.apache.spark.sql.TiExtensions") \
            .set("spark.tispark.pd.addresses", "172.16.40.170:2379").set("spark.io.compression.codec", "lzf") \
            .set("spark.driver.maxResultSize", "8g").set("spark.sql.avro.compression.codec", "snappy")
        spark = SparkSession.builder.config(conf=sparkConf).enableHiveSupport().getOrCreate()
        spark.sparkContext.setLogLevel("WARN")
        device_ids_lst_rdd = spark.sparkContext.parallelize(device_ids_lst)
        result = device_ids_lst_rdd.repartition(100).map(lambda x: get_user_tag_score(x))
        # collect() forces the map to run; the per-device return values are not used.
        result.collect()
    except Exception as e:
        send_email("dist_update_user_portrait", "dist_update_user_portrait", e)
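
# This script is meant to be submitted as a Spark application, e.g. with
# something like `spark-submit dist_update_user_portrait.py` (the file name
# and any --master/--deploy-mode options are assumptions that depend on the
# cluster setup); if the driver fails, a notification mail is sent via
# send_email().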