# -*- coding:UTF-8 -*-
# @Time : 2020/11/18 14:05
# @File : daily_search_word_count_fix.py
# @email : litao@igengmei.com
# @author : litao
# adapted from daily_search_word_count.py (2020/11/13)
import hashlib
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
import pymysql
# from elasticsearch import Elasticsearch
import datetime
# from maintenance.func_send_email_with_file import send_file_email
# import zipfile
# es = Elasticsearch([
# {
# 'host': '172.16.31.17',
# 'port': 9200,
# }, {
# 'host': '172.16.31.11',
# 'port': 9200,
# }])
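# Spark / TiSpark session configuration: JDBC URIs for the production MySQL/TiDB databases
# and TiSpark PD addresses are stashed in the SparkConf before the session is built.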
startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
"jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
"jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
"jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("test")
spark = (SparkSession.builder.config(conf=sparkConf).config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
.config("spark.tispark.pd.addresses", "172.16.40.170:2379").enableHiveSupport().getOrCreate())
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
# print(huidu_device_id_sql)
# huidu_device_id_df = spark.sql(huidu_device_id_sql)
# huidu_device_id_df.createOrReplaceTempView("dev_view")
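# Backfill loop: for each of the last `task_days` days, recompute per-query search PV/UV
# from the click log and upsert the results into TiDB.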
task_list = []
task_days = 80
for t in range(0, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
    # Per-query search PV/UV for one day: events with partition_date in [{start_date}, {end_date})
    sql_search_ctr = r"""
SELECT query,
partition_date,
all_search_uv as all_search_uv, --total search UV
all_search_pv as all_search_pv --total search PV
FROM (
--search PV/UV
SELECT query
, count(click.cl_id) as all_search_pv
, count(distinct click.cl_id) as all_search_uv,
partition_date
FROM (
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
params['input_type'] as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND ((action = 'do_search' AND params['input_type'] <> 'everyone_watch') or
action = 'search_result_click_search')
UNION all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
params['input_type'] as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'do_search'
and params['input_type'] = 'everyone_watch'
and params['tab'] = '精选'
and page_name = 'home'
AND params['query'] not in ('AI测颜值', 'AI测肤质') --these two queries do not land on the search result page
union all
SELECT cl_id,
partition_date,
action,
'search_home' as page_name,
'' as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['page_name'] = 'search_home'
union all
SELECT cl_id,
partition_date,
action,
'home' as page_name,
'首页-猜你喜欢' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['in_page_pos'] = '猜你喜欢'
--AND params['tab_name']='精选'
AND params['card_type'] = 'search_word'
AND params['card_name'] not in ('AI测颜值', 'AI测肤质') --these two queries do not land on the search result page
--AND page_name='home'  page_name is empty on Android
union all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
'美购首页-大家都在搜' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['page_name'] = 'welfare_home'
AND params['card_type'] = 'search_word'
AND params['in_page_pos'] = '大家都在搜'
union all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
'高亮词' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['card_type'] = 'highlight_word'
) click
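-- anti-join below removes events from spam (click-farm) devices and internal staff devices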
LEFT JOIN
(
select distinct device_id
from ml.ml_d_ct_dv_devicespam_d --exclude click-farm / cheating devices (filtered from view and exposure events)
WHERE partition_day='{start_date}'
union all
select distinct device_id
from dim.dim_device_user_staff --exclude internal staff users
)spam_pv
on spam_pv.device_id=click.cl_id
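-- anti-join below removes events whose device is, for that day, the first active device of a
-- doctor, puppet/model, or staff account, or of a user who has logged in on a doctor device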
LEFT JOIN
(
SELECT partition_date,device_id
FROM
(--find the first device_id the user_id was active on that day
SELECT user_id,partition_date,
if(size(device_list) > 0, device_list [ 0 ], '') AS device_id
FROM online.ml_user_updates
WHERE partition_date='{start_date}'
)t1
JOIN
( --doctor accounts
SELECT distinct user_id
FROM online.tl_hdfs_doctor_view
WHERE partition_date = '{start_date}'
--sockpuppet / model accounts
UNION ALL
SELECT user_id
FROM ml.ml_c_ct_ui_user_dimen_d
WHERE partition_day = '{start_date}'
AND (is_puppet = 'true' or is_classifyuser = 'true')
UNION ALL
--company internal staff users
select distinct user_id
from dim.dim_device_user_staff
UNION ALL
--devices that have logged in as a doctor
SELECT distinct t1.user_id
FROM
(
SELECT user_id, v.device_id as device_id
FROM online.ml_user_history_detail
LATERAL VIEW EXPLODE(device_history_list) v AS device_id
WHERE partition_date = '{start_date}'
) t1
JOIN
(
SELECT device_id
FROM online.ml_device_history_detail
WHERE partition_date = '{start_date}'
AND is_login_doctor = '1'
) t2
ON t1.device_id = t2.device_id
)t2
on t1.user_id=t2.user_id
group by partition_date,device_id
)dev
on click.partition_date=dev.partition_date and click.cl_id=dev.device_id
WHERE spam_pv.device_id IS NULL
and dev.device_id is null
GROUP BY query,click.partition_date
) t3 order by all_search_uv desc
""".format(start_date=yesterday_str, end_date=today_str)
    print(sql_search_ctr)
    search_ctr_df = spark.sql(sql_search_ctr)
    search_ctr_df.show(1)
    sql_res = search_ctr_df.collect()
    tag_names_list_week = []
    for name in sql_res:
        # print(name)
        word = name.query
        nums = name.all_search_pv
        uv = name.all_search_uv
        partition_date = str(now + datetime.timedelta(days=-1))  # note: str() keeps the time component, not just the date
        tag_names_list_week.append((word, nums, uv, partition_date))
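    # Upsert this day's counts into TiDB (jerry_prod.daily_search_word_count_fix) via REPLACE INTO,
    # flushing in batches of roughly 100 rows.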
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
                         db='jerry_prod')
    cursor = db.cursor()
    insert_sql = "replace into daily_search_word_count_fix(word, nums, uv, pid, partition_day) VALUES(%s,%s,%s,%s,%s)"
    insert_list = []
    for count, item in enumerate(tag_names_list_week):
        word, nums, uv, partition_date = item
        try:
            if len(word) >= 200:
                continue
            # pid: md5 hash of partition_day + word
            pid = hashlib.md5((partition_date + word).encode("utf8")).hexdigest()
            insert_sql_tuple = (word, nums, uv, pid, partition_date)
            insert_list.append(insert_sql_tuple)
            # print(insert_sql_tuple)
        except Exception:
            continue
        if count % 100 == 0:
            cursor.execute("set names 'UTF8'")
            res = cursor.executemany(insert_sql, insert_list)
            db.commit()
            # print(res)
            insert_list = []
        # print(count)
    # flush any remaining rows for this day
    res = cursor.executemany(insert_sql, insert_list)
    db.commit()
    db.close()
    print(res)
if __name__ == "__main__":
    tag_names_list = []
    tag_names_list_week = []
    all_data_day = []
    all_data_week = []