# -*- coding:UTF-8 -*-
# @Time : 2020/9/14 14:53
# @File : meigou_huidu_huisu.py
# @email : litao@igengmei.com
# @author : litao
import hashlib
import json
import pymysql
import xlwt, datetime
import redis
# from pyhive import hive
from maintenance.func_send_email_with_file import send_file_email
from typing import Dict, List
from elasticsearch_7 import Elasticsearch
from elasticsearch_7.helpers import scan
import sys
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
# from pyspark.sql.functions import lit
# import pytispark.pytispark as pti
def con_sql(sql):
    # Fetch all rows for the given SQL statement from the doris_olap MySQL database
    db = pymysql.connect(host='172.16.30.136', port=3306, user='doris_olap', passwd='bA27hXasdfswuolap',
                         db='doris_olap')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    db.close()
    return result
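
# con_sql is defined but not called below; a minimal usage sketch (the query here
# is purely illustrative and not part of the original job):
# rows = con_sql("SELECT 1")
# print(rows)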
startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
"jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
"jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
"jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
# sparkConf.set("prod.jerry.jdbcuri",
# "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
# sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("meigou_huidu_huisu")
spark = (SparkSession.builder
         .config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .appName("meigou_huidu_huisu")
         .enableHiveSupport()
         .getOrCreate())
# spark.sparkContext.setLogLevel("ERROR")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
task_list = []
task_days = 3
# Backfill the most recent (task_days - 1) days: t = 1, 2 -> yesterday and the day before
for t in range(1, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_ago_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")

    sql_search_ctr = r"""
SELECT
t1.partition_date as `日期`
,active_type as `活跃`
,device_os_type as `系统`
,grey_type as `灰度类型`
,round(NVL(sum(click_pv),0)/NVL(sum(exp_pv),0)*100,2) as `卡片点击pv/卡片精准曝光pv(%)`
,round(NVL(sum(two_click_pv),0)/NVL(sum(exp_pv),0)*100,2) as `有效二跳pv/卡片精准曝光pv(%)`
,round(NVL(sum(two_click_pv),0)/NVL(sum(click_pv),0)*100,2) as `有效二跳pv/卡片点击pv(%)`
,round(NVL(sum(cpc_exp_pv),0)/NVL(sum(exp_pv),0)*100,2) as `cpc卡片曝光pv/卡片精准曝光pv(%)`
,NVL(sum(click_pv),0) as `卡片点击pv`
,NVL(sum(exp_pv),0) as `卡片曝光pv`
,NVL(sum(two_click_pv),0) as `有效二跳pv`
,NVL(sum(cpc_click_pv),0) as `cpc卡片点击pv`
,NVL(sum(cpc_exp_pv),0) as `cpc卡片曝光pv`
FROM
(
SELECT partition_date
,device_os_type
,CASE WHEN active_type = '4' THEN '老活'
WHEN active_type IN ('1','2') THEN '新增' END AS active_type
,device_id
,CASE WHEN substr(md5(device_id),-1) in ('0','1','2','3','4','5','6','7') THEN '灰度' ELSE '非灰' END AS grey_type
FROM online.ml_device_day_active_status
WHERE partition_date>={start_day}
AND partition_date<= {partition_day}
AND active_type IN ('1','2','4')
)t1
JOIN
(--precise exposure
SELECT cl_id,partition_date,card_id,count(1) as exp_pv,count(CASE WHEN get_json_object(exposure_card, '$.is_cpc')=1 THEN 1 END) as cpc_exp_pv
FROM online.ml_community_precise_exposure_detail
WHERE partition_date>={start_day}
AND partition_date<= {partition_day}
AND action in ('page_precise_exposure','home_choiceness_card_exposure') --action was renamed to page_precise_exposure in version 7745
AND page_name in('welfare_home')
AND tab_name in ('精选')
AND card_content_type ='service'
and (get_json_object(exposure_card,'$.in_page_pos')='' or get_json_object(exposure_card,'$.in_page_pos') is null)
group by partition_date,cl_id,card_id
)t2
on t1.device_id=t2.cl_id and t1.partition_date=t2.partition_date
LEFT JOIN
(--card clicks
SELECT cl_id,partition_date,params['card_id'] as card_id,count(1) as click_pv,count(CASE WHEN params['is_cpc']=1 THEN 1 END) as cpc_click_pv
FROM online.bl_hdfs_maidian_updates
WHERE partition_date>={start_day}
AND partition_date<= {partition_day}
AND action='on_click_card'
AND params['tab_name']='精选'
AND params['page_name'] ='welfare_home'
AND params['card_content_type'] ='service'
GROUP BY cl_id,partition_date,params['card_id']
)t3
on t2.partition_date=t3.partition_date
and t2.cl_id=t3.cl_id
and t2.card_id=t3.card_id
LEFT JOIN
(--second-jump actions on the service (welfare) detail page
SELECT cl_id,partition_date,params['service_id'] as service_id,count(1) as two_click_pv
FROM online.bl_hdfs_maidian_updates
WHERE partition_date>={start_day}
AND partition_date<= {partition_day}
AND (referrer in ('welfare_home')
or (params['referrer_link'] like '%[%' and json_split(params['referrer_link'])[size(json_split(params['referrer_link']))-1] in ('welfare_home')))
AND ((action in ('welfare_multiattribute_click_add','welfare_multiattribute_click_buy') AND page_name = 'welfare_detail')
or action = 'welfare_detail_click_message')
GROUP BY cl_id,partition_date,params['service_id']
)t4
on t3.partition_date=t4.partition_date
and t3.cl_id=t4.cl_id
and t3.card_id=t4.service_id
LEFT JOIN
(
SELECT distinct device_id
FROM dim.dim_device_user_staff --exclude internal staff users
UNION ALL
SELECT device_id
FROM ml.ml_d_ct_dv_devicespam_d --exclude spam (fake-traffic) devices
WHERE partition_day={partition_day}
)a
on t1.device_id=a.device_id
LEFT JOIN
(
SELECT partition_date,device_id
FROM
(--find the first device id each user_id was active on that day
SELECT user_id,partition_date,
if(size(device_list) > 0, device_list[0], '') AS device_id
FROM online.ml_user_updates
WHERE partition_date>={start_day}
AND partition_date<= {partition_day}
)t1
JOIN
( --doctor accounts
SELECT distinct user_id
FROM online.tl_hdfs_doctor_view
WHERE partition_date = {partition_day}
--sockpuppet accounts / model users
UNION ALL
SELECT user_id
FROM ml.ml_c_ct_ui_user_dimen_d
WHERE partition_day = {partition_day}
AND (is_puppet = 'true' or is_classifyuser = 'true')
UNION ALL
--users on company intranet (staff) devices
select distinct user_id
from dim.dim_device_user_staff
UNION ALL
--devices that a doctor account has logged in on
SELECT distinct t1.user_id
FROM
(
SELECT user_id, v.device_id as device_id
FROM online.ml_user_history_detail
LATERAL VIEW EXPLODE(device_history_list) v AS device_id
WHERE partition_date = {partition_day}
)t1
JOIN
(
SELECT device_id
FROM online.ml_device_history_detail
WHERE partition_date = {partition_day}
AND is_login_doctor = '1'
)t2
ON t1.device_id = t2.device_id
)t2
on t1.user_id=t2.user_id
group by partition_date,device_id
)b
on t1.partition_date=b.partition_date and t1.device_id=b.device_id
where (a.device_id is NULL or a.device_id ='')
and (b.device_id is null or b.device_id ='')
GROUP BY t1.partition_date
,grey_type,active_type,device_os_type
order by 1
""".format(partition_day=today_str,start_day=yesterday_str)
print(sql_search_ctr)
search_ctr_df = spark.sql(sql_search_ctr)
# spam_pv_df.createOrReplaceTempView("dev_view")
search_ctr_df.show(1)
sql_res = search_ctr_df.collect()
for res in sql_res:
print(res)
task_list.append(res)
print(task_list)
# cursor.executemany()
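
# The commented-out cursor.executemany() above suggests the collected rows were meant
# to be written back to MySQL. A minimal sketch of that write-back, assuming a
# hypothetical target table meigou_huidu_huisu_result whose 13 columns match the
# query's output order (the table name and the write-back are not part of the original job):
# db = pymysql.connect(host='172.16.30.136', port=3306, user='doris_olap',
#                      passwd='bA27hXasdfswuolap', db='doris_olap')
# cursor = db.cursor()
# insert_sql = ("INSERT INTO meigou_huidu_huisu_result VALUES "
#               "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
# cursor.executemany(insert_sql, [tuple(row) for row in task_list])
# db.commit()
# db.close()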