Commit 0ad276bb authored Sep 07, 2020 by litaolemo
update
parent b7184195
Showing 4 changed files with 471 additions and 3 deletions
task/search_answer_ctr.py    +234  -0
task/search_diary_ctr.py     +2    -2
task/search_meigou_ctr.py    +1    -1
task/search_tractate_ctr.py  +234  -0
task/search_answer_ctr.py (new file, mode 100644)
# -*- coding:UTF-8 -*-
# @Time : 2020/9/4 17:07
# @File : search_meigou_ctr.py
# @email : litao@igengmei.com
# @author : litao
import hashlib
import json
import pymysql
import xlwt, datetime
import redis
# from pyhive import hive
from maintenance.func_send_email_with_file import send_file_email
from typing import Dict, List
from elasticsearch_7 import Elasticsearch
from elasticsearch_7.helpers import scan
import sys
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
# from pyspark.sql.functions import lit
# import pytispark.pytispark as pti


def con_sql(sql):
    # 从数据库的表里获取数据
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user',
                         passwd='aqpuBLYzEV7tML5RPsN1pntUzFy', db='jerry_prod')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    db.close()
    return result


startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("search_answer_ctr")
spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .enableHiveSupport().getOrCreate())

spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")

task_list = []
task_days = 60
for t in range(1, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
    sql_search_ctr = r"""
select D.ACTIVE_TYPE,D.DEVICE_OS_TYPE,sum(T.CLICK_NUM) as CLICK_NUM,sum(C.EXPOSURE) as EXPOSURE from
(SELECT T.DEVICE_ID, --设备ID
T.CARD_ID, --卡片ID
SUM(T.CLICK_NUM) AS CLICK_NUM --点击次数
FROM ML.ML_C_ET_CK_CLICK_DIMEN_D T
WHERE T.PARTITION_DAY = '{partition_day}'
AND T.PAGE_CODE = 'search_result_question_answer'
AND T.ACTION IN ('on_click_card')
GROUP BY T.DEVICE_ID,
T.CARD_ID) T
left join
(SELECT T.DEVICE_ID as DEVICE_ID, --设备ID
T.CARD_ID as CARD_ID, --卡片ID
COUNT(T.CARD_ID) AS EXPOSURE --点击次数
FROM ML.MID_ML_C_ET_PE_PRECISEEXPOSURE_DIMEN_D T
WHERE T.PARTITION_DAY = '{partition_day}'
AND T.PAGE_CODE = 'search_result_question_answer'
GROUP BY T.DEVICE_ID,
T.CARD_ID) C on T.DEVICE_ID=C.DEVICE_ID and T.CARD_ID = C.CARD_ID
LEFT JOIN
(
SELECT T.DEVICE_ID,
T.DEVICE_OS_TYPE,
T.ACTIVE_TYPE
FROM ML.ML_C_CT_DV_DEVICE_DIMEN_D T
WHERE T.PARTITION_DAY = '{partition_day}'
AND T.ACTIVE_TYPE IN ('1', '2', '4'))
D on T.DEVICE_ID = D.DEVICE_ID
LEFT JOIN
(
SELECT DISTINCT device_id
FROM ml.ml_d_ct_dv_devicespam_d --去除机构刷单设备,即作弊设备(浏览和曝光事件去除)
WHERE partition_day='{partition_day}'
UNION ALL
SELECT DISTINCT device_id
FROM dim.dim_device_user_staff --去除内网用户
)spam_pv
on spam_pv.device_id=T.DEVICE_ID
LEFT JOIN
(
SELECT partition_date,device_id
FROM
(--找出user_id当天活跃的第一个设备id
SELECT user_id,partition_date,
if(size(device_list) > 0, device_list [ 0 ], '') AS device_id
FROM online.ml_user_updates
WHERE partition_date>='{partition_day}' AND partition_date<'{end_date}'
)t1
JOIN
( --医生账号
SELECT distinct user_id
FROM online.tl_hdfs_doctor_view
WHERE partition_date = '{partition_day}'
--马甲账号/模特用户
UNION ALL
SELECT user_id
FROM ml.ml_c_ct_ui_user_dimen_d
WHERE partition_day = '{partition_day}'
AND (is_puppet = 'true' or is_classifyuser = 'true')
UNION ALL
--公司内网覆盖用户
select distinct user_id
from dim.dim_device_user_staff
UNION ALL
--登陆过医生设备
SELECT distinct t1.user_id
FROM
(
SELECT user_id, v.device_id as device_id
FROM online.ml_user_history_detail
LATERAL VIEW EXPLODE(device_history_list) v AS device_id
WHERE partition_date = '{partition_day}'
)t1
JOIN
(
SELECT device_id
FROM online.ml_device_history_detail
WHERE partition_date = '{partition_day}'
AND is_login_doctor = '1'
)t2
ON t1.device_id = t2.device_id
)t2
on t1.user_id=t2.user_id
group by partition_date,device_id
)dev
on T.DEVICE_ID=dev.device_id
WHERE (spam_pv.device_id IS NULL or spam_pv.device_id = '')
and (dev.device_id is null or dev.device_id='')
GROUP by D.DEVICE_OS_TYPE,
D.ACTIVE_TYPE
"""
.
format
(
partition_day
=
yesterday_str
,
end_date
=
today_str
)
print
(
sql_search_ctr
)
search_ctr_df
=
spark
.
sql
(
sql_search_ctr
)
# spam_pv_df.createOrReplaceTempView("dev_view")
search_ctr_df
.
show
(
1
)
sql_res
=
search_ctr_df
.
collect
()
res_dict
=
{
"新增"
:
{
"ios"
:
{
"click_num"
:
0
,
"exposure"
:
0
},
"android"
:
{
"click_num"
:
0
,
"exposure"
:
0
}
},
"老活"
:
{
"ios"
:
{
"click_num"
:
0
,
"exposure"
:
0
},
"android"
:
{
"click_num"
:
0
,
"exposure"
:
0
}
}
}
print
(
"-------------------------------"
)
db
=
pymysql
.
connect
(
host
=
'172.16.40.158'
,
port
=
4000
,
user
=
'st_user'
,
passwd
=
'aqpuBLYzEV7tML5RPsN1pntUzFy'
,
db
=
'jerry_prod'
)
cursor
=
db
.
cursor
()
for
res
in
sql_res
:
print
(
res
)
if
res
.
ACTIVE_TYPE
:
if
res
.
ACTIVE_TYPE
in
(
'1'
,
'2'
):
res_dict
[
"新增"
][
res
.
DEVICE_OS_TYPE
][
"click_num"
]
+=
res
.
CLICK_NUM
res_dict
[
"新增"
][
res
.
DEVICE_OS_TYPE
][
"exposure"
]
+=
res
.
EXPOSURE
else
:
res_dict
[
"老活"
][
res
.
DEVICE_OS_TYPE
][
"click_num"
]
+=
res
.
CLICK_NUM
res_dict
[
"老活"
][
res
.
DEVICE_OS_TYPE
][
"exposure"
]
+=
res
.
EXPOSURE
for
active_type
in
res_dict
:
for
device_os_type
in
res_dict
[
active_type
]:
partition_date
=
yesterday_str
pid
=
hashlib
.
md5
((
partition_date
+
device_os_type
+
active_type
)
.
encode
(
"utf8"
))
.
hexdigest
()
click_num
=
res_dict
[
active_type
][
device_os_type
][
"click_num"
]
exposure
=
res_dict
[
active_type
][
device_os_type
][
"exposure"
]
try
:
search_ctr
=
round
(
click_num
/
exposure
,
5
)
except
:
search_ctr
=
0
instert_sql
=
"""replace into search_answer_ctr(
partition_date,device_os_type,active_type,pid,click_num,exposure,search_ctr) VALUES('{partition_date}','{device_os_type}','{active_type}','{pid}',{click_num},{exposure},{search_ctr});"""
.
format
(
partition_date
=
partition_date
,
device_os_type
=
device_os_type
,
active_type
=
active_type
,
pid
=
pid
,
click_num
=
click_num
,
exposure
=
exposure
,
search_ctr
=
search_ctr
)
print
(
instert_sql
)
# cursor.execute("set names 'UTF8'")
res
=
cursor
.
execute
(
instert_sql
)
db
.
commit
()
print
(
res
)
# cursor.executemany()
db
.
close
()
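Note on the new script above: the REPLACE INTO statement is assembled with str.format, so it relies on none of the interpolated values containing quotes. A minimal sketch of the same write using pymysql's parameter binding instead (the helper name write_ctr_row and its signature are assumptions for illustration, not code from this commit):

import pymysql

def write_ctr_row(db, table, row):
    # row: (partition_date, device_os_type, active_type, pid, click_num, exposure, search_ctr)
    sql = ("replace into " + table + "("
           "partition_date,device_os_type,active_type,pid,click_num,exposure,search_ctr) "
           "VALUES(%s,%s,%s,%s,%s,%s,%s)")
    with db.cursor() as cursor:
        # the driver escapes each value, so quotes in strings cannot break the statement
        cursor.execute(sql, row)
    db.commit()

With this shape the numeric fields can stay Python ints/floats and the statement text never changes between rows.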
task/search_diary_ctr.py
@@ -61,7 +61,7 @@ sparkConf.set("prod.jerry.jdbcuri",
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("search_diary_ctr")
spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .appName("search_diary_ctr").enableHiveSupport().getOrCreate())
@@ -73,7 +73,7 @@ spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJso
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
task_list = []
-task_days = 90
+task_days = 60
for t in range(1, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
task/search_meigou_ctr.py
@@ -73,7 +73,7 @@ spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJso
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
task_list = []
-task_days = 90
+task_days = 1
for t in range(1, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
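Both of these edits only change task_days, which bounds the backfill window: each iteration of for t in range(1, task_days) rebuilds one historical day, so search_diary_ctr.py now covers 59 days while search_meigou_ctr.py's range(1, 1) runs no iterations at all. A minimal sketch of the (partition_day, end_date) pairs that loop derives (the helper name backfill_partitions is an assumption for illustration, not code from this repo):

import datetime

def backfill_partitions(task_days):
    """Yield (partition_day, end_date) strings, e.g. ('20200905', '20200906')."""
    for t in range(1, task_days):
        # t days back from today; partition_day is the day before that window's end
        now = datetime.datetime.now() - datetime.timedelta(days=t)
        partition_day = (now - datetime.timedelta(days=1)).strftime("%Y%m%d")
        end_date = now.strftime("%Y%m%d")
        yield partition_day, end_date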
task/search_tractate_ctr.py (new file, mode 100644)
# -*- coding:UTF-8 -*-
# @Time : 2020/9/4 17:07
# @File : search_meigou_ctr.py
# @email : litao@igengmei.com
# @author : litao
import hashlib
import json
import pymysql
import xlwt, datetime
import redis
# from pyhive import hive
from maintenance.func_send_email_with_file import send_file_email
from typing import Dict, List
from elasticsearch_7 import Elasticsearch
from elasticsearch_7.helpers import scan
import sys
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
# from pyspark.sql.functions import lit
# import pytispark.pytispark as pti


def con_sql(sql):
    # 从数据库的表里获取数据
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user',
                         passwd='aqpuBLYzEV7tML5RPsN1pntUzFy', db='jerry_prod')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    db.close()
    return result


startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("search_tractate_ctr")
spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .enableHiveSupport().getOrCreate())

spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")

task_list = []
task_days = 60
for t in range(1, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
    sql_search_ctr = r"""
select D.ACTIVE_TYPE,D.DEVICE_OS_TYPE,sum(T.CLICK_NUM) as CLICK_NUM,sum(C.EXPOSURE) as EXPOSURE from
(SELECT T.DEVICE_ID, --设备ID
T.CARD_ID, --卡片ID
SUM(T.CLICK_NUM) AS CLICK_NUM --点击次数
FROM ML.ML_C_ET_CK_CLICK_DIMEN_D T
WHERE T.PARTITION_DAY = '{partition_day}'
AND T.PAGE_CODE = 'search_result_post'
AND T.ACTION IN ('search_result_click_infomation_item','on_click_topic_card')
GROUP BY T.DEVICE_ID,
T.CARD_ID) T
left join
(SELECT T.DEVICE_ID as DEVICE_ID, --设备ID
T.CARD_ID as CARD_ID, --卡片ID
COUNT(T.CARD_ID) AS EXPOSURE --点击次数
FROM ML.MID_ML_C_ET_PE_PRECISEEXPOSURE_DIMEN_D T
WHERE T.PARTITION_DAY = '{partition_day}'
AND T.PAGE_CODE = 'search_result_post'
GROUP BY T.DEVICE_ID,
T.CARD_ID) C on T.DEVICE_ID=C.DEVICE_ID and T.CARD_ID = C.CARD_ID
LEFT JOIN
(
SELECT T.DEVICE_ID,
T.DEVICE_OS_TYPE,
T.ACTIVE_TYPE
FROM ML.ML_C_CT_DV_DEVICE_DIMEN_D T
WHERE T.PARTITION_DAY = '{partition_day}'
AND T.ACTIVE_TYPE IN ('1', '2', '4'))
D on T.DEVICE_ID = D.DEVICE_ID
LEFT JOIN
(
SELECT DISTINCT device_id
FROM ml.ml_d_ct_dv_devicespam_d --去除机构刷单设备,即作弊设备(浏览和曝光事件去除)
WHERE partition_day='{partition_day}'
UNION ALL
SELECT DISTINCT device_id
FROM dim.dim_device_user_staff --去除内网用户
)spam_pv
on spam_pv.device_id=T.DEVICE_ID
LEFT JOIN
(
SELECT partition_date,device_id
FROM
(--找出user_id当天活跃的第一个设备id
SELECT user_id,partition_date,
if(size(device_list) > 0, device_list [ 0 ], '') AS device_id
FROM online.ml_user_updates
WHERE partition_date>='{partition_day}' AND partition_date<'{end_date}'
)t1
JOIN
( --医生账号
SELECT distinct user_id
FROM online.tl_hdfs_doctor_view
WHERE partition_date = '{partition_day}'
--马甲账号/模特用户
UNION ALL
SELECT user_id
FROM ml.ml_c_ct_ui_user_dimen_d
WHERE partition_day = '{partition_day}'
AND (is_puppet = 'true' or is_classifyuser = 'true')
UNION ALL
--公司内网覆盖用户
select distinct user_id
from dim.dim_device_user_staff
UNION ALL
--登陆过医生设备
SELECT distinct t1.user_id
FROM
(
SELECT user_id, v.device_id as device_id
FROM online.ml_user_history_detail
LATERAL VIEW EXPLODE(device_history_list) v AS device_id
WHERE partition_date = '{partition_day}'
)t1
JOIN
(
SELECT device_id
FROM online.ml_device_history_detail
WHERE partition_date = '{partition_day}'
AND is_login_doctor = '1'
)t2
ON t1.device_id = t2.device_id
)t2
on t1.user_id=t2.user_id
group by partition_date,device_id
)dev
on T.DEVICE_ID=dev.device_id
WHERE (spam_pv.device_id IS NULL or spam_pv.device_id = '')
and (dev.device_id is null or dev.device_id='')
GROUP by D.DEVICE_OS_TYPE,
D.ACTIVE_TYPE
"""
.
format
(
partition_day
=
yesterday_str
,
end_date
=
today_str
)
print
(
sql_search_ctr
)
search_ctr_df
=
spark
.
sql
(
sql_search_ctr
)
# spam_pv_df.createOrReplaceTempView("dev_view")
search_ctr_df
.
show
(
1
)
sql_res
=
search_ctr_df
.
collect
()
res_dict
=
{
"新增"
:
{
"ios"
:
{
"click_num"
:
0
,
"exposure"
:
0
},
"android"
:
{
"click_num"
:
0
,
"exposure"
:
0
}
},
"老活"
:
{
"ios"
:
{
"click_num"
:
0
,
"exposure"
:
0
},
"android"
:
{
"click_num"
:
0
,
"exposure"
:
0
}
}
}
print
(
"-------------------------------"
)
db
=
pymysql
.
connect
(
host
=
'172.16.40.158'
,
port
=
4000
,
user
=
'st_user'
,
passwd
=
'aqpuBLYzEV7tML5RPsN1pntUzFy'
,
db
=
'jerry_prod'
)
cursor
=
db
.
cursor
()
for
res
in
sql_res
:
print
(
res
)
if
res
.
ACTIVE_TYPE
:
if
res
.
ACTIVE_TYPE
in
(
'1'
,
'2'
):
res_dict
[
"新增"
][
res
.
DEVICE_OS_TYPE
][
"click_num"
]
+=
res
.
CLICK_NUM
res_dict
[
"新增"
][
res
.
DEVICE_OS_TYPE
][
"exposure"
]
+=
res
.
EXPOSURE
else
:
res_dict
[
"老活"
][
res
.
DEVICE_OS_TYPE
][
"click_num"
]
+=
res
.
CLICK_NUM
res_dict
[
"老活"
][
res
.
DEVICE_OS_TYPE
][
"exposure"
]
+=
res
.
EXPOSURE
for
active_type
in
res_dict
:
for
device_os_type
in
res_dict
[
active_type
]:
partition_date
=
yesterday_str
pid
=
hashlib
.
md5
((
partition_date
+
device_os_type
+
active_type
)
.
encode
(
"utf8"
))
.
hexdigest
()
click_num
=
res_dict
[
active_type
][
device_os_type
][
"click_num"
]
exposure
=
res_dict
[
active_type
][
device_os_type
][
"exposure"
]
try
:
search_ctr
=
round
(
click_num
/
exposure
,
5
)
except
:
search_ctr
=
0
instert_sql
=
"""replace into search_tractate_ctr(
partition_date,device_os_type,active_type,pid,click_num,exposure,search_ctr) VALUES('{partition_date}','{device_os_type}','{active_type}','{pid}',{click_num},{exposure},{search_ctr});"""
.
format
(
partition_date
=
partition_date
,
device_os_type
=
device_os_type
,
active_type
=
active_type
,
pid
=
pid
,
click_num
=
click_num
,
exposure
=
exposure
,
search_ctr
=
search_ctr
)
print
(
instert_sql
)
# cursor.execute("set names 'UTF8'")
res
=
cursor
.
execute
(
instert_sql
)
db
.
commit
()
print
(
res
)
# cursor.executemany()
db
.
close
()
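Apart from the page code, the click actions, and the target table, search_tractate_ctr.py mirrors search_answer_ctr.py: each (active_type, os) bucket is reduced to a deterministic pid and a zero-safe CTR before the write. A minimal sketch of that step (the helper name ctr_row is an assumption for illustration, not code from this commit):

import hashlib

def ctr_row(partition_date, device_os_type, active_type, click_num, exposure):
    # pid depends only on (date, os, active_type), so re-running a day yields the same pid
    pid = hashlib.md5((partition_date + device_os_type + active_type).encode("utf8")).hexdigest()
    # guard against zero or missing exposure instead of a bare try/except
    search_ctr = round(click_num / exposure, 5) if exposure else 0
    return pid, click_num, exposure, search_ctr

Because the pid is stable per bucket and day, the REPLACE INTO presumably overwrites the earlier row on a re-run rather than adding a duplicate.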