宋柯 / meta_base_code / Commits

Commit 48337495 authored Jun 04, 2021 by 郭羽
搜索指标统计 (search metrics statistics)

Parent: da47d739
Showing 3 changed files with 64 additions and 62 deletions (+64, -62):
task.sh (+1, -1)
task/daily_search_word_count.py (+2, -0)
task/search_strategy_d.py (+61, -61)
task.sh (view file @ 48337495)
...
...
@@ -2,7 +2,7 @@ source /srv/envs/esmm/bin/activate
/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 1g --executor-memory 2g --executor-cores 1 --num-executors 2 --conf spark.default.parallelism=50 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/conent_detail_page_grayscale_ctr.py
/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 1g --executor-memory 2g --executor-cores 1 --num-executors 2 --conf spark.default.parallelism=50 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/recommend_strategy_d.py
/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 1g --executor-memory 2g --executor-cores 1 --num-executors 2 --conf spark.default.parallelism=50 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/recommend_strategy_fix.py
# /opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 1g --executor-memory 2g --executor-cores 1 --num-executors 2 --conf spark.default.parallelism=50 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/search_strategy_d.py
/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 1g --executor-memory 2g --executor-cores 1 --num-executors 2 --conf spark.default.parallelism=50 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/search_strategy_d.py
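Note: the spark-submit lines above all pass the same resources and Spark properties on the command line. As a rough illustration only (not part of this commit), the same settings could be applied from inside one of the Python tasks via SparkSession.builder; the app name below is a placeholder, and --num-executors corresponds to the spark.executor.instances property on YARN.

from pyspark.sql import SparkSession

# Sketch only: mirrors the flags used in task.sh; "search_strategy_d" is a placeholder app name.
spark = (
    SparkSession.builder
    .appName("search_strategy_d")
    .config("spark.executor.memory", "2g")
    .config("spark.executor.cores", "1")
    .config("spark.executor.instances", "2")       # --num-executors 2
    .config("spark.default.parallelism", "50")
    .config("spark.storage.memoryFraction", "0.5")
    .config("spark.shuffle.memoryFraction", "0.3")
    .config("spark.locality.wait", "0")
    .enableHiveSupport()
    .getOrCreate()
)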
task/daily_search_word_count.py (view file @ 48337495)
...
...
@@ -128,6 +128,8 @@ for t in range(0, task_days):
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['page_name'] = 'search_home'
AND params['in_page_pos'] <> '更美热门榜'
union all
SELECT cl_id,
...
...
task/search_strategy_d.py (view file @ 48337495)
...
...
@@ -80,61 +80,61 @@ for t in range(0, task_days):
today_str = now.strftime("%Y%m%d")
yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")

sql_dev_device_id = """
SELECT partition_date,device_id
FROM
    (--find the first device_id each user_id was active on that day
    SELECT user_id,partition_date,
           if(size(device_list) > 0, device_list [ 0 ], '') AS device_id
    FROM online.ml_user_updates
    WHERE partition_date>='{yesterday_str}' AND partition_date<'{today_str}'
    )t1
JOIN
    ( --doctor accounts
    SELECT distinct user_id
    FROM online.tl_hdfs_doctor_view
    WHERE partition_date = '{yesterday_str}'
    --sockpuppet accounts / model users
    UNION ALL
    SELECT user_id
    FROM ml.ml_c_ct_ui_user_dimen_d
    WHERE partition_day = '{yesterday_str}'
    AND (is_puppet = 'true' or is_classifyuser = 'true')
    UNION ALL
    --users covered by the company intranet (staff)
    select distinct user_id
    from dim.dim_device_user_staff
    UNION ALL
    --devices that have logged in to a doctor account
    SELECT distinct t1.user_id
    FROM
        (
        SELECT user_id, v.device_id as device_id
        FROM online.ml_user_history_detail
        LATERAL VIEW EXPLODE(device_history_list) v AS device_id
        WHERE partition_date = '{yesterday_str}'
        )t1
    JOIN
        (
        SELECT device_id
        FROM online.ml_device_history_detail
        WHERE partition_date = '{yesterday_str}'
        AND is_login_doctor = '1'
        )t2
    ON t1.device_id = t2.device_id
    )t2
on t1.user_id=t2.user_id
group by partition_date,device_id
""".format(yesterday_str=yesterday_str, today_str=today_str)

print(sql_dev_device_id)
dev_df = spark.sql(sql_dev_device_id)
dev_df_view = dev_df.createOrReplaceTempView("dev_view")
dev_df.cache()
dev_df.show(1)
sql_res = dev_df.collect()
# sql_dev_device_id = """
# SELECT partition_date,device_id
# FROM
#     (--find the first device_id each user_id was active on that day
#     SELECT user_id,partition_date,
#     if(size(device_list) > 0, device_list [ 0 ], '') AS device_id
#     FROM online.ml_user_updates
#     WHERE partition_date>='{yesterday_str}' AND partition_date<'{today_str}'
#     )t1
# JOIN
#     ( --doctor accounts
#     SELECT distinct user_id
#     FROM online.tl_hdfs_doctor_view
#     WHERE partition_date = '{yesterday_str}'
#
#     --sockpuppet accounts / model users
#     UNION ALL
#     SELECT user_id
#     FROM ml.ml_c_ct_ui_user_dimen_d
#     WHERE partition_day = '{yesterday_str}'
#     AND (is_puppet = 'true' or is_classifyuser = 'true')
#
#     UNION ALL
#     --users covered by the company intranet (staff)
#     select distinct user_id
#     from dim.dim_device_user_staff
#
#     UNION ALL
#     --devices that have logged in to a doctor account
#     SELECT distinct t1.user_id
#     FROM
#     (
#     SELECT user_id, v.device_id as device_id
#     FROM online.ml_user_history_detail
#     LATERAL VIEW EXPLODE(device_history_list) v AS device_id
#     WHERE partition_date = '{yesterday_str}'
#     )t1
#     JOIN
#     (
#     SELECT device_id
#     FROM online.ml_device_history_detail
#     WHERE partition_date = '{yesterday_str}'
#     AND is_login_doctor = '1'
#     )t2
#     ON t1.device_id = t2.device_id
#     )t2
# on t1.user_id=t2.user_id
# group by partition_date,device_id
# """.format(yesterday_str=yesterday_str, today_str=today_str)
# print(sql_dev_device_id)
# dev_df = spark.sql(sql_dev_device_id)
# dev_df_view = dev_df.createOrReplaceTempView("dev_view")
# dev_df.cache()
# dev_df.show(1)
# sql_res = dev_df.collect()
# for res in sql_res:
# print(res)
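For readers unfamiliar with the temp-view pattern used in this hunk: createOrReplaceTempView("dev_view") registers dev_df under the name dev_view, which is what lets the SQL later in this file write LEFT JOIN dev_view. A minimal, hypothetical usage sketch (not part of the commit):

# Hypothetical check, for illustration only: query the registered view by name.
check_df = spark.sql(
    "SELECT partition_date, count(DISTINCT device_id) AS excluded_devices "
    "FROM dev_view GROUP BY partition_date"
)
check_df.show()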
...
...
@@ -266,10 +266,10 @@ for t in range(0, task_days):
LEFT JOIN spam_pv
on spam_pv.device_id=t1.cl_id
LEFT JOIN dev_view
on t1.partition_date=dev_view.partition_date and t1.cl_id=dev_view.device_id
-- LEFT JOIN dev_view
-- on t1.partition_date=dev_view.partition_date and t1.cl_id=dev_view.device_id
WHERE (spam_pv.device_id IS NULL or spam_pv.device_id ='')
and (dev_view.device_id is null or dev_view.device_id ='')
-- and (dev_view.device_id is null or dev_view.device_id ='')
GROUP BY t1.partition_date,t2.active_type,device_os_type,channel
)t
)t3
...
...
@@ -361,10 +361,10 @@ for t in range(0, task_days):
on t1.cl_id=dev.device_id and t1.partition_date = dev.partition_date
LEFT JOIN spam_pv
on spam_pv.device_id=t1.cl_id
LEFT JOIN dev_view
on t1.partition_date=dev_view.partition_date and t1.cl_id=dev_view.device_id
-- LEFT JOIN dev_view
-- on t1.partition_date=dev_view.partition_date and t1.cl_id=dev_view.device_id
WHERE (spam_pv.device_id IS NULL or spam_pv.device_id ='')
and (dev_view.device_id is null or dev_view.device_id ='')
-- and (dev_view.device_id is null or dev_view.device_id ='')
GROUP BY t1.partition_date,active_type,device_os_type,channel
)t4
on t3.partition_date=t4.partition_date and t3.active_type=t4.active_type and t3.device_os_type = t4.device_os_type AND t3.channel = t4.channel
...
...
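The two SQL hunks above show the join against dev_view alongside a commented-out copy of the same join; combined with the WHERE ... IS NULL filter, the intent is to drop rows whose device appears in dev_view (doctor, sockpuppet, staff, and doctor-logged-in devices). The same exclusion can also be written as an anti join in the DataFrame API; the sketch below uses hypothetical DataFrame names and only illustrates the pattern, it is not code from this repository.

# exposure_df is a hypothetical DataFrame of raw events; dev_df is the dev_view data built above.
clean_df = exposure_df.join(
    dev_df,
    on=[exposure_df.cl_id == dev_df.device_id,
        exposure_df.partition_date == dev_df.partition_date],
    how="left_anti",  # keep only events whose device is NOT in dev_view
)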