宋柯 / meta_base_code / Commits / f6b8c2e3

Commit f6b8c2e3 authored Nov 25, 2020 by litaolemo

update

parent 52f138d9

Showing 1 changed file with 284 additions and 0 deletions

task/daily_search_word_count_last_two_year.py  (new file, 0 → 100644, +284 −0)
# -*- coding:UTF-8 -*-
# @Time : 2020/11/25 16:11
# @File : daily_search_word_count_last_two_year.py
# @email : litao@igengmei.com
# @author : litao
# coding=utf-8
import hashlib
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
import pymysql
from elasticsearch import Elasticsearch
import datetime
# from maintenance.func_send_email_with_file import send_file_email
# import zipfile
# es = Elasticsearch([
# {
# 'host': '172.16.31.17',
# 'port': 9200,
# }, {
# 'host': '172.16.31.11',
# 'port': 9200,
# }])
es = Elasticsearch([
    {
        'host': '172.16.31.13',
        'port': 9000,
    }])
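
# Fetch the names of all online tags with tag_type = 3 from MySQL; returned as
# a dict so tag-membership checks below are O(1) lookups.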
def get_all_tag():
    res_dict = {}
    sql = "select name from api_tag where is_online = 1 and tag_type = 3"
    db = pymysql.connect(host='172.16.30.136', port=3306, user='doris',
                         passwd='o5gbA27hXHHm', db='doris_prod')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    db.close()
    for res in result:
        res_dict[res[0]] = 1
    return res_dict
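
# Ask Elasticsearch how many service documents match the keyword in "name" or
# "sku_list.name" (AND semantics, ik_max_word analyzer); size=0 because only
# the hit total is needed.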
def from_es_get_service_num(query):
    q = {
        "query": {
            "bool": {
                "must": {
                    "multi_match": {
                        "query": query,
                        "fields": ["name", "sku_list.name"],
                        "operator": "and",
                        "type": "best_fields",
                        "analyzer": "ik_max_word"
                    }
                }
            }
        }
    }
    results = es.search(index='gm_dbmw-service-read', doc_type='service',
                        timeout='10s', size=0, body=q)
    if results['hits']['total'] > 0:
        return results['hits']['total']
    else:
        return 0
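
# Spark session setup: TiSpark and Hive options plus JDBC URIs for the
# production MySQL/TiDB databases.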
startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
# NOTE: this second assignment overwrites the pd.addresses value set just above
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("test")
spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .enableHiveSupport().getOrCreate())
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
# print(huidu_device_id_sql)
# huidu_device_id_df = spark.sql(huidu_device_id_sql)
# huidu_device_id_df.createOrReplaceTempView("dev_view")
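
# With task_days = 2 the loop body runs exactly once (t = 1): today_str is
# yesterday and last_two_year_str is the day before that, so the query window
# below covers a single partition day despite the file name.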
task_list = []
task_days = 2
for t in range(1, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    # last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    last_two_year_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    # one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
    sql_search_ctr = r"""
SELECT query,
partition_date,
all_search_uv as all_search_uv, -- total search UV
all_search_pv as all_search_pv  -- total search PV
FROM (
-- search PV and UV
SELECT query
, count(click.cl_id) as all_search_pv
, count(distinct click.cl_id) as all_search_uv,
partition_date
FROM (
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
params['input_type'] as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND ((action = 'do_search' AND params['input_type'] <> 'everyone_watch') or
action = 'search_result_click_search')
UNION all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
params['input_type'] as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'do_search'
and params['input_type'] = 'everyone_watch'
and params['tab'] = '精选'
and page_name = 'home'
AND params['query'] not in ('AI测颜值', 'AI测肤质') -- these two queries do not open a search result page
union all
SELECT cl_id,
partition_date,
action,
'search_home' as page_name,
'' as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['page_name'] = 'search_home'
union all
SELECT cl_id,
partition_date,
action,
'home' as page_name,
'首页-猜你喜欢' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['in_page_pos'] = '猜你喜欢'
--AND params['tab_name']='精选'
AND params['card_type'] = 'search_word'
AND params['card_name'] not in ('AI测颜值', 'AI测肤质') -- these two queries do not open a search result page
--AND page_name='home' (page_name is empty on Android)
union all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
'美购首页-大家都在搜' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['page_name'] = 'welfare_home'
AND params['card_type'] = 'search_word'
AND params['in_page_pos'] = '大家都在搜'
union all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
'高亮词' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['card_type'] = 'highlight_word'
) click
GROUP BY query,partition_date
) t3 order by all_search_uv desc
"""
.
format
(
start_date
=
last_two_year_str
,
end_date
=
today_str
)
print
(
sql_search_ctr
)
    search_ctr_df = spark.sql(sql_search_ctr)
    search_ctr_df.show(1)
    sql_res = search_ctr_df.collect()
    tag_names_list_week = []
    tag_dict = get_all_tag()
    for name in sql_res:
        # print(name)
        keywords = name.query
        pv = name.all_search_pv
        uv = name.all_search_uv
        is_tag = tag_dict.get(keywords, 0)
        service_num = from_es_get_service_num(keywords)
        tag_names_list_week.append((keywords, today_str, 0, service_num, pv, is_tag))
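
    # Persist to MySQL: the target table is truncated first, then rows are
    # replace-inserted in batches of 100.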
    db = pymysql.connect(host='172.16.30.136', port=3306, user='doris',
                         passwd='o5gbA27hXHHm', db='doris_prod')
    cursor = db.cursor()
    delete_sql = "TRUNCATE TABLE strategy_wiki_smr_score"
    cursor.execute(delete_sql)
    db.commit()
    insert_sql = "replace into strategy_wiki_smr_score(keywords, today_str, is_delete, service_num, pv, is_tag) " \
                 "VALUES(%s,%s,%s,%s,%s,%s)"
    insert_list = []
    for count, item in enumerate(tag_names_list_week):
        keywords, today_str, is_delete, service_num, pv, is_tag = item
        try:
            if len(keywords) >= 200:  # skip over-long keywords
                continue
            insert_sql_tuple = (keywords, today_str, is_delete, service_num, pv, is_tag)
            insert_list.append(insert_sql_tuple)
            # print(insert_sql_tuple)
        except Exception:
            continue
        if count % 100 == 0:
            cursor.execute("set names 'UTF8'")
            res = cursor.executemany(insert_sql, insert_list)
            db.commit()
            # print(res)
            insert_list = []
        # print(count)
    # flush the remaining partial batch
    res = cursor.executemany(insert_sql, insert_list)
    db.commit()
    db.close()
    print(res)

if __name__ == "__main__":
    pass