Commit 0b93ad93, authored Dec 01, 2020 by litaolemo
Commit message: update
Parent: 02505046
Showing 1 changed file with 251 additions and 0 deletions.

new_user_analysis/new_user_word_count.py  (new file, mode 100644, +251 −0)
# -*- coding:UTF-8 -*-
# @Time : 2020/12/1 17:40
# @File : new_user_word_count.py
# @email : litao@igengmei.com
# @author : litao
import hashlib
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
import pymysql
# from elasticsearch import Elasticsearch
import datetime
# from maintenance.func_send_email_with_file import send_file_email
# import zipfile
# es = Elasticsearch([
# {
# 'host': '172.16.31.17',
# 'port': 9200,
# }, {
# 'host': '172.16.31.11',
# 'port': 9200,
# }])
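# Spark configuration: TiSpark/Hive settings, Kryo serializer, plus JDBC URIs stored under custom "prod.*" keys.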
startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri", "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri", "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri", "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri", "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri", "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
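# Note: "prod.tispark.pd.addresses" is assigned twice below; the second set() overwrites the first.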
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("test")
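# Create the SparkSession: TiSpark extensions enabled, Hive support for the online.* tables queried below.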
spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .enableHiveSupport()
         .getOrCreate())
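# Load shared UDF jars from HDFS and register Hive helper functions (json_map, is_json, arrayMerge); none are referenced by the query in this script.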
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
# print(huidu_device_id_sql)
# huidu_device_id_df = spark.sql(huidu_device_id_sql)
# huidu_device_id_df.createOrReplaceTempView("dev_view")
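# Process the last task_days days, one iteration per day: t = 0 is today, t = 1 is yesterday, and so on.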
task_list = []
task_days = 3
for t in range(0, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
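    # New-user device-id query: the template below is an empty placeholder and is never passed to spark.sql.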
    new_urser_device_id_sql = r"""
    """.format(today_str=today_str)
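    # Build the search-word query: the day's active, non-abnormal devices, left-joined to search events
    # (do_search, search_result_click_search, and several on_click_card variants).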
    sql_search_ctr = r"""
select t0.device_id as device_id,query from
((select device_id from online.ml_device_day_active_status where partition_date = '{today_str}' and active_type in (1,2)
) t2
LEFT JOIN
(
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY = '{today_str}'
AND is_abnormal_device = 'true'
)dev
on t2.device_id=dev.device_id
WHERE dev.device_id is null and t2.device_id is not null) t0 left join
( SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
params['input_type'] as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND ((action = 'do_search' AND params['input_type'] <> 'everyone_watch') or
action = 'search_result_click_search')
UNION all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
params['input_type'] as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'do_search'
and params['input_type'] = 'everyone_watch'
and params['tab'] = '精选'
and page_name = 'home'
AND params['query'] not in ('AI测颜值', 'AI测肤质') --these two queries do not lead to a search results page
union all
SELECT cl_id,
partition_date,
action,
'search_home' as page_name,
'' as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['page_name'] = 'search_home'
union all
SELECT cl_id,
partition_date,
action,
'home' as page_name,
'首页-猜你喜欢' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['in_page_pos'] = '猜你喜欢'
--AND params['tab_name']='精选'
AND params['card_type'] = 'search_word'
AND params['card_name'] not in ('AI测颜值', 'AI测肤质') --these two queries do not lead to a search results page
--AND page_name='home' (page_name is empty on Android)
union all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
'美购首页-大家都在搜' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['page_name'] = 'welfare_home'
AND params['card_type'] = 'search_word'
AND params['in_page_pos'] = '大家都在搜'
union all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
'高亮词' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['card_type'] = 'highlight_word'
) click
on click.cl_id=t0.device_id
"""
.
format
(
start_date
=
yesterday_str
,
end_date
=
today_str
,
today_str
=
yesterday_str
)
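    # NB: {today_str} is filled with yesterday_str here, so the active-device snapshot comes from the previous day.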
    print(sql_search_ctr)
    search_ctr_df = spark.sql(sql_search_ctr)
    search_ctr_df.show(1)
    sql_res = search_ctr_df.collect()
    tag_names_list_week = []
    for name in sql_res:
        # print(name)
        word = name.query
        doc_id = name.device_id
        print(word, doc_id)
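# The write-back below is disabled: when enabled it batches rows into jerry_prod.daily_search_word_count_fix
# via REPLACE INTO, 100 rows at a time, with pid = md5(partition_date + word).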
# partition_date = str(now + datetime.timedelta(days=-1))
# tag_names_list_week.append((word, nums, uv, partition_date))
#
# db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
# db='jerry_prod')
# cursor = db.cursor()
# insert_sql = "replace into daily_search_word_count_fix(word, nums, uv,pid,partition_day) VALUES(%s,%s,%s,%s,%s)"
# insert_list = []
# for count, item in enumerate(tag_names_list_week):
# word, nums, uv, partition_date = item
# try:
# if len(word) >= 200:
# continue
# pid = hashlib.md5((partition_date + word).encode("utf8")).hexdigest()
# insert_sql_tuple = (word, nums, uv, pid, partition_date)
# insert_list.append(insert_sql_tuple)
# # print(insert_sql_tuple)
# except:
# continue
# if count % 100 == 0:
# cursor.execute("set names 'UTF8'")
# res = cursor.executemany(insert_sql, insert_list)
# db.commit()
# # print(res)
# insert_list = []
# # print(count)
# res = cursor.executemany(insert_sql, insert_list)
# db.commit()
# db.close()
# print(res)
if __name__ == "__main__":
    tag_names_list = []
    tag_names_list_week = []
    all_data_day = []
    all_data_week = []