Commit 320555a5 authored by litaolemo

update

parent 577a8614
@@ -6,6 +6,9 @@
# coding=utf-8
import hashlib
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
import pymysql
# from elasticsearch import Elasticsearch
@@ -22,58 +25,203 @@ import datetime
# 'port': 9200,
# }])
startTime = time.time()
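# Spark configuration: enable cross joins and TiSpark index reads, use Kryo serialization, and disable MapReduce output compression.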
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
"jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
"jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
"jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("test")
spark = (SparkSession.builder.config(conf=sparkConf).config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
.config("spark.tispark.pd.addresses", "172.16.40.170:2379").enableHiveSupport().getOrCreate())
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
# print(huidu_device_id_sql)
# huidu_device_id_df = spark.sql(huidu_device_id_sql)
# huidu_device_id_df.createOrReplaceTempView("dev_view")
task_list = []
task_days = 80
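# Walk backwards over the last task_days days; each iteration aggregates one day of search traffic.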
for t in range(0, task_days):
day_num = 0 - t
now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
today_str = now.strftime("%Y%m%d")
yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
one_week_ago_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
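# Search PV/UV per query and day: union click logs from six search entry points (search box, "everyone watch", search home, home "guess you like", welfare home, highlighted words), then count events and distinct devices per query.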
sql_search_ctr = r"""
SELECT query,
partition_date,
, all_search_uv as all_search_uv --全部搜索uv
, t3.all_search_pv as all_search_pv --全部搜索pv
FROM (
--搜索pvuv
SELECT query
, count(click.cl_id) as all_search_pv
, count(distinct click.cl_id) as all_search_uv
FROM (
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
params['input_type'] as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND ((action = 'do_search' AND params['input_type'] <> 'everyone_watch') or
action = 'search_result_click_search')
UNION all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
params['input_type'] as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'do_search'
and params['input_type'] = 'everyone_watch'
and params['tab'] = '精选'
and page_name = 'home'
AND params['query'] not in ('AI测颜值', 'AI测肤质') -- these two queries do not open a search result page
union all
SELECT cl_id,
partition_date,
action,
'search_home' as page_name,
'' as input_type,
app_version,
params['query'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['page_name'] = 'search_home'
union all
SELECT cl_id,
partition_date,
action,
'home' as page_name,
'首页-猜你喜欢' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['in_page_pos'] = '猜你喜欢'
--AND params['tab_name']='精选'
AND params['card_type'] = 'search_word'
AND params['card_name'] not in ('AI测颜值', 'AI测肤质') -- these two queries do not open a search result page
--AND page_name='home' (page_name is empty on Android)
union all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
'美购首页-大家都在搜' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['page_name'] = 'welfare_home'
AND params['card_type'] = 'search_word'
AND params['in_page_pos'] = '大家都在搜'
union all
SELECT cl_id,
partition_date,
action,
params['page_name'] as page_name,
'高亮词' as input_type,
app_version,
params['card_name'] as query
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >= '{start_date}'
AND partition_date < '{end_date}'
AND action = 'on_click_card'
AND params['card_type'] = 'highlight_word'
) click
GROUP BY query, partition_date
) t3 order by all_search_uv desc
""".format(start_date=yesterday_str, end_date=today_str)
print(sql_search_ctr)
search_ctr_df = spark.sql(sql_search_ctr)
# spam_pv_df.createOrReplaceTempView("dev_view")
search_ctr_df.show(1)
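# collect() pulls the aggregated rows to the driver: one row per (query, partition_date), small enough to hold in memory.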
sql_res = search_ctr_df.collect()
# date_str = yesterday_date.strftime("%Y%m%d")
tag_names_list_week = []
for name in sql_res:
# pyspark Row supports item access but has no dict-style .get()
word = name["query"]
nums = name["all_search_pv"]
uv = name["all_search_uv"]
partition_date = str((now + datetime.timedelta(days=-1)).date())
tag_names_list_week.append((word, nums, uv, partition_date))
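# Write the day's counts to TiDB; REPLACE INTO keyed on pid makes re-runs idempotent, assuming pid is the table's unique key.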
db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
db='jerry_prod')
cursor = db.cursor()
insert_sql = "replace into daily_search_word_count(word, nums, uv,pid,partition_day) VALUES(%s,%s,%s,%s,%s)"
insert_list = []
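# Flush to the database in batches of roughly 100 rows to keep transactions small.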
for count, item in enumerate(tag_names_list_week):
word, nums, uv, date_str = item
partition_date = date_str
pid = hashlib.md5((partition_date + word).encode("utf8")).hexdigest()
insert_sql_tuple = (word, nums, uv, pid, date_str)
insert_list.append(insert_sql_tuple)
# print(insert_sql)
if count % 100 == 0:
# cursor.execute("set names 'UTF8'")
res = cursor.executemany(insert_sql, insert_list)
db.commit()
# print(res)
insert_list = []
print(count)
res = cursor.executemany(insert_sql, insert_list)
db.commit()
db.close()
print(res)
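# NOTE: everything above runs at import time; the block below is the pre-existing daily backfill entry point.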
if __name__ == "__main__": if __name__ == "__main__":
tag_names_list = [] tag_names_list = []
tag_names_list_week = [] tag_names_list_week = []
all_data_day = [] all_data_day = []
all_data_week = [] all_data_week = []
db_zhengxing_eagle = pymysql.connect(host="172.16.30.136", port=3306, user="doris",
password="o5gbA27hXHHm",
db="doris_prod",
charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
zhengxing_cursor = db_zhengxing_eagle.cursor()
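# Backfill the previous three days of keyword counts from the doris_prod aggregates.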
for count_date in range(0, 3):
tag_names_list_week = []
date = datetime.datetime.now().date() - datetime.timedelta(days=count_date)
print(str(date))
yesterday_date = datetime.datetime.now().date() - datetime.timedelta(days=count_date + 1)
# date_str = yesterday_date.strftime("%Y%m%d")
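# Sum click counts (sorted) and uv per keyword for that previous day.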
sql = """select keywords,sum(sorted) as nums,sum(uv) as uvs from api_search_word where is_delete = 0 and
create_time = "{yesterday_date}" group by keywords order by nums desc""".\
format(yesterday_date=str(yesterday_date))
print(sql)
zhengxing_cursor.execute("set names 'UTF8'")
zhengxing_cursor.execute(sql)
data = zhengxing_cursor.fetchall()
for name in list(data):
word = name.get("keywords", None)
nums = name.get("nums", 0)
uv = name.get("uvs", 0)
tag_names_list_week.append((word, nums, uv, str(yesterday_date)))
db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
db='jerry_prod')
cursor = db.cursor()
insert_sql = "replace into daily_search_word_count(word, nums, uv,pid,partition_day) VALUES(%s,%s,%s,%s,%s)"
insert_list = []
for count, item in enumerate(tag_names_list_week):
word, nums, uv, date_str = item
partition_date = date_str
pid = hashlib.md5((partition_date+word).encode("utf8")).hexdigest()
insert_sql_tuple = (word, nums, uv, pid, date_str)
insert_list.append(insert_sql_tuple)
# print(insert_sql)
if count % 100 == 0:
# cursor.execute("set names 'UTF8'")
res = cursor.executemany(insert_sql, insert_list)
db.commit()
# print(res)
insert_list = []
print(count)
res = cursor.executemany(insert_sql, insert_list)
db.commit()
db.close()
print(res)
\ No newline at end of file