# -*- coding: UTF-8 -*-
# @Time   : 2020/11/13 11:08
# @File   : daily_search_word_count.py
# @email  : litao@igengmei.com
# @author : litao
import hashlib
import time
import datetime

import pymysql
from pyspark import SparkConf
from pyspark.sql import SparkSession

# from elasticsearch import Elasticsearch
# from maintenance.func_send_email_with_file import send_file_email
# import zipfile

# es = Elasticsearch([
#     {'host': '172.16.31.17', 'port': 9200},
#     {'host': '172.16.31.11', 'port': 9200},
# ])

startTime = time.time()

sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
# sparkConf.set("prod.jerry.jdbcuri",
#               "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
# NOTE: the second set() below overrides the first, since both use the same key.
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
# sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("test")

spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .enableHiveSupport().getOrCreate())

# Register the brickhouse and in-house Hive UDFs used by queries on this session.
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")

# print(huidu_device_id_sql)
# huidu_device_id_df = spark.sql(huidu_device_id_sql)
# huidu_device_id_df.createOrReplaceTempView("dev_view")

task_list = []
task_days = 3

# Run the job once per day for the last `task_days` days (t = 0 is today).
for t in range(0, task_days):
    day_num = 0 - t
    now = datetime.datetime.now() + datetime.timedelta(days=day_num)
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
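    # The query below unions several click sources into one `click` set, then
    # aggregates search PV (event count) and UV (distinct cl_id) per query and
    # partition_date:
    #   1. do_search events (input_type <> 'everyone_watch') plus
    #      search_result_click_search events;
    #   2. do_search events with input_type = 'everyone_watch' on the home
    #      page's 精选 tab, excluding 'AI测颜值'/'AI测肤质';
    #   3. card clicks on the search_home page;
    #   4. '猜你喜欢' search_word cards on the home feed, with the same two
    #      words excluded;
    #   5. '大家都在搜' search_word cards on welfare_home;
    #   6. highlight_word card clicks.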
    sql_search_ctr = r"""
        SELECT query,
               partition_date,
               all_search_uv as all_search_uv,  -- total search UV
               all_search_pv as all_search_pv   -- total search PV
        FROM (
            -- search PV/UV
            SELECT query,
                   count(click.cl_id)          as all_search_pv,
                   count(distinct click.cl_id) as all_search_uv,
                   partition_date
            FROM (
                SELECT cl_id, partition_date, action,
                       params['page_name']  as page_name,
                       params['input_type'] as input_type,
                       app_version,
                       params['query']      as query
                FROM online.bl_hdfs_maidian_updates
                WHERE partition_date >= '{start_date}'
                  AND partition_date < '{end_date}'
                  AND ((action = 'do_search' AND params['input_type'] <> 'everyone_watch')
                       OR action = 'search_result_click_search')

                UNION ALL
                SELECT cl_id, partition_date, action,
                       params['page_name']  as page_name,
                       params['input_type'] as input_type,
                       app_version,
                       params['query']      as query
                FROM online.bl_hdfs_maidian_updates
                WHERE partition_date >= '{start_date}'
                  AND partition_date < '{end_date}'
                  AND action = 'do_search'
                  AND params['input_type'] = 'everyone_watch'
                  AND params['tab'] = '精选'
                  AND page_name = 'home'
                  AND params['query'] not in ('AI测颜值', 'AI测肤质')  -- these two words do not lead to the search result page

                UNION ALL
                SELECT cl_id, partition_date, action,
                       'search_home' as page_name,
                       ''            as input_type,
                       app_version,
                       coalesce(params['query'], params['card_name']) as query
                FROM online.bl_hdfs_maidian_updates
                WHERE partition_date >= '{start_date}'
                  AND partition_date < '{end_date}'
                  AND action = 'on_click_card'
                  AND params['page_name'] = 'search_home'

                UNION ALL
                SELECT cl_id, partition_date, action,
                       'home'          as page_name,
                       '首页-猜你喜欢' as input_type,
                       app_version,
                       params['card_name'] as query
                FROM online.bl_hdfs_maidian_updates
                WHERE partition_date >= '{start_date}'
                  AND partition_date < '{end_date}'
                  AND action = 'on_click_card'
                  AND params['in_page_pos'] = '猜你喜欢'
                  --AND params['tab_name'] = '精选'
                  AND params['card_type'] = 'search_word'
                  AND params['card_name'] not in ('AI测颜值', 'AI测肤质')  -- these two words do not lead to the search result page
                  --AND page_name = 'home'  -- page_name is empty on Android

                UNION ALL
                SELECT cl_id, partition_date, action,
                       params['page_name']   as page_name,
                       '美购首页-大家都在搜' as input_type,
                       app_version,
                       params['card_name']   as query
                FROM online.bl_hdfs_maidian_updates
                WHERE partition_date >= '{start_date}'
                  AND partition_date < '{end_date}'
                  AND action = 'on_click_card'
                  AND params['page_name'] = 'welfare_home'
                  AND params['card_type'] = 'search_word'
                  AND params['in_page_pos'] = '大家都在搜'

                UNION ALL
                SELECT cl_id, partition_date, action,
                       params['page_name'] as page_name,
                       '高亮词'            as input_type,
                       app_version,
                       params['card_name'] as query
                FROM online.bl_hdfs_maidian_updates
                WHERE partition_date >= '{start_date}'
                  AND partition_date < '{end_date}'
                  AND action = 'on_click_card'
                  AND params['card_type'] = 'highlight_word'
            ) click
            GROUP BY query, partition_date
        ) t3
        ORDER BY all_search_uv desc
    """.format(start_date=yesterday_str, end_date=today_str)

    print(sql_search_ctr)
    search_ctr_df = spark.sql(sql_search_ctr)
    search_ctr_df.show(1)
    sql_res = search_ctr_df.collect()

    tag_names_list_week = []
    for name in sql_res:
        # print(name)
        word = name.query
        nums = name.all_search_pv
        uv = name.all_search_uv
        # The query covers [yesterday, today), so the partition day for these
        # rows is the day before `now`, formatted as a plain date string so
        # that pid below is stable across reruns.
        partition_date = (now + datetime.timedelta(days=-1)).strftime("%Y-%m-%d")
        tag_names_list_week.append((word, nums, uv, partition_date))
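    # Load into MySQL in batches. `replace into` keyed on
    # pid = md5(partition_day + word) keeps the job idempotent: re-running the
    # same day overwrites existing rows instead of duplicating them.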
    db = pymysql.connect(host='172.16.50.175', port=3306, user='doris',
                         passwd='o5gbA27hXHHm', db='doris_olap')
    cursor = db.cursor()
    insert_sql = ("replace into daily_search_word_count"
                  "(word, nums, uv, pid, partition_day) VALUES(%s,%s,%s,%s,%s)")
    insert_list = []
    res = None
    for count, item in enumerate(tag_names_list_week):
        word, nums, uv, partition_date = item
        try:
            if len(word) >= 200:
                # Skip abnormally long query strings.
                continue
            pid = hashlib.md5((partition_date + word).encode("utf8")).hexdigest()
            insert_list.append((word, nums, uv, pid, partition_date))
            # print((word, nums, uv, pid, partition_date))
        except Exception:
            continue
        if count % 100 == 0:
            # Flush the buffered rows roughly every 100 records.
            cursor.execute("set names 'UTF8'")
            res = cursor.executemany(insert_sql, insert_list)
            db.commit()
            # print(res)
            insert_list = []
            # print(count)
    # Flush the final partial batch, if any.
    if insert_list:
        res = cursor.executemany(insert_sql, insert_list)
        db.commit()
    db.close()
    print(res)


if __name__ == "__main__":
    # NOTE: the job itself runs at module level above; these lists are
    # initialized here but never used.
    tag_names_list = []
    tag_names_list_week = []
    all_data_day = []
    all_data_week = []
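# A minimal verification sketch (commented out, not part of the job): read
# back one day's top words to spot-check the load. Table and column names
# follow insert_sql above; the date value and LIMIT are illustrative.
#
#   conn = pymysql.connect(host='172.16.50.175', port=3306, user='doris',
#                          passwd='o5gbA27hXHHm', db='doris_olap')
#   cur = conn.cursor()
#   cur.execute("select word, nums, uv from daily_search_word_count "
#               "where partition_day = %s order by uv desc limit 20",
#               ("2020-11-12",))
#   print(cur.fetchall())
#   conn.close()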