# -*- coding:UTF-8 -*-
# @Time : 2020/8/21 16:43
# @File : search_strategy_d.py
# @email : litao@igengmei.com
# @author : litao

import hashlib
import json
import pymysql
import xlwt, datetime
import redis
# from pyhive import hive
from maintenance.func_send_email_with_file import send_file_email
from typing import Dict, List
from elasticsearch_7 import Elasticsearch
from elasticsearch_7.helpers import scan
import sys
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.functions import lit
import pytispark.pytispark as pti

# db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
#                      db='jerry_prod')
# cursor = db.cursor()

startTime = time.time()

sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")

spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .appName("LR PYSPARK TEST")
         .enableHiveSupport()
         .getOrCreate())

spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")

task_list = []
task_days = 1
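
# A hedged illustration, added for readability and never called by the job itself: the
# report below rolls every dimension up to a grand-total bucket by wrapping each value
# in array(value, '合计') ('合计' means "total") and exploding it with LATERAL VIEW.
# Each row is emitted once per bucket, so a single GROUP BY yields both the per-value
# rows and the total row. The same trick in miniature, on an in-memory DataFrame:
def _demo_total_rollup(spark_session):
    demo_df = spark_session.createDataFrame([("ios", 1), ("android", 2)], ["os", "pv"])
    demo_df.createOrReplaceTempView("demo")
    # Yields three rows: ('ios', 1), ('android', 2) and the total ('合计', 3).
    return spark_session.sql(
        "SELECT os_dim, sum(pv) AS pv FROM demo "
        "LATERAL VIEW explode(array(os, '合计')) t AS os_dim "
        "GROUP BY os_dim"
    )
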
for t in range(0, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_ago_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")

    # Column aliases such as 1_pv start with a digit, so they are backtick-quoted.
    # FIXME: the outer "FROM (...) t1" subquery between the SELECT list and the first
    # LEFT JOIN is missing in the source file; t1 is referenced by both join conditions.
    sql = """SELECT t3.query as query
                   ,t3.device_os_type as device_type
                   ,t3.active_type as active_type
                   ,t3.channel as channel_type
                   ,NVL(t3.search_pv,0) as `1_pv`
                   ,NVL(t3.search_uv,0) as `1_uv`
                   ,NVL(t4.hexin_card_click_pv,0) as `1_search_core_pv`
                   ,NVL(t4.neirong_card_click_pv,0) as `1_search_pv`
        LEFT JOIN
        (   -- search pv/uv per query and device dimension
            SELECT query,active_type,device_os_type,channel,search_pv,search_uv
            FROM
            (
                SELECT query,active_type,device_os_type,channel
                      ,count(t1.cl_id) as search_pv
                      ,count(distinct t1.cl_id) as search_uv
                FROM
                (   -- every event that counts as a search
                    SELECT partition_date,params['query'] as query,cl_id
                    FROM online.bl_hdfs_maidian_updates
                    WHERE partition_date >= {yesterday_str}
                      AND partition_date < {today_str}
                      AND action in ('do_search','search_result_click_search')

                    UNION ALL
                    SELECT partition_date,params['query'] as query,cl_id
                    FROM online.bl_hdfs_maidian_updates
                    WHERE partition_date >= {yesterday_str}
                      AND partition_date < {today_str}
                      AND action = 'on_click_card'
                      AND params['page_name']='search_home'

                    UNION ALL
                    -- "guess you like" (猜你喜欢) cards on the featured (精选) tab
                    SELECT partition_date,params['card_name'] as query,cl_id
                    FROM online.bl_hdfs_maidian_updates
                    WHERE partition_date >= {yesterday_str}
                      AND partition_date < {today_str}
                      AND action = 'on_click_card'
                      AND params['in_page_pos']='猜你喜欢'
                      AND params['tab_name']='精选'
                      AND params['card_type']='search_word'

                    UNION ALL
                    -- "everyone is searching" (大家都在搜) cards on the welfare home page
                    SELECT partition_date,params['card_name'] as query,cl_id
                    FROM online.bl_hdfs_maidian_updates
                    WHERE partition_date >= {yesterday_str}
                      AND partition_date < {today_str}
                      AND action = 'on_click_card'
                      AND page_name='welfare_home'
                      AND params['card_type'] ='search_word'
                      AND params['in_page_pos']='大家都在搜'

                    UNION ALL
                    -- highlight words, app minor version 27 and above
                    SELECT partition_date,params['card_name'] as query,cl_id
                    FROM online.bl_hdfs_maidian_updates
                    WHERE partition_date >= {yesterday_str}
                      AND partition_date < {today_str}
                      AND int(split(app_version,'\\.')[1]) >= 27
                      AND action='on_click_card'
                      AND params['card_type']='highlight_word'
                )t1
                JOIN
                (   -- device dimensions; array(x,'合计') duplicates every row into its own
                    -- bucket plus a grand-total (合计) bucket once exploded.
                    -- '老活' = returning users, '新增' = new users, '其他' = other channels
                    SELECT partition_date,device_id,t2.active_type,t2.channel,t2.device_os_type
                    FROM
                    (
                        SELECT partition_date,m.device_id
                              ,array(device_os_type,'合计') as device_os_type
                              ,array(case WHEN active_type = '4' THEN '老活'
                                          WHEN active_type in ('1','2') then '新增' END,'合计') as active_type
                              ,array(CASE WHEN is_ai_channel = 'true' THEN 'AI' ELSE '其他' END,'合计') as channel
                        FROM online.ml_device_day_active_status m
                        LEFT JOIN
                        (SELECT code,is_ai_channel,partition_day
                         FROM DIM.DIM_AI_CHANNEL_ZP_NEW
                         WHERE partition_day >= {yesterday_str} AND partition_day < {today_str}) tmp
                        ON m.partition_date=tmp.partition_day AND first_channel_source_type=code
                        WHERE partition_date >= {yesterday_str}
                          AND partition_date < {today_str}
                          AND active_type in ('1','2','4')
                    ) mas
                    LATERAL VIEW explode(mas.channel) t2 AS channel
                    LATERAL VIEW explode(mas.device_os_type) t2 AS device_os_type
                    LATERAL VIEW explode(mas.active_type) t2 AS active_type
                )t2
                on t1.cl_id=t2.device_id AND t1.partition_date = t2.partition_date
                GROUP BY query,active_type,device_os_type,channel
            )t
        )t3
        on t1.query=t3.query
           and t1.active_type=t3.active_type
           and t1.device_os_type = t3.device_os_type
           AND t1.channel = t3.channel
        LEFT JOIN
        (   -- hexin = core-card clicks, neirong = content-card clicks
            SELECT t1.query,active_type,device_os_type,channel
                  ,sum(hexin) as hexin_card_click_pv
                  ,sum(neirong) as neirong_card_click_pv
            FROM
            (
                SELECT NVL(t2.partition_date,t3.partition_date) as partition_date
                      ,NVL(t2.cl_id,t3.cl_id) as cl_id
                      ,NVL(t2.query,t3.query) as query
                      ,NVL(t2.pv,0) as hexin
                      ,NVL(t3.pv,0) as neirong
                FROM
                (   -- core cards: services, hospitals, doctors
                    SELECT partition_date,params['query'] as query,cl_id,count(1) as pv
                    FROM online.bl_hdfs_maidian_updates
                    WHERE partition_date >= {yesterday_str}
                      AND partition_date < {today_str}
                      AND ((action in ('search_result_click_recommend_item','search_result_welfare_click_item'
                                      ,'search_result_hospital_click_item','search_result_doctor_click_item'
                                      ,'on_click_doctor_card','on_click_hospital_card')
                            AND page_name in ('search_result_more','search_result_welfare'
                                             ,'search_result_hospital','search_result_doctor'))
                        or (action = 'goto_welfare_detail' AND params['from'] = 'search_result_welfare_recommend')
                        or (action = 'on_click_card' AND params['card_content_type'] in ('service','hospital','doctor')
                            AND page_name in ('search_result_more','search_result_welfare'
                                             ,'search_result_hospital','search_result_doctor'))
                        or (action = 'on_click_button' AND params['button_name'] = 'check_plan'
                            AND page_name = 'search_result_more'))
                    GROUP BY partition_date,params['query'],cl_id
                )t2
                FULL JOIN
                (   -- content cards: topics, diaries, answers
                    SELECT partition_date,params['query'] as query,cl_id,count(1) as pv
                    FROM online.bl_hdfs_maidian_updates
                    WHERE partition_date >= {yesterday_str}
                      AND partition_date < {today_str}
                      AND ((action in ('on_click_topic_card','on_click_diary_card','search_result_click_infomation_item')
                            AND page_name in ('search_result_more','search_result_diary','search_result_post'))
                        or (action = 'on_click_card' AND params['card_content_type'] in ('answer','diary')
                            AND page_name in ('search_result_more','search_result_diary','search_result_question_answer')))
                    GROUP BY partition_date,params['query'],cl_id
                )t3
                on t3.partition_date=t2.partition_date AND t3.query=t2.query AND t3.cl_id=t2.cl_id
            )t1
            JOIN
            (   -- same device-dimension roll-up as above
                SELECT partition_date,device_id,t2.active_type,t2.channel,t2.device_os_type
                FROM
                (
                    SELECT partition_date,m.device_id
                          ,array(device_os_type,'合计') as device_os_type
                          ,array(case WHEN active_type = '4' THEN '老活'
                                      WHEN active_type in ('1','2') then '新增' END,'合计') as active_type
                          ,array(CASE WHEN is_ai_channel = 'true' THEN 'AI' ELSE '其他' END,'合计') as channel
                    FROM online.ml_device_day_active_status m
                    LEFT JOIN
                    (SELECT code,is_ai_channel,partition_day
                     FROM DIM.DIM_AI_CHANNEL_ZP_NEW
                     WHERE partition_day >= {yesterday_str} AND partition_day < {today_str}) tmp
                    ON m.partition_date=tmp.partition_day AND first_channel_source_type=code
                    WHERE partition_date >= {yesterday_str}
                      AND partition_date < {today_str}
                      AND active_type in ('1','2','4')
                ) mas
                LATERAL VIEW explode(mas.channel) t2 AS channel
                LATERAL VIEW explode(mas.device_os_type) t2 AS device_os_type
                LATERAL VIEW explode(mas.active_type) t2 AS active_type
            )dev
            on t1.cl_id=dev.device_id and t1.partition_date = dev.partition_date
            GROUP BY t1.query,active_type,device_os_type,channel
        )t4
        on t1.query=t4.query
           and t1.active_type=t4.active_type
           and t1.device_os_type = t4.device_os_type
           AND t1.channel = t4.channel
    """.format(today_str=today_str, yesterday_str=yesterday_str)
    device_df = spark.sql(sql)
    device_df.createOrReplaceTempView("data_table")
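
    # A small sanity check (an addition, not part of the original job): make sure the
    # intermediate view is non-empty before the final roll-up, so that a missing
    # upstream partition fails loudly instead of silently producing an empty report.
    row_cnt = spark.sql("SELECT count(1) AS cnt FROM data_table").collect()[0]["cnt"]
    if row_cnt == 0:
        print("WARNING: data_table is empty for partitions [%s, %s)" % (yesterday_str, today_str))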

    # The original used "SELECT *" together with GROUP BY, which is invalid; the grouping
    # keys and aggregates are listed explicitly instead. Ratios print '-' when 1_uv is 0.
    collects_sql = """
        SELECT device_type,active_type,channel_type
              ,NVL(sum(`1_pv`),0) as `1_pv`
              ,NVL(sum(`1_uv`),0) as `1_uv`
              ,NVL(sum(`1_search_core_pv`),0) as `1_search_core_pv`
              ,NVL(sum(`1_search_pv`),0) as `1_search_pv`
              ,if(NVL(sum(`1_uv`),0) <> 0
                 ,concat(cast((NVL(sum(`1_search_core_pv`),0)/NVL(sum(`1_uv`),0)) as decimal(18,2)),'')
                 ,'-') as `1_core_pv_division_uv`
              ,if(NVL(sum(`1_uv`),0) <> 0
                 ,concat(cast((NVL(sum(`1_search_pv`),0)/NVL(sum(`1_uv`),0)) as decimal(18,2)),'')
                 ,'-') as `1_pv_division_uv`
        FROM data_table
        GROUP BY device_type,active_type,channel_type
    """
    final_df = spark.sql(collects_sql)
    final_df.show(1, False)
    sql_res = final_df.collect()
    for res in sql_res:
        print(res)
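
# The unused xlwt and send_file_email imports above suggest this report was meant to be
# exported and mailed. A minimal export sketch with the already-imported xlwt (the file
# name and sheet layout are assumptions; send_file_email is left out because its
# signature is not shown in this file):
workbook = xlwt.Workbook(encoding="utf-8")
sheet = workbook.add_sheet("search_strategy_d")
for row_idx, row in enumerate(sql_res):
    for col_idx, value in enumerate(row):
        sheet.write(row_idx, col_idx, str(value))  # xlwt cannot write Row objects directly
workbook.save("search_strategy_d_%s.xls" % today_str)  # assumed output path

# startTime is recorded at the top of the script but never reported; print the elapsed
# wall-clock time so scheduled runs can be monitored.
print("job finished in %.1f s" % (time.time() - startTime))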