# -*- coding:UTF-8 -*-
# @Time : 2020/9/4 17:07
# @File : search_meigou_ctr.py
# @email : litao@igengmei.com
# @author : litao

import hashlib
import datetime
import time

import pymysql
from pyspark import SparkConf
from pyspark.sql import SparkSession

# Unused in this job but kept from the original header for reference:
# import json, xlwt, redis, sys
# from maintenance.func_send_email_with_file import send_file_email
# from typing import Dict, List
# from elasticsearch_7 import Elasticsearch
# from elasticsearch_7.helpers import scan
# from pyhive import hive
# from pyspark.sql import DataFrame
# from pyspark.sql.functions import lit
# import pytispark.pytispark as pti


def con_sql(sql):
    """Fetch all rows for `sql` from the TiDB jerry_prod database."""
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user',
                         passwd='aqpuBLYzEV7tML5RPsN1pntUzFy', db='jerry_prod')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    db.close()
    return result


startTime = time.time()

sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
# NOTE: this second set() silently overwrites the line above, and 4000 is a
# TiDB SQL port rather than a PD port; likely a leftover, kept as in the original.
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("search_diary_ctr")

spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .appName("search_diary_ctr")
         .enableHiveSupport()
         .getOrCreate())

spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")

task_list = []
task_days = 3

# Backfill the last task_days - 1 days, one day per iteration.
for t in range(1, task_days):
    day_num = 0 - t
    now = datetime.datetime.now() + datetime.timedelta(days=day_num)
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_ago_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")

    sql_search_ctr = r"""
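    -- Per-day search-result CTR by active_type and device_os_type:
    --   exp         = per-device precise exposures LEFT JOINed with per-device clicks
    --   dev_channel = device dimension buckets (with '合计'/"total" rollup values)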
    SELECT exp.partition_date as partition_date
        ,active_type
        ,device_os_type
        ,sum(service_exp_pv) as service_exp_pv
        ,sum(neirong_exp_pv) as neirong_exp_pv
        ,sum(service_click_pv) as service_click_pv
        ,sum(neirong_click_pv) as neirong_click_pv
    FROM
    (
        SELECT t1.partition_day as partition_date,device_id
            ,service_exp_pv,neirong_exp_pv,service_click_pv,neirong_click_pv
        FROM
        (   -- Precise card exposures on search result pages.
            -- NOTE: card_content_type IN ('diary') below means every row here is
            -- 'diary', so service_exp_pv is always 0; kept as in the original.
            SELECT partition_day,
                device_id,
                count(CASE WHEN card_content_type='service' THEN 1 END) as service_exp_pv,
                count(CASE WHEN card_content_type<>'service' THEN 1 END) as neirong_exp_pv
            FROM
            (
                SELECT device_id,partition_day,card_content_type
                FROM ml.mid_ml_c_et_pe_preciseexposure_dimen_d
                WHERE partition_day >= '{partition_day}' and partition_day < '{end_date}'
                    and action in ('page_precise_exposure','home_choiceness_card_exposure')
                    and is_exposure = '1'
                    and page_code in ('search_result_diary','search_result_doctor','search_result_hospital','search_result_more'
                        ,'search_result_more_infomation','search_result_more_user','search_result_post','search_result_welfare'
                        ,'search_result_wiki','search_result_question_answer')
                    AND card_content_type IN ('diary')
            )a
            group by partition_day,card_content_type,device_id
        )t1
        LEFT JOIN
        (   -- Card clicks on search result pages.
            SELECT cl_id,partition_date
                ,sum(CASE WHEN card_content_type='service' THEN click_pv END) as service_click_pv
                ,sum(CASE WHEN card_content_type='neirong' THEN click_pv END) as neirong_click_pv
            FROM
            (
                SELECT partition_date,cl_id,'service' as card_content_type,count(1) as click_pv
                FROM online.bl_hdfs_maidian_updates
                WHERE partition_date >= '{partition_day}'
                    AND partition_date < '{end_date}'
                    AND ((action in ('search_result_click_recommend_item','search_result_welfare_click_item')
                            AND page_name in ('search_result_more','search_result_welfare'))
                        or (action = 'goto_welfare_detail' AND params['from'] = 'search_result_welfare_recommend')
                        or (action = 'on_click_card' AND params['card_content_type'] in ('service')
                            AND page_name in ('search_result_more','search_result_welfare')))
                GROUP BY partition_date,cl_id,'service'

                UNION ALL

                SELECT partition_date,cl_id,'neirong' as card_content_type,count(1) as click_pv
                FROM online.bl_hdfs_maidian_updates
                WHERE partition_date >= '{partition_day}'
                    AND partition_date < '{end_date}'
                    AND ((action in ('on_click_topic_card','on_click_diary_card','search_result_click_infomation_item')
                            AND page_name in ('search_result_more','search_result_diary','search_result_post'))
                        or (action = 'on_click_card' AND params['card_content_type'] in ('answer','diary')
                            AND page_name in ('search_result_more','search_result_diary','search_result_question_answer')))
                GROUP BY partition_date,cl_id,'neirong'
            )t2
            GROUP BY cl_id,partition_date
        )t2
        ON t1.partition_day=t2.partition_date AND t1.device_id=t2.cl_id
    )exp
    JOIN
    (
        SELECT partition_date,device_id,t2.active_type,t2.channel,t2.device_os_type
        FROM
        (
            SELECT partition_date,m.device_id
                ,array(device_os_type,'合计') as device_os_type              -- '合计' = "total" bucket
                ,array(case WHEN active_type = '4' THEN '老活'               -- '老活' = returning
                            WHEN active_type in ('1','2') then '新增' END    -- '新增' = new
                      ,'合计') as active_type
                ,array(CASE WHEN is_ai_channel = 'true' THEN 'AI' ELSE '其他' END  -- '其他' = other
                      ,'合计') as channel
            FROM online.ml_device_day_active_status m
            LEFT JOIN
            (
                SELECT code,is_ai_channel,partition_day
                FROM DIM.DIM_AI_CHANNEL_ZP_NEW
                WHERE partition_day >= '{partition_day}' AND partition_day < '{end_date}'
            ) tmp
            ON m.partition_date=tmp.partition_day AND first_channel_source_type=code
            where partition_date >= '{partition_day}'
                AND partition_date < '{end_date}'
                AND active_type in ('1','2','4')
        ) mas
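        -- The array(...,'合计') columns above pair each real bucket value with a
        -- "total" bucket; the explode()s below fan every device row out into each
        -- combination, so the outer GROUP BY also produces rollup-total rows.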
        LATERAL VIEW explode(mas.channel) t2 AS channel
        LATERAL VIEW explode(mas.device_os_type) t2 AS device_os_type
        LATERAL VIEW explode(mas.active_type) t2 AS active_type
    )dev_channel
    ON dev_channel.device_id = exp.device_id
        AND dev_channel.partition_date = exp.partition_date
    GROUP BY exp.partition_date,active_type,device_os_type
    """.format(partition_day=yesterday_str, end_date=today_str)

    print(sql_search_ctr)
    search_ctr_df = spark.sql(sql_search_ctr)
    # spam_pv_df.createOrReplaceTempView("dev_view")
    search_ctr_df.show(1)
    sql_res = search_ctr_df.collect()
    print("-------------------------------")
    for res in sql_res:
        print(res)
        device_os_type = res.device_os_type
        active_type = res.active_type
        partition_date = yesterday_str
        # Deterministic row id, so re-runs overwrite the same row via REPLACE INTO.
        pid = hashlib.md5((partition_date + device_os_type + active_type).encode("utf8")).hexdigest()
        # Guard against NULL sums (the LEFT JOIN can leave click counts empty,
        # which would otherwise render as the literal None in the SQL below).
        click_num = res.neirong_click_pv or 0
        exposure = res.neirong_exp_pv or 0
        try:
            search_ctr = round(click_num / exposure, 5)
        except ZeroDivisionError:
            search_ctr = 0
        insert_sql = """replace into search_diary_ctr(
            partition_date,device_os_type,active_type,pid,click_num,exposure,search_ctr)
            VALUES('{partition_date}','{device_os_type}','{active_type}','{pid}',{click_num},{exposure},{search_ctr});""".format(
            partition_date=partition_date, device_os_type=device_os_type, active_type=active_type,
            pid=pid, click_num=click_num, exposure=exposure, search_ctr=search_ctr)
        print(insert_sql)
        # cursor.execute("set names 'UTF8'")
        # A fresh connection per row is wasteful but kept from the original;
        # db.close() is added so connections are not leaked across iterations.
        db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user',
                             passwd='aqpuBLYzEV7tML5RPsN1pntUzFy', db='jerry_prod')
        cursor = db.cursor()
        affected = cursor.execute(insert_sql)
        db.commit()
        db.close()
        print(affected)
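
# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original job) of how the per-row
# connect/execute/commit loop above could be batched through one connection
# with pymysql's executemany. `write_ctr_rows` is a hypothetical helper; the
# table, columns, and credentials are the ones used by insert_sql above.
# ---------------------------------------------------------------------------
def write_ctr_rows(rows):
    """rows: iterable of (partition_date, device_os_type, active_type,
    pid, click_num, exposure, search_ctr) tuples."""
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user',
                         passwd='aqpuBLYzEV7tML5RPsN1pntUzFy', db='jerry_prod')
    try:
        with db.cursor() as cursor:
            # Parameterized REPLACE avoids interpolating values into the SQL text.
            cursor.executemany(
                "replace into search_diary_ctr"
                "(partition_date,device_os_type,active_type,pid,click_num,exposure,search_ctr) "
                "values (%s,%s,%s,%s,%s,%s,%s)",
                list(rows))
        db.commit()
    finally:
        db.close()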