# -*- coding:UTF-8 -*-
# @Time : 2020/9/4 17:07
# @File : search_meigou_ctr.py
# @email : litao@igengmei.com
# @author : litao

import hashlib
import json
import pymysql
import xlwt, datetime
import redis
# from pyhive import hive
from maintenance.func_send_email_with_file import send_file_email
from typing import Dict, List
from elasticsearch_7 import Elasticsearch
from elasticsearch_7.helpers import scan
import sys
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
# from pyspark.sql.functions import lit
# import pytispark.pytispark as pti


def con_sql(sql):
    # Fetch all rows for the given SQL from the TiDB jerry_prod database.
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user',
                         passwd='aqpuBLYzEV7tML5RPsN1pntUzFy', db='jerry_prod')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    db.close()
    return result


startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
# NOTE: the second set() below overrides the first, so only 172.16.40.170:4000 takes effect.
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("search_answer_ctr")
spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .enableHiveSupport().getOrCreate())

spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")

task_list = []
task_days = 3

# Backfill the last (task_days - 1) days, one day per iteration.
for t in range(1, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
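    # The query below joins per-device exposures on the answer search-result page
    # (subquery C) with per-device clicks (subquery T), attaches device attributes
    # (subquery D), and drops spam devices plus staff / doctor / puppet accounts
    # before aggregating CTR inputs by DEVICE_OS_TYPE and ACTIVE_TYPE.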
    sql_search_ctr = r"""
        SELECT D.ACTIVE_TYPE,
               D.DEVICE_OS_TYPE,
               sum(T.CLICK_NUM) as CLICK_NUM,
               sum(C.EXPOSURE) as EXPOSURE
        FROM
          (SELECT T.DEVICE_ID as DEVICE_ID,    -- device ID
                  T.CARD_ID as CARD_ID,        -- card ID
                  COUNT(T.CARD_ID) AS EXPOSURE -- exposure count
           FROM ML.MID_ML_C_ET_PE_PRECISEEXPOSURE_DIMEN_D T
           WHERE T.PARTITION_DAY = '{partition_day}'
             AND T.PAGE_CODE = 'search_result_question_answer'
           GROUP BY T.DEVICE_ID, T.CARD_ID) C
        LEFT JOIN
          (SELECT T.DEVICE_ID,                   -- device ID
                  T.CARD_ID,                     -- card ID
                  SUM(T.CLICK_NUM) AS CLICK_NUM  -- click count
           FROM ML.ML_C_ET_CK_CLICK_DIMEN_D T
           WHERE T.PARTITION_DAY = '{partition_day}'
             AND T.PAGE_CODE = 'search_result_question_answer'
             AND T.ACTION IN ('on_click_card')
           GROUP BY T.DEVICE_ID, T.CARD_ID) T
          ON C.DEVICE_ID = T.DEVICE_ID AND C.CARD_ID = T.CARD_ID
        LEFT JOIN
          (SELECT T.DEVICE_ID, T.DEVICE_OS_TYPE, T.ACTIVE_TYPE
           FROM ML.ML_C_CT_DV_DEVICE_DIMEN_D T
           WHERE T.PARTITION_DAY = '{partition_day}'
             AND T.ACTIVE_TYPE IN ('1', '2', '4')) D
          ON C.DEVICE_ID = D.DEVICE_ID
        LEFT JOIN
          (SELECT DISTINCT device_id
           FROM ml.ml_d_ct_dv_devicespam_d  -- exclude agency click-farm devices, i.e. cheating devices (removed from view and exposure events)
           WHERE partition_day = '{partition_day}'
           UNION ALL
           SELECT DISTINCT device_id
           FROM dim.dim_device_user_staff   -- exclude intranet (staff) users
          ) spam_pv
          ON spam_pv.device_id = T.DEVICE_ID
        LEFT JOIN
          (SELECT partition_date, device_id
           FROM
             (-- first device_id each user_id was active on that day
              SELECT user_id, partition_date,
                     if(size(device_list) > 0, device_list[0], '') AS device_id
              FROM online.ml_user_updates
              WHERE partition_date >= '{partition_day}'
                AND partition_date < '{end_date}') t1
           JOIN
             (-- doctor accounts
              SELECT distinct user_id
              FROM online.tl_hdfs_doctor_view
              WHERE partition_date = '{partition_day}'
              UNION ALL
              -- puppet accounts / model users
              SELECT user_id
              FROM ml.ml_c_ct_ui_user_dimen_d
              WHERE partition_day = '{partition_day}'
                AND (is_puppet = 'true' or is_classifyuser = 'true')
              UNION ALL
              -- company intranet users
              SELECT distinct user_id
              FROM dim.dim_device_user_staff
              UNION ALL
              -- devices a doctor has logged in on
              SELECT distinct t1.user_id
              FROM
                (SELECT user_id, v.device_id as device_id
                 FROM online.ml_user_history_detail
                 LATERAL VIEW EXPLODE(device_history_list) v AS device_id
                 WHERE partition_date = '{partition_day}') t1
              JOIN
                (SELECT device_id
                 FROM online.ml_device_history_detail
                 WHERE partition_date = '{partition_day}'
                   AND is_login_doctor = '1') t2
                ON t1.device_id = t2.device_id) t2
             ON t1.user_id = t2.user_id
           GROUP BY partition_date, device_id) dev
          ON T.DEVICE_ID = dev.device_id
        WHERE (spam_pv.device_id IS NULL OR spam_pv.device_id = '')
          AND (dev.device_id IS NULL OR dev.device_id = '')
        GROUP BY D.DEVICE_OS_TYPE, D.ACTIVE_TYPE
    """.format(partition_day=yesterday_str, end_date=today_str)
    print(sql_search_ctr)
    search_ctr_df = spark.sql(sql_search_ctr)
    # spam_pv_df.createOrReplaceTempView("dev_view")
    search_ctr_df.show(1)
    sql_res = search_ctr_df.collect()

    # Buckets keyed by active type ("新增" = new devices, "老活" = returning
    # devices) and OS; the keys are written to MySQL as-is, so they stay in Chinese.
    res_dict = {
        "新增": {
            "ios": {"click_num": 0, "exposure": 0},
            "android": {"click_num": 0, "exposure": 0}
        },
        "老活": {
            "ios": {"click_num": 0, "exposure": 0},
            "android": {"click_num": 0, "exposure": 0}
        }
    }
    print("-------------------------------")
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user',
                         passwd='aqpuBLYzEV7tML5RPsN1pntUzFy', db='jerry_prod')
    cursor = db.cursor()
    for res in sql_res:
        print(res)
        if res.ACTIVE_TYPE:
            # ACTIVE_TYPE 1/2 count as new devices; everything else (4) as returning.
            if res.ACTIVE_TYPE in ('1', '2'):
                if res.CLICK_NUM:
                    res_dict["新增"][res.DEVICE_OS_TYPE]["click_num"] += res.CLICK_NUM
                if res.EXPOSURE:
                    res_dict["新增"][res.DEVICE_OS_TYPE]["exposure"] += res.EXPOSURE
            else:
                if res.CLICK_NUM:
                    res_dict["老活"][res.DEVICE_OS_TYPE]["click_num"] += res.CLICK_NUM
                if res.EXPOSURE:
                    res_dict["老活"][res.DEVICE_OS_TYPE]["exposure"] += res.EXPOSURE
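    # Flush the aggregated buckets to TiDB. REPLACE INTO keyed on pid (an md5 of
    # partition_date + device_os_type + active_type) should make re-runs of the
    # same day idempotent, assuming pid is a unique or primary key on the
    # search_answer_ctr table (not confirmed by this script).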
    for active_type in res_dict:
        for device_os_type in res_dict[active_type]:
            partition_date = yesterday_str
            pid = hashlib.md5((partition_date + device_os_type + active_type).encode("utf8")).hexdigest()
            click_num = res_dict[active_type][device_os_type]["click_num"]
            exposure = res_dict[active_type][device_os_type]["exposure"]
            try:
                search_ctr = round(click_num / exposure, 5)
            except ZeroDivisionError:
                # No exposures for this bucket, so CTR defaults to 0.
                search_ctr = 0
            insert_sql = """replace into search_answer_ctr(
                partition_date,device_os_type,active_type,pid,click_num,exposure,search_ctr)
                VALUES('{partition_date}','{device_os_type}','{active_type}','{pid}',{click_num},{exposure},{search_ctr});""".format(
                partition_date=partition_date,
                device_os_type=device_os_type,
                active_type=active_type,
                pid=pid,
                click_num=click_num,
                exposure=exposure,
                search_ctr=search_ctr
            )
            print(insert_sql)
            # cursor.execute("set names 'UTF8'")
            res = cursor.execute(insert_sql)
            db.commit()
            print(res)
            # cursor.executemany()
    db.close()
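
# The target table is not created by this script. A minimal sketch of the assumed
# schema, for reference only: column names come from the REPLACE INTO above, while
# the types and primary key are assumptions, not confirmed anywhere in this file.
#
#   CREATE TABLE IF NOT EXISTS search_answer_ctr (
#       partition_date varchar(8)  NOT NULL,
#       device_os_type varchar(16) NOT NULL,
#       active_type    varchar(8)  NOT NULL,
#       pid            varchar(32) NOT NULL,  -- md5(partition_date + device_os_type + active_type)
#       click_num      bigint,
#       exposure       bigint,
#       search_ctr     double,
#       PRIMARY KEY (pid)  -- REPLACE INTO needs a unique key to overwrite existing rows
#   );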