# -*- coding:UTF-8 -*-
# @Time  : 2020/9/11 17:37
# @File  : ecommerce_income_report.py
# @email : litao@igengmei.com
# @author : litao


import hashlib
import datetime
import time

import pymysql
from pyspark import SparkConf
from pyspark.sql import SparkSession


def con_sql(sql):
    """Run a query against TiDB (jerry_prod) and return all fetched rows."""
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
                         db='jerry_prod')
    try:
        cursor = db.cursor()
        cursor.execute(sql)
        result = cursor.fetchall()
    finally:
        # Close the connection even if the query raises.
        db.close()
    return result
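# Example usage (hypothetical table name, for illustration only):
#   rows = con_sql("SELECT partition_date, pv FROM some_report_table LIMIT 10")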


startTime = time.time()
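# Spark session configuration: the TiSpark settings point the job at the
# TiDB/PD cluster, and the prod.*.jdbcuri entries stash JDBC endpoints in the
# Spark conf (they are not read directly in this script).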
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("search_diary_ctr")
spark = (SparkSession.builder.config(conf=sparkConf).config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379").appName(
    "search_diary_ctr").enableHiveSupport().getOrCreate())

spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")

task_days = 3
for t in range(1, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_ago_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")

    # CPT average daily clicks
    cpc_daily_click_sql = r"""
SELECT partition_date,count(1) as pv
FROM online.bl_hdfs_maidian_updates
WHERE partition_date >=  '{start_date}'
and partition_date < '{end_date}' 
AND ((ACTION = 'search_result_welfare_click_item' AND PAGE_NAME = 'search_result_welfare' AND PARAMS['transaction_type'] = 'advertise')
OR (ACTION = 'goto_welfare_detail' AND PARAMS['from'] = 'category' AND PARAMS['transaction_type'] = 'operating' AND PARAMS['tab_name'] = 'service')
OR (ACTION = 'goto_welfare_detail' AND PARAMS['from'] = 'welfare_home_list_item' and PARAMS['transaction_type'] = 'advertise')
OR (ACTION = 'goto_welfare_detail' AND PARAMS['from'] = 'welfare_list' AND PARAMS['transaction_type'] = 'advertise') 
OR (ACTION = 'on_click_card' AND PARAMS['card_content_type'] = 'service' AND PARAMS['page_name'] IN ('new_sign','search_result_welfare','category','welfare_home_list_item','welfare_list') AND PARAMS['transaction_type'] = 'advertise')) 
group BY partition_date
    """.format(partition_day=yesterday_str, end_date=today_str)

    print(cpc_daily_click_sql)
    cpc_daily_click_df = spark.sql(cpc_daily_click_sql)
    cpc_daily_click_df.createOrReplaceTempView("cpc_daily_click")
    cpc_daily_click_df.show(1)
    cpc_click_res = cpc_daily_click_df.collect()


    # Service (welfare) detail page PV
    bus_detail_sql = r"""
SELECT 
    partition_date,count(1) welfare_pv
FROM 
(
    SELECT cl_id,partition_date
    FROM online.bl_hdfs_maidian_updates
    WHERE partition_date >='{start_date}'and partition_date < '{end_date}'
    AND action='page_view'
    AND params['page_name'] = 'welfare_detail'
)a1
JOIN
(
    SELECT device_id,partition_date
    from online.ml_device_day_active_status
    WHERE partition_date >='{start_date}'and partition_date < '{end_date}'
    AND active_type in ('1','2','4')  
)a2
on a2.device_id = a1.cl_id
AND a2.partition_date=a1.partition_date
group by partition_date
        """.format(partition_day=yesterday_str, end_date=today_str)

    print(bus_detail_sql)
    bus_detail_df = spark.sql(bus_detail_sql)
    bus_detail_df.createOrReplaceTempView("bus_detail")
    bus_detail_df.show(1)
    bus_detail_res = bus_detail_df.collect()


    # CPC same-day budget (effective-spend definition)
    cpc_budget_sql = r"""
SELECT day_id,sum(budget) as budget
FROM
(
    SELECT T1.day_id,T1.merchant_doctor_id,case when merchant_budget>=tot_service_budget then tot_service_budget else merchant_budget end as budget
    FROM
    (
        SELECT 
          substr(clicklog.create_time,1,10) AS day_id
          ,clicklog.merchant_doctor_id 
          ,max(merchant_budget) as merchant_budget -- merchant budget
        FROM 
        (
            SELECT id,promote_id,price,service_budget,merchant_budget,merchant_doctor_id,create_time,recharge
            FROM online.tl_hdfs_cpc_clicklog_view
            WHERE partition_date='{partition_date}'
            AND regexp_replace(substr(create_time,1,10),'-','')>= '{start_date}'
            AND regexp_replace(substr(create_time,1,10),'-','')<'{end_date}'
        )clicklog
        group by substr(clicklog.create_time,1,10),clicklog.merchant_doctor_id 
    )T1 
    LEFT JOIN
    (
        SELECT 
             day_id
             ,merchant_doctor_id
            ,sum(service_budget) as tot_service_budget
        FROM
        (
            SELECT 
              substr(clicklog.create_time,1,10) AS day_id
              ,clicklog.merchant_doctor_id,clicklog.service_id
              ,max(service_budget) as service_budget
            
            FROM 
            (
                SELECT id,promote_id,price,service_budget,merchant_budget,merchant_doctor_id,service_id,create_time
                FROM  online.tl_hdfs_cpc_clicklog_view
                WHERE partition_date='{partition_date}'
                AND regexp_replace(substr(create_time,1,10),'-','')>= '{start_date}'
                AND regexp_replace(substr(create_time,1,10),'-','')<'{end_date}'
            )clicklog
            GROUP BY substr(clicklog.create_time,1,10),clicklog.merchant_doctor_id,clicklog.service_id
        )service_budget
        GROUP BY day_id,merchant_doctor_id
    )T2 
    ON T1.day_id=T2.day_id
    AND T1.merchant_doctor_id=T2.merchant_doctor_id
)T
GROUP BY day_id
            """.format(partition_day=yesterday_str, end_date=today_str)

    print(cpc_budget_sql)
    cpc_budget_df = spark.sql(cpc_budget_sql)
    cpc_budget_df.createOrReplaceTempView("cpc_budget")
    cpc_budget_df.show(1)
    cpc_budget_res = cpc_budget_df.collect()

    # CPC income and total ad spend
    cpc_income_sql = r"""
    select partition_day,
sum(case when advertise_type = 'cpc' AND advertise_business_type in('service') and advertise_calculate_type='cpc_log' then cpc_click_num end)   cpc_click_num, -- same-day CPC service clicks
sum(case when advertise_type = 'cpc' AND advertise_business_type in('service') and advertise_calculate_type='cpc_flownext' then proportion_expend_amount end)  cpc_proportion_expend_amount, -- same-day total CPC income (rebate included)
sum(case when advertise_type = 'cpc' AND advertise_business_type in('service') and advertise_calculate_type='cpc_flownext' then proportion_expend_recharge_amount end)   cpc_proportion_expend_recharge_amount, -- same-day CPC income (rebate excluded)
SUM(CASE
      WHEN advertise_type = 'cpc' AND advertise_calculate_type = 'cpc_flownext' THEN
       proportion_expend_amount
      WHEN advertise_type = 'cpt' AND advertise_calculate_type = 'cpt_schedule' THEN
       proportion_expend_amount
      WHEN advertise_type IN ('browse', 'message', 'valueadded','rechargededuction') THEN
       proportion_expend_amount
      WHEN advertise_type = 'adjustment' AND advertise_calculate_type ='adjustment_flow' THEN
       proportion_expend_amount
      ELSE
       0
    END) tol_proportion_expend_amount -- total ad spend (pro-rata rebate included)

from ml.ml_c_ct_mc_merchantadclassify_indic_d
where partition_day>='{start_date}' AND partition_day <'{end_date}'
group by partition_day
 """.format(partition_day=yesterday_str, end_date=today_str)

    print(cpc_income_sql)
    cpc_income_df = spark.sql(cpc_income_sql)
    cpc_income_df.createOrReplaceTempView("cpc_income")
    cpc_income_df.show(1)
    cpc_income_res = cpc_income_df.collect()




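    # Persist the day's metrics to MySQL for the income report. NOTE: the
    # target table `ecommerce_income_report` and its column names below are
    # assumptions for illustration; align them with the real report schema
    # before running.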
    if cpc_click_res and bus_detail_res and cpc_budget_res and cpc_income_res:
        partition_date = yesterday_str
        pid = hashlib.md5(partition_date.encode("utf8")).hexdigest()
        # Hypothetical target schema; REPLACE keeps the job idempotent per day.
        insert_sql = """replace into ecommerce_income_report(
        partition_date,pid,cpc_click_pv,welfare_pv,cpc_budget,cpc_income) VALUES(
        '{partition_date}','{pid}',{cpc_click_pv},{welfare_pv},{cpc_budget},{cpc_income});""".format(
            partition_date=partition_date, pid=pid,
            cpc_click_pv=cpc_click_res[0]["pv"],
            welfare_pv=bus_detail_res[0]["welfare_pv"],
            cpc_budget=cpc_budget_res[0]["budget"],
            cpc_income=cpc_income_res[0]["cpc_proportion_expend_amount"],
        )
        print(insert_sql)
        db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user',
                             passwd='aqpuBLYzEV7tML5RPsN1pntUzFy', db='jerry_prod')
        cursor = db.cursor()
        try:
            res = cursor.execute(insert_sql)
            db.commit()
            print(res)
        finally:
            db.close()