# -*- coding:UTF-8 -*-
# @Time  : 2020/9/8 13:39
# @File  : spark_test.py
# @email : litao@igengmei.com
# @author : litao


import hashlib
import json

import pymysql
import xlwt
import datetime
import redis
# from pyhive import hive
from maintenance.func_send_email_with_file import send_file_email
from typing import Dict, List
from elasticsearch_7 import Elasticsearch
from elasticsearch_7.helpers import scan
import sys
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame


# from pyspark.sql.functions import lit
# import pytispark.pytispark as pti

exists_es_dic = {}
es = Elasticsearch([
    {
        'host': '172.16.31.17',
        'port': 9200,
    }, {
        'host': '172.16.31.11',
        'port': 9200,
    }])
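
# Optional sanity check (a sketch, not part of the original flow): fail fast if
# the cluster is unreachable. es.ping() returns False instead of raising.
# if not es.ping():
#     raise RuntimeError("Elasticsearch cluster is unreachable")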

def con_sql(sql):
    # Fetch all rows for the given SQL statement from the database.
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
                         db='jerry_prod')
    cursor = db.cursor()
    try:
        cursor.execute(sql)
        result = cursor.fetchall()
    finally:
        cursor.close()
        db.close()
    return result
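
# Example usage of con_sql (a sketch; the table name below is purely illustrative):
# rows = con_sql("SELECT count(*) FROM some_table")
# print(rows)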


startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("test")

spark = (SparkSession.builder.config(conf=sparkConf).config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379").enableHiveSupport().getOrCreate())

spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")


# print(huidu_device_id_sql)
# huidu_device_id_df = spark.sql(huidu_device_id_sql)
# huidu_device_id_df.createOrReplaceTempView("dev_view")
sql_search_ctr = r"""
SELECT query
     , t3.all_search_uv as all_search_uv --overall search UV
     , t3.all_search_pv as all_search_pv --overall search PV
FROM (
         --search PV/UV
         SELECT query
              , count(click.cl_id)          as all_search_pv
              , count(distinct click.cl_id) as all_search_uv
         FROM (
                  SELECT cl_id,
                         partition_date,
                         action,
                         params['page_name']  as page_name,
                         params['input_type'] as input_type,
                         app_version,
                         params['query']      as query
                  FROM online.bl_hdfs_maidian_updates
                  WHERE partition_date >= '{start_date}'
                    AND partition_date < '{end_date}'
                    AND ((action = 'do_search' AND params['input_type'] <> 'everyone_watch') or
                         action = 'search_result_click_search')

                  UNION all
                  SELECT cl_id,
                         partition_date,
                         action,
                         params['page_name']  as page_name,
                         params['input_type'] as input_type,
                         app_version,
                         params['query']      as query
                  FROM online.bl_hdfs_maidian_updates
                  WHERE partition_date >= '{start_date}'
                    AND partition_date < '{end_date}'
                    AND action = 'do_search'
                    and params['input_type'] = 'everyone_watch'
                    and params['tab'] = '精选'
                    and params['page_name'] = 'home'
                    AND params['query'] not in ('AI测颜值', 'AI测肤质') --these two terms do not lead to a search results page

                  union all
                  SELECT cl_id,
                         partition_date,
                         action,
                         'search_home'   as page_name,
                         ''              as input_type,
                         app_version,
                         params['query'] as query
                  FROM online.bl_hdfs_maidian_updates
                  WHERE partition_date >= '{start_date}'
                    AND partition_date < '{end_date}'
                    AND action = 'on_click_card'
                    AND params['page_name'] = 'search_home'

                  union all
                  SELECT cl_id,
                         partition_date,
                         action,
                         'home'              as page_name,
                         '首页-猜你喜欢'           as input_type,
                         app_version,
                         params['card_name'] as query
                  FROM online.bl_hdfs_maidian_updates
                  WHERE partition_date >= '{start_date}'
                    AND partition_date < '{end_date}'
                    AND action = 'on_click_card'
                    AND params['in_page_pos'] = '猜你喜欢'
                    --AND params['tab_name']='精选'
                    AND params['card_type'] = 'search_word'
                    AND params['card_name'] not in ('AI测颜值', 'AI测肤质') --these two terms do not lead to a search results page
                    --AND page_name='home' (page_name is empty on Android)

                  union all
                  SELECT cl_id,
                         partition_date,
                         action,
                         params['page_name'] as page_name,
                         '美购首页-大家都在搜'        as input_type,
                         app_version,
                         params['card_name'] as query
                  FROM online.bl_hdfs_maidian_updates
                  WHERE partition_date >= '{start_date}'
                    AND partition_date < '{end_date}'
                    AND action = 'on_click_card'
                    AND params['page_name'] = 'welfare_home'
                    AND params['card_type'] = 'search_word'
                    AND params['in_page_pos'] = '大家都在搜'

                  union all
                  SELECT cl_id,
                         partition_date,
                         action,
                         params['page_name'] as page_name,
                         '高亮词'               as input_type,
                         app_version,
                         params['card_name'] as query
                  FROM online.bl_hdfs_maidian_updates
                  WHERE partition_date >= '{start_date}'
                    AND partition_date < '{end_date}'
                    AND action = 'on_click_card'
                    AND params['card_type'] = 'highlight_word'
              ) click
         GROUP BY query
     ) t3 order by all_search_uv asc

""".format(start_date='20201017',end_date='20201116')

print(sql_search_ctr)
search_ctr_df = spark.sql(sql_search_ctr)
# spam_pv_df.createOrReplaceTempView("dev_view")
search_ctr_df.show(1)
sql_res = search_ctr_df.collect()

print("-------------------------------")
for res in sql_res:
    print(res)
    # print(res.query,res.search_pv)
    # results = es.search(
    #     index='gm-dbmw-diary-read',
    #     doc_type='diary',
    #     timeout='10s',
    #     body=body
    # )
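
# Follow-up sketch under stated assumptions: dump the collected rows to an Excel
# file with xlwt (imported above). Column order follows the SQL aliases; the
# output filename is arbitrary.
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet("search_uv_pv")
for col, title in enumerate(("query", "all_search_uv", "all_search_pv")):
    sheet.write(0, col, title)  # header row
for row_idx, res in enumerate(sql_res, start=1):
    sheet.write(row_idx, 0, res.query)
    sheet.write(row_idx, 1, res.all_search_uv)
    sheet.write(row_idx, 2, res.all_search_pv)
workbook.save("search_uv_pv.xls")

print("total cost: %.1fs" % (time.time() - startTime))
spark.stop()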