Commit 08810d0e authored by litaolemo

update

parent 25a7b38f
# -*- coding:UTF-8 -*-
# @Time : 2020/8/20 9:42
# @File : recommend_strategy_d.py
# @email : litao@igengmei.com
# @author : litao
# -*- coding:UTF-8 -*-
# @Time : 2020/8/19 11:53
# @File : from_sparksql_to_mysql.py
# @File : conent_detail_page_grayscale_ctr.py
# @email : litao@igengmei.com
# @author : litao
import hashlib
......
# -*- coding:UTF-8 -*-
# @Time : 2020/9/4 17:07
# @File : search_meigou_ctr.py
# @email : litao@igengmei.com
# @author : litao
import hashlib
import json
import pymysql
import xlwt, datetime
import redis
# from pyhive import hive
from maintenance.func_send_email_with_file import send_file_email
from typing import Dict, List
from elasticsearch_7 import Elasticsearch
from elasticsearch_7.helpers import scan
import sys
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.functions import lit
import pytispark.pytispark as pti
db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
                     db='jerry_prod')
cursor = db.cursor()
def con_sql(sql):
    # Fetch rows from a database table
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
                         db='jerry_prod')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    db.close()
    return result
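# A minimal usage sketch for con_sql (the table and column names here are
# hypothetical, for illustration only):
# rows = con_sql("SELECT device_id, card_id FROM some_table LIMIT 10")
# for row in rows:
#     print(row)  # each row is a tuple, as returned by cursor.fetchall()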
startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
"jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
"jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
"jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .appName("LR PYSPARK TEST")
         .enableHiveSupport()
         .getOrCreate())
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
task_list = []
task_days = 1
for t in range(0, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
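    # For example, with t = 0 and a run date of 2020-09-05, the windows are:
    #   today_str == '20200905', yesterday_str == '20200904',
    #   one_week_age_str == '20200829', last_30_day_str == '20200806'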
# sql_dev_device_id = """
# SELECT partition_date,device_id
# FROM
# (-- find the first device id each user_id was active on that day
# SELECT user_id,partition_date,
# if(size(device_list) > 0, device_list [ 0 ], '') AS device_id
# FROM online.ml_user_updates
# WHERE partition_date>='{yesterday_str}' AND partition_date<'{today_str}'
# )t1
# JOIN
# ( -- doctor accounts
# SELECT distinct user_id
# FROM online.tl_hdfs_doctor_view
# WHERE partition_date = '{yesterday_str}'
#
# -- sockpuppet accounts / model users
# UNION ALL
# SELECT user_id
# FROM ml.ml_c_ct_ui_user_dimen_d
# WHERE partition_day = '{yesterday_str}'
# AND (is_puppet = 'true' or is_classifyuser = 'true')
#
# UNION ALL
# -- staff users on the company intranet
# select distinct user_id
# from dim.dim_device_user_staff
#
# UNION ALL
# -- devices a doctor account has logged in on
# SELECT distinct t1.user_id
# FROM
# (
# SELECT user_id, v.device_id as device_id
# FROM online.ml_user_history_detail
# LATERAL VIEW EXPLODE(device_history_list) v AS device_id
# WHERE partition_date = '{yesterday_str}'
# )t1
# JOIN
# (
# SELECT device_id
# FROM online.ml_device_history_detail
# WHERE partition_date = '{yesterday_str}'
# AND is_login_doctor = '1'
# )t2
# ON t1.device_id = t2.device_id
# )t2
# on t1.user_id=t2.user_id
# group by partition_date,device_id
# """.format(yesterday_str=yesterday_str, today_str=today_str)
# print(sql_dev_device_id)
# dev_df = spark.sql(sql_dev_device_id)
# dev_df_view = dev_df.createOrReplaceTempView("dev_view")
# dev_df.cache()
# dev_df.show(1)
# sql_res = dev_df.collect()
# for res in sql_res:
# print(res)
#
# print("-------------------------------")
#
# sql_spam_pv_device_id = """
# SELECT DISTINCT device_id
# FROM ml.ml_d_ct_dv_devicespam_d -- exclude agency click-farm devices, i.e. cheating devices (drop their view and exposure events)
# WHERE partition_day='{yesterday_str}'
#
# UNION ALL
# SELECT DISTINCT device_id
# FROM dim.dim_device_user_staff
#
# """.format(yesterday_str=yesterday_str)
# print(sql_spam_pv_device_id)
# spam_pv_df = spark.sql(sql_spam_pv_device_id)
# spam_pv_df.createOrReplaceTempView("spam_pv")
# spam_pv_df.show(1)
# sql_res = spam_pv_df.collect()
# spam_pv_df.cache()
# for res in sql_res:
# print(res)
print("-------------------------------")
sql_spam_pv_device_id = """
SELECT T.DEVICE_ID, --设备ID
T.CARD_ID, --卡片ID
SUM(T.CLICK_NUM) AS CLICK_NUM, --点击次数
C.EXPOSURE as EXPOSURE --曝光次数
if(NVL(C.EXPOSURE,0) <> 0 ,cast((NVL(T.CLICK_NUM,0)/NVL(C.EXPOSURE,0)) as decimal(18,5)) , 0) as search_ctr
FROM ML.ML_C_ET_CK_CLICK_DIMEN_D T
WHERE T.PARTITION_DAY = '{partition_day}'
AND T.PAGE_CODE = 'search_result_welfare'
AND T.ACTION IN ('goto_welfare_detail','search_result_welfare_click_item')
GROUP BY T.DEVICE_ID,
T.CARD_ID
left join
(SELECT T.DEVICE_ID as DEVICE_ID, --设备ID
T.CARD_ID as CARD_ID, --卡片ID
COUNT(T.CARD_ID) AS EXPOSURE --点击次数
FROM ML.MID_ML_C_ET_PE_PRECISEEXPOSURE_DIMEN_D T
WHERE T.PARTITION_DAY = '{partition_day}'
AND T.PAGE_CODE = 'search_result_welfare'
AND T.CARD_TYPE = 'common_card'
GROUP BY T.DEVICE_ID,
T.CARD_ID) C on T.DEVICE_ID=C.DEVICE_ID and T.CARD_ID = C.CARD_ID
""".format(partition_day=yesterday_str)
    print(sql_spam_pv_device_id)
    spam_pv_df = spark.sql(sql_spam_pv_device_id)
    spam_pv_df.createOrReplaceTempView("spam_pv")
    spam_pv_df.cache()  # cache before the actions below so the query runs only once
    spam_pv_df.show(1)
    sql_res = spam_pv_df.collect()
    for res in sql_res:
        print(res)
    print("-------------------------------")
# for res in sql_res:
# # print(res)
# day_id = res.day_id
# device_os_type = res.device_os_type
# active_type = res.active_type
# grey_type = res.grey_type
# page_name = res.page_name
# content_pv = res.content_pv
# content_uv = res.content_uv
# wel_exp_pv = res.wel_exp_pv
# content_exp_pv = res.content_exp_pv
# meigou_ctr=res.meigou_ctr
# if not meigou_ctr: meigou_ctr = 0
# grey_meigou_ctr=res.grey_meigou_ctr
# neirong_ctr=res.neirong_ctr
# if not neirong_ctr: neirong_ctr = 0
# grey_neirong_ctr=res.grey_neirong_ctr
#
# wel_click_pv = res.wel_click_pv
# content_click_pv = res.content_click_pv
# slide_wel_click_pv = res.slide_wel_click_pv
# self_wel_click_pv = res.self_wel_click_pv
# partition_day = res.PARTITION_DAY
# pid = hashlib.md5((day_id + device_os_type + active_type + grey_type + page_name).encode("utf8")).hexdigest()
# instert_sql = """replace into conent_detail_page_grayscale_ctr(
# day_id,device_os_type,active_type,grey_type,page_name,content_pv,content_uv,wel_exp_pv,
# content_exp_pv,wel_click_pv,content_click_pv,slide_wel_click_pv,self_wel_click_pv,partition_day,pid,meigou_ctr,neirong_ctr,
# grey_meigou_ctr,grey_neirong_ctr) VALUES('{day_id}','{device_os_type}','{active_type}','{grey_type}','{page_name}',{content_pv},{content_uv},
# {wel_exp_pv},{content_exp_pv},{wel_click_pv},{content_click_pv},{slide_wel_click_pv},{self_wel_click_pv},'{partition_day}','{pid}',{meigou_ctr},{neirong_ctr},{grey_meigou_ctr},{grey_neirong_ctr});""".format(
# day_id=day_id,device_os_type=device_os_type,active_type=active_type,grey_type=grey_type,page_name=page_name,
# content_pv=content_pv,content_uv=content_uv,wel_exp_pv=wel_exp_pv,content_exp_pv=content_exp_pv,wel_click_pv=wel_click_pv,
# content_click_pv=content_click_pv,slide_wel_click_pv=slide_wel_click_pv,self_wel_click_pv=self_wel_click_pv,meigou_ctr=meigou_ctr,neirong_ctr=neirong_ctr,
# partition_day=partition_day, pid=pid,grey_neirong_ctr=grey_neirong_ctr,grey_meigou_ctr=grey_meigou_ctr
# )
#         print(insert_sql)
# # cursor.execute("set names 'UTF8'")
#         res = cursor.execute(insert_sql)
# db.commit()
# print(res)
# # cursor.executemany()
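    # A minimal sketch of persisting the per-device search CTR rows computed
    # above, modeled on the commented-out block; the target table
    # search_meigou_ctr and its columns are assumptions, not confirmed by this
    # commit:
    # for res in sql_res:
    #     pid = hashlib.md5((str(res.DEVICE_ID) + str(res.CARD_ID) + yesterday_str).encode("utf8")).hexdigest()
    #     insert_sql = """replace into search_meigou_ctr(device_id,card_id,click_num,exposure,search_ctr,partition_day,pid)
    #     VALUES('{device_id}','{card_id}',{click_num},{exposure},{search_ctr},'{partition_day}','{pid}');""".format(
    #         device_id=res.DEVICE_ID, card_id=res.CARD_ID, click_num=res.CLICK_NUM or 0,
    #         exposure=res.EXPOSURE or 0, search_ctr=res.search_ctr or 0,
    #         partition_day=yesterday_str, pid=pid)
    #     cursor.execute(insert_sql)
    #     db.commit()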
db.close()
\ No newline at end of file