Commit 1b0e1f88 authored by litaolemo

update

parent 5c396cea
# -*- coding:UTF-8 -*-
# @Time : 2020/8/31 13:41
# @File : advertisement_strategy_d.py
# @email : litao@igengmei.com
# @author : litao
import hashlib
import json
import pymysql
import xlwt, datetime
import redis
# from pyhive import hive
from maintenance.func_send_email_with_file import send_file_email
from typing import Dict, List
from elasticsearch_7 import Elasticsearch
from elasticsearch_7.helpers import scan
import sys
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.functions import lit
import pytispark.pytispark as pti
# Module-level TiDB connection/cursor, reused at the end of the script to write results back.
db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
                     db='jerry_prod')
cursor = db.cursor()


def con_sql(sql):
    # Fetch rows from the database and return them as a tuple of tuples.
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
                         db='jerry_prod')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    db.close()
    return result
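
# Example usage of con_sql (hypothetical table/columns, for illustration only):
#   rows = con_sql("SELECT day_id, pv FROM some_report_table LIMIT 10")
#   for day_id, pv in rows:
#       print(day_id, pv)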
startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
"jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
"jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
"jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .appName("LR PYSPARK TEST")
         .enableHiveSupport().getOrCreate())
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
task_list = []
task_days = 1
now = datetime.datetime.now()
partition_date_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
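# Example: for a run on 2020-09-01, partition_date_str == '20200831' (yesterday's partition).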
for t in range(0, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
    # CPT average daily clicks
    CPT_daily_click_sql = """SELECT partition_date,count(1) as pv
        FROM online.bl_hdfs_maidian_updates
        WHERE partition_date >= '{start_date}'
        and partition_date < '{end_date}'
        AND ((ACTION = 'search_result_welfare_click_item' AND PAGE_NAME = 'search_result_welfare' AND PARAMS['transaction_type'] = 'advertise')
        OR (ACTION = 'goto_welfare_detail' AND PARAMS['from'] = 'category' AND PARAMS['transaction_type'] = 'operating' AND PARAMS['tab_name'] = 'service')
        OR (ACTION = 'goto_welfare_detail' AND PARAMS['from'] = 'welfare_home_list_item' and PARAMS['transaction_type'] = 'advertise')
        OR (ACTION = 'goto_welfare_detail' AND PARAMS['from'] = 'welfare_list' AND PARAMS['transaction_type'] = 'advertise')
        OR (ACTION = 'on_click_card' AND PARAMS['card_content_type'] = 'service' AND PARAMS['page_name'] IN ('new_sign','search_result_welfare','category','welfare_home_list_item','welfare_list') AND PARAMS['transaction_type'] = 'advertise'))
        group BY partition_date""".format(start_date=yesterday_str, end_date=today_str)
    CPT_daily_click_df = spark.sql(CPT_daily_click_sql)
    CPT_daily_click_df.createOrReplaceTempView("cpt_daily_click_df")
    # Service (welfare) detail page PV, restricted to active devices
    bus_detail_pv_sql = """SELECT
        partition_date,count(1) welfare_pv
        FROM
        (
            SELECT cl_id,partition_date
            FROM online.bl_hdfs_maidian_updates
            WHERE partition_date >='{start_date}'and partition_date < '{end_date}'
            AND action='page_view'
            AND params['page_name'] = 'welfare_detail'
        )a1
        JOIN
        (
            SELECT device_id,partition_date
            from online.ml_device_day_active_status
            WHERE partition_date >='{start_date}'and partition_date < '{end_date}'
            AND active_type in ('1','2','4')
        )a2
        on a2.device_id = a1.cl_id
        AND a2.partition_date=a1.partition_date
        group by partition_date""".format(start_date=yesterday_str, end_date=today_str)
    bus_detail_pv_df = spark.sql(bus_detail_pv_sql)
    bus_detail_pv_df.createOrReplaceTempView("bus_detail_pv_df")
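    # Both temp views (cpt_daily_click_df, bus_detail_pv_df) are registered for reuse by later SQL;
    # the remainder of this script does not query them, so they only matter if further statements are added.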
    # CPC same-day budget (effective basis: merchant budget capped at the summed per-service budgets).
    # The template placeholders are bound from the loop's date variables; partition_date is assumed to
    # take the latest dump partition (partition_date_str).
    sql = """SELECT day_id,sum(budget) as budget
        FROM
        (
            SELECT T1.day_id,T1.merchant_doctor_id,case when merchant_budget>=tot_service_budget then tot_service_budget else merchant_budget end as budget
            FROM
            (
                SELECT
                    substr(clicklog.create_time,1,10) AS day_id
                    ,clicklog.merchant_doctor_id
                    ,max(merchant_budget) as merchant_budget --merchant budget
                FROM
                (
                    SELECT id,promote_id,price,service_budget,merchant_budget,merchant_doctor_id,create_time,recharge
                    FROM online.tl_hdfs_cpc_clicklog_view
                    WHERE partition_date='{partition_date}'
                    AND regexp_replace(substr(create_time,1,10),'-','')>= '{start_date}'
                    AND regexp_replace(substr(create_time,1,10),'-','')<'{end_date}'
                )clicklog
                group by substr(clicklog.create_time,1,10),clicklog.merchant_doctor_id
            )T1
            LEFT JOIN
            (
                SELECT
                    day_id
                    ,merchant_doctor_id
                    ,sum(service_budget) as tot_service_budget
                FROM
                (
                    SELECT
                        substr(clicklog.create_time,1,10) AS day_id
                        ,clicklog.merchant_doctor_id,clicklog.service_id
                        ,max(service_budget) as service_budget
                    FROM
                    (
                        SELECT id,promote_id,price,service_budget,merchant_budget,merchant_doctor_id,service_id,create_time
                        FROM online.tl_hdfs_cpc_clicklog_view
                        WHERE partition_date='{partition_date}'
                        AND regexp_replace(substr(create_time,1,10),'-','')>= '{start_date}'
                        AND regexp_replace(substr(create_time,1,10),'-','')<'{end_date}'
                    )clicklog
                    GROUP BY substr(clicklog.create_time,1,10),clicklog.merchant_doctor_id,clicklog.service_id
                )service_budget
                GROUP BY day_id,merchant_doctor_id
            )T2
            ON T1.day_id=T2.day_id
            AND T1.merchant_doctor_id=T2.merchant_doctor_id
        )T
        GROUP BY day_id
        """.format(partition_date=partition_date_str, start_date=yesterday_str, end_date=today_str)
    device_df = spark.sql(sql)
    device_df.show(1, False)
    sql_res = device_df.collect()
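    # A minimal sketch (not executed) of how the (day_id, budget) rows returned by the query above could
    # be persisted; "cpc_daily_budget" is a hypothetical target table, not part of this job:
    #
    #   for row in sql_res:
    #       cursor.execute(
    #           "replace into cpc_daily_budget(day_id, budget) values ('{day_id}', {budget})".format(
    #               day_id=row.day_id, budget=row.budget))
    #   db.commit()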
print("-----------------------------------------------------------------------------")
for res in sql_res:
# print(res)
day_id = res.day_id
device_os_type = res.device_os_type
active_type = res.active_type
grey_type = res.grey_type
page_name = res.page_name
content_pv = res.content_pv
content_uv = res.content_uv
wel_exp_pv = res.wel_exp_pv
content_exp_pv = res.content_exp_pv
meigou_ctr=res.meigou_ctr
if not meigou_ctr: meigou_ctr = 0
grey_meigou_ctr=res.grey_meigou_ctr
neirong_ctr=res.neirong_ctr
if not neirong_ctr: neirong_ctr = 0
grey_neirong_ctr=res.grey_neirong_ctr
wel_click_pv = res.wel_click_pv
content_click_pv = res.content_click_pv
slide_wel_click_pv = res.slide_wel_click_pv
self_wel_click_pv = res.self_wel_click_pv
partition_day = res.PARTITION_DAY
pid = hashlib.md5((day_id + device_os_type + active_type + grey_type + page_name).encode("utf8")).hexdigest()
instert_sql = """replace into conent_detail_page_grayscale_ctr(
day_id,device_os_type,active_type,grey_type,page_name,content_pv,content_uv,wel_exp_pv,
content_exp_pv,wel_click_pv,content_click_pv,slide_wel_click_pv,self_wel_click_pv,partition_day,pid,meigou_ctr,neirong_ctr,
grey_meigou_ctr,grey_neirong_ctr) VALUES('{day_id}','{device_os_type}','{active_type}','{grey_type}','{page_name}',{content_pv},{content_uv},
{wel_exp_pv},{content_exp_pv},{wel_click_pv},{content_click_pv},{slide_wel_click_pv},{self_wel_click_pv},'{partition_day}','{pid}',{meigou_ctr},{neirong_ctr},{grey_meigou_ctr},{grey_neirong_ctr});""".format(
day_id=day_id,device_os_type=device_os_type,active_type=active_type,grey_type=grey_type,page_name=page_name,
content_pv=content_pv,content_uv=content_uv,wel_exp_pv=wel_exp_pv,content_exp_pv=content_exp_pv,wel_click_pv=wel_click_pv,
content_click_pv=content_click_pv,slide_wel_click_pv=slide_wel_click_pv,self_wel_click_pv=self_wel_click_pv,meigou_ctr=meigou_ctr,neirong_ctr=neirong_ctr,
partition_day=partition_day, pid=pid,grey_neirong_ctr=grey_neirong_ctr,grey_meigou_ctr=grey_meigou_ctr
)
print(instert_sql)
# cursor.execute("set names 'UTF8'")
res = cursor.execute(instert_sql)
db.commit()
print(res)
# cursor.executemany()
db.close()
\ No newline at end of file
@@ -33,15 +33,15 @@ sparkConf.set("spark.debug.maxToStringFields", "100")
 sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
 sparkConf.set("spark.tispark.plan.allow_index_read", True)
 sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
-sparkConf.set("spark.sql.adaptive.enabled", True)
-sparkConf.set("spark.sql.adaptive.skewedJoin.enabled", True)
+# sparkConf.set("spark.sql.adaptive.enabled", True)
+# sparkConf.set("spark.sql.adaptive.skewedJoin.enabled", True)
 sparkConf.set("spark.shuffle.statistics.verbose", True)
-sparkConf.set("spark.sql.adaptive.shuffle.targetPostShuffleInputSize", "67108864")
-sparkConf.set("spark.sql.adaptive.shuffle.targetPostShuffleRowCount", "20000000")
+# sparkConf.set("spark.sql.adaptive.shuffle.targetPostShuffleInputSize", "67108864")
+# sparkConf.set("spark.sql.adaptive.shuffle.targetPostShuffleRowCount", "20000000")
 sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
 sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
 sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
-# sparkConf.set("mapreduce.map.output.compress", False)
+sparkConf.set("mapreduce.map.output.compress", False)
 sparkConf.set("prod.gold.jdbcuri",
               "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
 sparkConf.set("prod.mimas.jdbcuri",