# -*- coding:UTF-8 -*-
# @Time  : 2020/8/19 11:53
# @File  : from_sparksql_to_mysql.py
# @email : litao@igengmei.com
# @author : litao
import hashlib
import time

import pymysql
from pyspark import SparkConf
from pyspark.sql import SparkSession

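# Shared TiDB connection/cursor used to write the aggregated rows at the bottom of the script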
db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
                         db='jerry_prod')
cursor = db.cursor()

def con_sql(sql):
    """Run a query against TiDB and return all rows (helper, currently unused)."""
    db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
                         db='jerry_prod')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    db.close()
    return result


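# Spark configuration: cross joins, TiSpark index-read behaviour, Kryo serialization,
# and the prod JDBC URIs / TiSpark PD addresses used by this job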
startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")

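# Build the SparkSession with the TiSpark extension and Hive support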
spark = (SparkSession.builder
         .config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .appName("LR PYSPARK TEST")
         .enableHiveSupport()
         .getOrCreate())

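# Register the Hive UDF jars and the temporary functions they provide (json_map, is_json, arrayMerge)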
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")

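# Pull the aggregated content-page metrics from Hive; NVL(ROUND(...), 0) falls back to 0
# when a CTR division yields NULL (missing or zero denominator)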
select_sql = """SELECT *,NVL(ROUND(wel_click_pv/wel_exp_pv,6),0) as meigou_ctr,
    NVL(ROUND(content_click_pv/content_exp_pv,6),0) as neirong_ctr FROM pm.tl_pm_contentpage_ctr"""
device_df = spark.sql(select_sql)
device_df.show(1, False)
sql_res = device_df.collect()
print("-----------------------------------------------------------------------------")
for res in sql_res:
    # print(res)
    day_id = res.day_id
    device_os_type = res.device_os_type
    active_type = res.active_type
    grey_type = res.grey_type
    page_name = res.page_name
    content_pv = res.content_pv
    content_uv = res.content_uv
    wel_exp_pv = res.wel_exp_pv
    content_exp_pv = res.content_exp_pv
    meigou_ctr = res.meigou_ctr
    neirong_ctr = res.neirong_ctr

    wel_click_pv = res.wel_click_pv
    content_click_pv = res.content_click_pv
    slide_wel_click_pv = res.slide_wel_click_pv
    self_wel_click_pv = res.self_wel_click_pv
    partition_day = res.PARTITION_DAY
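    # pid: md5 of the dimension columns, used as the row identifier in the REPLACE INTO below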
    pid = hashlib.md5((day_id + device_os_type + active_type + grey_type + page_name).encode("utf8")).hexdigest()
    insert_sql = """replace into conent_detail_page_grayscale_ctr(
    day_id,device_os_type,active_type,grey_type,page_name,content_pv,content_uv,wel_exp_pv,
    content_exp_pv,wel_click_pv,content_click_pv,slide_wel_click_pv,self_wel_click_pv,partition_day,pid,meigou_ctr,neirong_ctr
    ) VALUES('{day_id}','{device_os_type}','{active_type}','{grey_type}','{page_name}',{content_pv},{content_uv},
    {wel_exp_pv},{content_exp_pv},{wel_click_pv},{content_click_pv},{slide_wel_click_pv},{self_wel_click_pv},'{partition_day}','{pid}',{meigou_ctr},{neirong_ctr});""".format(
        day_id=day_id, device_os_type=device_os_type, active_type=active_type, grey_type=grey_type, page_name=page_name,
        content_pv=content_pv, content_uv=content_uv, wel_exp_pv=wel_exp_pv, content_exp_pv=content_exp_pv, wel_click_pv=wel_click_pv,
        content_click_pv=content_click_pv, slide_wel_click_pv=slide_wel_click_pv, self_wel_click_pv=self_wel_click_pv, meigou_ctr=meigou_ctr, neirong_ctr=neirong_ctr,
        partition_day=partition_day, pid=pid
    )
    print(insert_sql)
    # cursor.execute("set names 'UTF8'")
    affected_rows = cursor.execute(insert_sql)
    db.commit()
    print(affected_rows)
# cursor.executemany()
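# The commented executemany() above hints at a batched alternative. A minimal sketch
# (not executed here, assuming the rows are collected as 17-element tuples in column order):
#
# insert_sql = ("replace into conent_detail_page_grayscale_ctr("
#               "day_id,device_os_type,active_type,grey_type,page_name,content_pv,content_uv,"
#               "wel_exp_pv,content_exp_pv,wel_click_pv,content_click_pv,slide_wel_click_pv,"
#               "self_wel_click_pv,partition_day,pid,meigou_ctr,neirong_ctr) "
#               "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
# cursor.executemany(insert_sql, rows)  # rows: list of tuples built inside the loop above
# db.commit()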
db.close()