Commit 240bae33 authored by litaolemo

update

parent 6e3c04bb
@@ -4,24 +4,6 @@
 # @email : litao@igengmei.com
 # @author : litao
-# -*- coding:UTF-8 -*-
-# @Time : 2020/9/15 16:17
-# @File : user_behavior_path.py
-# @email : litao@igengmei.com
-# @author : litao
-# -*- coding:UTF-8 -*-
-# @Time : 2020/9/8 13:39
-# @File : spark_test.py
-# @email : litao@igengmei.com
-# @author : litao
-# -*- coding:UTF-8 -*-
-# @Time : 2020/9/4 17:07
-# @File : search_meigou_ctr.py
-# @email : litao@igengmei.com
-# @author : litao
 import hashlib
 import json
...
# -*- coding:UTF-8 -*-
# @Time : 2020/11/24 9:48
# @File : new_user_project_protratit.py
# @email : litao@igengmei.com
# @author : litao
import hashlib
import json
import pymysql
import datetime
import xlwt
import redis
# from pyhive import hive
from maintenance.func_send_email_with_file import send_file_email
from typing import Dict, List
from elasticsearch_7 import Elasticsearch
from elasticsearch_7.helpers import scan
import sys
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
from meta_base_code.utils.func_from_redis_get_portrait import *
# from pyspark.sql.functions import lit
# import pytispark.pytispark as pti
def con_sql(sql):
    """Fetch all rows for the given SQL from the Doris MySQL instance."""
    # db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
    #                      db='jerry_prod')
    db = pymysql.connect(host='172.16.30.136', port=3306, user='doris', passwd='o5gbA27hXHHm',
                         db='doris_prod')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    cursor.close()
    db.close()
    return result
exists_es_dic = {}
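# Elasticsearch cluster that holds the content indexes (tractate / answer)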
es = Elasticsearch([
    {
        'host': '172.16.31.17',
        'port': 9200,
    }, {
        'host': '172.16.31.11',
        'port': 9200,
    }])
startTime = time.time()
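# Spark / TiSpark configuration; the prod.* keys carry JDBC connection info for
# the production MySQL/TiDB instances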
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
"jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
"jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
"jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
"jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("new_user_project_protratit")
spark = (SparkSession.builder.config(conf=sparkConf).config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
.config("spark.tispark.pd.addresses", "172.16.40.170:2379").enableHiveSupport().getOrCreate())
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
task_list = []
task_days = 3
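# ES query: online tractates with content_level >= 3 carrying operator tag 3315,
# excluding status = 4 and show_by_index = 2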
es_query_tractate = {"query": {
"bool": {
"must": [
{"term": {"is_online": True}},
{"range": {"content_level": {"gte": 3}}},
{"terms": {"operators_add_tags": [3315]}}
],
"must_not": [{"term": {"status": 4}},
{"term": {"show_by_index": 2}}
]
}
}
}
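# Count how many qualifying tractates mention each content keyword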
tractate_res_scan = scan(es, es_query_tractate, index="gm-dbmw-tractate-read")
tractate_dict = {}
for tractate_json in tractate_res_scan:
    _id = tractate_json["_id"]
    # scan() yields full hits, so the document body lives under "_source"
    content_keyword_list = tractate_json["_source"]["content_keyword"]
    for content_keyword in content_keyword_list:
        if content_keyword in tractate_dict:
            tractate_dict[content_keyword] += 1
        else:
            tractate_dict[content_keyword] = 1
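# Same filters for answers, plus a minimum content_length of 30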
es_query_answer = {"query": {
"bool": {
"must": [
{"term": {"is_online": True}},
{"range": {"content_level": {"gte": 3}}},
{"terms": {"operators_add_tags": [3315]}},
{"range": {"content_length": {"gte": 30}}}
]
}
}
}
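# Count qualifying answers per content keyword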
answer_res_scan = scan(es, es_query_answer, index="gm-dbmw-answer-read")
answer_dict = {}
for answer_json in answer_res_scan:
    _id = answer_json["_id"]
    content_keyword_list = answer_json["_source"]["content_keyword"]
    for content_keyword in content_keyword_list:
        if content_keyword in answer_dict:
            answer_dict[content_keyword] += 1
        else:
            answer_dict[content_keyword] = 1
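# Build per-day stats; with task_days = 3, range(2, task_days) yields only t = 2,
# i.e. the day before yesterday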
for t in range(2, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    tomorrow_str = (datetime.datetime.now() + datetime.timedelta(days=day_num + 1)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    today_str_format = now.strftime("%Y-%m-%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    yesterday_str_format = (now + datetime.timedelta(days=-1)).strftime("%Y-%m-%d")
    one_week_ago_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
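    # New-user devices: devices active on {today_str} whose first_device has
    # last_active_date = {today_str}, excluding spam devices, staff devices/users
    # and doctor/puppet accounts (see the inline SQL comments below)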
new_urser_device_id_sql = r"""
select t2.device_id as device_id from
(select device_id from online.ml_device_day_active_status where partition_date = '{today_str}' and active_type in (1,2)) t2
LEFT join (
select first_device from online.ml_user_history_detail where partition_date = '{tomorrow_str}' and last_active_date = '{today_str}'
) first_dev on first_dev.first_device = t2.device_id
LEFT JOIN
(
select distinct device_id
from ml.ml_d_ct_dv_devicespam_d --exclude agency order-brushing devices, i.e. cheating devices (filtered from view and exposure events)
WHERE partition_day='{today_str}'
union all
select distinct device_id
from dim.dim_device_user_staff --exclude intranet (staff) users
)spam_pv
on spam_pv.device_id=t2.device_id
LEFT JOIN
(
SELECT partition_date,device_id
FROM
(--find the first device each user_id was active on that day
SELECT user_id,partition_date,
if(size(device_list) > 0, device_list [ 0 ], '') AS device_id
FROM online.ml_user_updates
WHERE partition_date='{today_str}'
)t1
JOIN
( --doctor accounts
SELECT distinct user_id
FROM online.tl_hdfs_doctor_view
WHERE partition_date = '{today_str}'
--sockpuppet accounts / model users
UNION ALL
SELECT user_id
FROM ml.ml_c_ct_ui_user_dimen_d
WHERE partition_day = '{today_str}'
AND (is_puppet = 'true' or is_classifyuser = 'true')
UNION ALL
--company intranet users
select distinct user_id
from dim.dim_device_user_staff
UNION ALL
--devices that have logged in as a doctor
SELECT distinct t1.user_id
FROM
(
SELECT user_id, v.device_id as device_id
FROM online.ml_user_history_detail
LATERAL VIEW EXPLODE(device_history_list) v AS device_id
WHERE partition_date = '{today_str}'
) t1
JOIN
(
SELECT device_id
FROM online.ml_device_history_detail
WHERE partition_date = '{today_str}'
AND is_login_doctor = '1'
) t2
ON t1.device_id = t2.device_id
)t2
on t1.user_id=t2.user_id
group by partition_date,device_id
)dev
on t2.device_id=dev.device_id
WHERE spam_pv.device_id IS NULL
and dev.device_id is null and first_device is not null
""".format(today_str=today_str, yesterday_str_format=yesterday_str_format, today_str_format=today_str_format,
tomorrow_str=tomorrow_str)
print(new_urser_device_id_sql)
new_urser_device_id_df = spark.sql(new_urser_device_id_sql)
new_urser_device_id_df.createOrReplaceTempView("device_id_view")
new_urser_device_id_df.show(1)
sql_res = new_urser_device_id_df.collect()
    res_dict = {}
    portrait_dict = {
        "first_demands": {},
        "second_demands": {},
        "first_solutions": {},
        "second_solutions": {},
        "first_positions": {},
        "second_positions": {},
        "projects": {},
        "anecdote_tags": {},
    }
    no_portrait_device_id_list = []
    print("-------------------------------")
    count_not_has_portrait = 0
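    # Aggregate the top-3 portrait tags per dimension across all new-user devices.
    # Assumption: get_user_portrait_tag3_from_redis returns a dict keyed like
    # portrait_dict above ({"projects": [...], "second_demands": [...], ...}),
    # with each tag list ordered by weight.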
    for count_user_count, res in enumerate(sql_res):
        # print(count_user_count, res)
        portrait_res = get_user_portrait_tag3_from_redis(res.device_id)
        sql = """select cl_id, projects from kafka_tag3_log
        where cl_id = '%s' and event_cn = 'kyc' """ % res.device_id
        # print(count_user_count, res, portrait_res)
        sql_res_list = con_sql(sql)
        kyc_str_list = []
        if sql_res_list:
            print(sql_res_list, type(sql_res_list))
            kyc_str_list = sql_res_list[0][1].split(",")
        temp_count = 0
        for demand in portrait_res:
            if portrait_res[demand]:
                try:
                    # only the top-3 tags of each portrait dimension are counted
                    for tag in portrait_res[demand][0:3]:
                        if tag in portrait_dict[demand]:
                            portrait_dict[demand][tag] += 1
                        else:
                            portrait_dict[demand][tag] = 1
                        # discount project tags the user already chose during kyc
                        if tag in kyc_str_list and demand == "projects":
                            if portrait_dict["projects"].get(tag):
                                portrait_dict["projects"][tag] -= 1
                except Exception as e:
                    print("error ", e)
                temp_count += 1
        if not temp_count:
            count_not_has_portrait += 1
            no_portrait_device_id_list.append(res.device_id)
    print(portrait_dict)
    print(count_user_count + 1, count_not_has_portrait)
    print("-------------------------------")
for protratit_type in portrait_dict["projects"]:
partition_date = today_str
pid = hashlib.md5((partition_date + protratit_type).encode("utf8")).hexdigest()
action_count = portrait_dict["projects"][protratit_type]
answer_count = answer_dict.get(protratit_type,0)
tractate_count = tractate_dict.get(protratit_type,0)
total_count = answer_count + tractate_count
instert_sql = """replace into new_user_project_count(
partition_day,pid,protratit_count,protratit_type,answer_count,tractate_count,total_count) VALUES('{partition_day}','{pid}',{protratit_count},'{protratit_type}',{answer_count},{tractate_count},{total_count});""".format(
partition_day=today_str, pid=pid, protratit_count=action_count
, protratit_type=protratit_type, answer_count=answer_count, tractate_count=tractate_count, total_count=total_count
)
print(instert_sql)
# cursor.execute("set names 'UTF8'")
db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
db='jerry_prod')
cursor = db.cursor()
res = cursor.execute(instert_sql)
db.commit()
print(res)
@@ -5,18 +5,6 @@
 # @author : litao
-# -*- coding:UTF-8 -*-
-# @Time : 2020/9/8 13:39
-# @File : spark_test.py
-# @email : litao@igengmei.com
-# @author : litao
-# -*- coding:UTF-8 -*-
-# @Time : 2020/9/4 17:07
-# @File : search_meigou_ctr.py
-# @email : litao@igengmei.com
-# @author : litao
 import hashlib
 import json
...