Project: 宋柯 / meta_base_code · Commits

Commit 3427dbc6, authored Nov 26, 2020 by litaolemo
Commit message: update
Parent: ddc399bc

Showing 1 changed file with 199 additions and 0 deletions.

new_user_analysis/new_user_project_analysis.py · new file (mode 100644) · +199 −0
# -*- coding:UTF-8 -*-
# @Time : 2020/11/26 15:02
# @File : new_user_project_analysis.py
# @email : litao@igengmei.com
# @author : litao
import hashlib
import json
from meta_base_code.utils.func_get_uesr_event import get_user_event_from_mysql
import pymysql
import xlwt, datetime
import redis
# from pyhive import hive
from maintenance.func_send_email_with_file import send_file_email
from typing import Dict, List
# from elasticsearch_7 import Elasticsearch
# from elasticsearch_7.helpers import scan
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
import sys
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
from meta_base_code.utils.func_from_redis_get_portrait import *
import pandas as pd

# from pyspark.sql.functions import lit
# import pytispark.pytispark as pti
def con_sql(sql):
    # Fetch all rows for the given SQL from the database table.
    # db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
    #                      db='jerry_prod')
    db = pymysql.connect(host='172.16.30.136', port=3306, user='doris',
                         passwd='o5gbA27hXHHm', db='doris_prod')
    cursor = db.cursor()
    cursor.execute(sql)
    result = cursor.fetchall()
    db.close()
    return result
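
# Usage sketch (not in the original; the table name is hypothetical): pymysql's
# cursor.fetchall() returns a tuple of row tuples, so con_sql is consumed like:
# for (device_id,) in con_sql("select device_id from user_device limit 10"):
#     print(device_id)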
exists_es_dic = {}

es = Elasticsearch([
    {'host': '172.16.31.17', 'port': 9200},
    {'host': '172.16.31.11', 'port': 9200},
])

startTime = time.time()
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("prod.gold.jdbcuri",
              "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
sparkConf.set("prod.mimas.jdbcuri",
              "jdbc:mysql://172.16.30.138/mimas_prod?user=mimas&password=GJL3UJe1Ck9ggL6aKnZCq4cRvM&rewriteBatchedStatements=true")
sparkConf.set("prod.gaia.jdbcuri",
              "jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true")
sparkConf.set("prod.tidb.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.jerry.jdbcuri",
              "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
# NOTE: the duplicate key below overrides the PD address set just above; the last value wins.
sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
sparkConf.set("prod.tidb.database", "jerry_prod")
sparkConf.setAppName("new_user_project_protratit")
spark = (SparkSession.builder.config(conf=sparkConf)
         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
         .enableHiveSupport().getOrCreate())

spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
task_list = []
task_days = 2
tractate_list = []

# res = pd.DataFrame(answer_list)
# res.to_csv("answer_list.csv", encoding="gb18030")
# send_file_email("", '', sender="litao@igengmei.com", email_group=["litao@igengmei.com"], email_msg_body_str="test",
#                 title_str="test", cc_group=["litao@igengmei.com"], file="/srv/apps/meta_base_code/answer_list.csv")
for t in range(1, task_days):
    day_num = 0 - t
    now = (datetime.datetime.now() + datetime.timedelta(days=day_num))
    last_30_day_str = (now + datetime.timedelta(days=-30)).strftime("%Y%m%d")
    tomorrow_str = (datetime.datetime.now() + datetime.timedelta(days=day_num + 1)).strftime("%Y%m%d")
    today_str = now.strftime("%Y%m%d")
    today_str_format = now.strftime("%Y-%m-%d")
    yesterday_str = (now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
    yesterday_str_format = (now + datetime.timedelta(days=-1)).strftime("%Y-%m-%d")
    one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
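
    # Worked example (not in the original; assumes a run date of 2020-11-26 with
    # t = 1, i.e. day_num = -1):
    #   today_str        -> "20201125"    tomorrow_str     -> "20201126"
    #   yesterday_str    -> "20201124"    one_week_age_str -> "20201118"
    #   last_30_day_str  -> "20201026"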
    # The original subquery was never aliased as t2, and first_device was not selected,
    # so the join and WHERE clause below could not resolve; the alias and the extra
    # column are the minimal fix (assuming first_device is a column of
    # online.ml_device_day_active_status).
    new_urser_device_id_sql = r"""
        select t2.device_id as device_id from
        (select device_id, first_device from online.ml_device_day_active_status
         where partition_date = '{today_str}' and active_type in (1,2)) t2
        LEFT JOIN
        (
            select distinct device_id
            from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
            where PARTITION_DAY = '{today_str}'
            AND is_abnormal_device = 'true'
        ) dev
        on t2.device_id = dev.device_id
        WHERE dev.device_id is null and t2.first_device is not null
    """.format(today_str=today_str, yesterday_str_format=yesterday_str_format,
               today_str_format=today_str_format, tomorrow_str=tomorrow_str)
    print(new_urser_device_id_sql)
    new_urser_device_id_df = spark.sql(new_urser_device_id_sql)
    new_urser_device_id_df.createOrReplaceTempView("device_id_view")
    new_urser_device_id_df.show(1)
    sql_res = new_urser_device_id_df.collect()
    res_dict = {}
    portrait_dict = {"first_demands": {}, "second_demands": {}, "first_solutions": {},
                     "second_solutions": {}, "first_positions": {}, "second_positions": {},
                     "projects": {}, 'anecdote_tags': {}}
    no_portrait_device_id_list = []
    print("-------------------------------")
    count_not_has_portratit = 0
    event_dict = {}
    for count_user_count, res in enumerate(sql_res):
        # print(count, res)
        temp_count = 0
        try:
            # get_user_event_from_mysql appears to yield (event name, comma-separated
            # project tags); tally events per project tag across this device's history.
            for event_cn, projects in get_user_event_from_mysql(res.device_id):
                temp_count += 1
                project_list = projects.split(",")
                for project in project_list:
                    if project not in event_dict:
                        event_dict[project] = {}
                    if event_dict[project].get(event_cn):
                        event_dict[project][event_cn] += 1
                    else:
                        event_dict[project][event_cn] = 1
        except Exception as e:
            print("error ", e)
        # temp_count is incremented per event inside the loop above (in the original it
        # sat after the try/except, so the no-portrait branch could never fire); devices
        # with no events at all are tracked as having no portrait.
        if not temp_count:
            count_not_has_portratit += 1
            no_portrait_device_id_list.append(res.device_id)
    # print(portrait_dict)
    # print(count_user_count + 1, count_not_has_portratit)
    # print("-------------------------------")
    print("event_dict", event_dict)
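
    # A minimal sketch (not in the original): pivot event_dict into a DataFrame for
    # inspection; rows are event names, columns are project tags. The CSV path and the
    # email step mirror the commented-out send_file_email example near the imports and
    # stay commented out here because the path is an assumption.
    event_df = pd.DataFrame(event_dict).fillna(0)
    print(event_df)
    # event_df.to_csv("/srv/apps/meta_base_code/event_dict.csv", encoding="gb18030")
    # send_file_email("", '', sender="litao@igengmei.com", email_group=["litao@igengmei.com"],
    #                 email_msg_body_str="new user project event counts", title_str="event_dict",
    #                 cc_group=["litao@igengmei.com"], file="/srv/apps/meta_base_code/event_dict.csv")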
    # for protratit_type in portrait_dict["projects"]:
    #     partition_date = today_str
    #     pid = hashlib.md5((partition_date + protratit_type).encode("utf8")).hexdigest()
    #     action_count = portrait_dict["projects"][protratit_type]
    #     answer_count = answer_dict.get(protratit_type, 0)
    #     tractate_count = tractate_dict.get(protratit_type, 0)
    #     total_count = answer_count + tractate_count
    #
    #     instert_sql = """replace into new_user_project_count(
    #         partition_day,pid,protratit_count,protratit_type,answer_count,tractate_count,total_count) VALUES('{partition_day}','{pid}',{protratit_count},'{protratit_type}',{answer_count},{tractate_count},{total_count});""".format(
    #         partition_day=today_str, pid=pid, protratit_count=action_count
    #         , protratit_type=protratit_type, answer_count=answer_count, tractate_count=tractate_count,
    #         total_count=total_count
    #     )
    #     print(instert_sql)
    #     # cursor.execute("set names 'UTF8'")
    #     db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user', passwd='aqpuBLYzEV7tML5RPsN1pntUzFy',
    #                          db='jerry_prod')
    #     cursor = db.cursor()
    #     res = cursor.execute(instert_sql)
    #     db.commit()
    #     print(res)
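
    # A sketch (not in the original) that would persist the event_dict tallies with the
    # same replace-into pattern as the draft above; the table name
    # new_user_project_event_count and its columns are assumptions, so it stays
    # commented out like the author's own draft.
    # db = pymysql.connect(host='172.16.40.158', port=4000, user='st_user',
    #                      passwd='aqpuBLYzEV7tML5RPsN1pntUzFy', db='jerry_prod')
    # cursor = db.cursor()
    # for project, event_counts in event_dict.items():
    #     for event_cn, event_count in event_counts.items():
    #         pid = hashlib.md5((today_str + project + event_cn).encode("utf8")).hexdigest()
    #         insert_sql = """replace into new_user_project_event_count(
    #             partition_day,pid,project,event_cn,event_count) VALUES(
    #             '{partition_day}','{pid}','{project}','{event_cn}',{event_count});""".format(
    #             partition_day=today_str, pid=pid, project=project,
    #             event_cn=event_cn, event_count=event_count)
    #         cursor.execute(insert_sql)
    # db.commit()
    # db.close()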