Commit 08cd43e0 authored by 赵威

Merge branch 'offic' into 'master'

Offic

See merge request !58
parents adf38dfe 9adcba50
@@ -5,6 +5,7 @@ import operator
 from collections import Counter
 import redis
 from pyspark import SparkConf
+from pyspark.sql import SparkSession
 from tool import (get_jerry_test, get_tag3_user_log, send_email, write_user_portrait)
@@ -13,7 +14,8 @@ from tool import (get_jerry_test, get_tag3_user_log, send_email, write_user_portrait)
 # [{'激光': 1.949194898204873}, {'手术': 1.949194898204873}, {'手术': 1.949194898204873}, {'手术': 1.949194898204873}]
 # {'手术': 5.8475846946146195, '激光': 1.949194898204873}
 def merge_values(list_of_dict):
-    return dict(functools.reduce(operator.add, map(Counter, list_of_dict)))
+    d = dict(functools.reduce(operator.add, map(Counter, list_of_dict)))
+    return dict(sorted(d.items(), key=lambda x: x[1], reverse=True))
 # [("a", 1), ("b", 2)]
@@ -110,6 +112,8 @@ def consume_kafka():
     db.close()
     cursor.close()
+    # device_ids_lst = ["androidid_a25a1129c0b38f7b"]
     sparkConf = SparkConf().set("spark.hive.mapred.supports.subdirectories", "true") \
         .set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", "true") \
         .set("spark.tispark.plan.allow_index_double_read", "false") \
...
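
The last hunk is truncated at "...", so the rest of consume_kafka is not visible here. For orientation only, a hedged sketch of how a SparkConf like this is commonly handed to the newly imported SparkSession; the builder chain and the enableHiveSupport() call are assumptions for illustration, not the commit's actual code:

    from pyspark import SparkConf
    from pyspark.sql import SparkSession

    sparkConf = SparkConf() \
        .set("spark.hive.mapred.supports.subdirectories", "true") \
        .set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", "true") \
        .set("spark.tispark.plan.allow_index_double_read", "false")

    # Assumed continuation: the diff is cut off above, so this session
    # construction is illustrative rather than taken from the commit.
    spark = SparkSession.builder \
        .config(conf=sparkConf) \
        .enableHiveSupport() \
        .getOrCreate()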