Commit 71e42e09 authored by 赵威's avatar 赵威

send email

parent 020e8d40
......@@ -8,7 +8,7 @@ import redis
from pyspark import SparkConf
from pyspark.sql import SparkSession
from tool import get_jerry_test, get_tag3_user_log, write_user_portrait
from tool import (get_jerry_test, get_tag3_user_log, send_email, write_user_portrait)
# [{'激光': 1.949194898204873}, {'手术': 1.949194898204873}, {'手术': 1.949194898204873}, {'手术': 1.949194898204873}]
......@@ -114,15 +114,18 @@ def consume_kafka():
.set("spark.driver.maxResultSize", "8g") \
.set("spark.sql.avro.compression.codec", "snappy")
spark = SparkSession.builder.config(conf=sparkConf).enableHiveSupport().getOrCreate()
spark.sparkContext.setLogLevel("WARN")
spark.sparkContext.addPyFile("/srv/apps/ffm-baseline_git/eda/smart_rank/tool.py")
device_ids_lst_rdd = spark.sparkContext.parallelize(device_ids_lst, numSlices=1000)
result = device_ids_lst_rdd.repartition(100).map(lambda x: update_tag3_user_portrait(x))
# result.foreach(print)
result.collect()
spark.stop()
try:
spark = SparkSession.builder.config(conf=sparkConf).enableHiveSupport().getOrCreate()
spark.sparkContext.setLogLevel("WARN")
spark.sparkContext.addPyFile("/srv/apps/ffm-baseline_git/eda/smart_rank/tool.py")
device_ids_lst_rdd = spark.sparkContext.parallelize(device_ids_lst, numSlices=1000)
result = device_ids_lst_rdd.repartition(100).map(lambda x: update_tag3_user_portrait(x))
# result.foreach(print)
result.collect()
spark.stop()
except Exception as e:
send_email("tag3_update_user_portrait_offline", "tag3_update_user_portrait_offline", e)
if __name__ == "__main__":
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment