Commit dccbc937 authored by 张彦钊's avatar 张彦钊

修改repartition

parent 982b0e16
......@@ -32,7 +32,7 @@ def get_data():
validate_date = con_sql(db, sql)[0].values.tolist()[0]
print("validate_date:" + validate_date)
temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
start = (temp - datetime.timedelta(days=200)).strftime("%Y-%m-%d")
start = (temp - datetime.timedelta(days=100)).strftime("%Y-%m-%d")
print(start)
db = pymysql.connect(host='172.16.40.158', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
......
......@@ -297,7 +297,7 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
spark.createDataFrame(rdd.filter(lambda x: x[0] == 0)
.map(lambda x: (x[1],x[2],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15],x[16]))) \
.toDF("y","z","app_list", "level2_list", "level3_list","tag1_list", "tag2_list", "tag3_list", "tag4_list",
"tag5_list", "tag6_list", "tag7_list", "ids").coalesce(1).write.format("tfrecords") \
"tag5_list", "tag6_list", "tag7_list", "ids").repartition(1).write.format("tfrecords") \
.save(path=path+"native/", mode="overwrite")
print("native tfrecord done")
h = time.time()
......@@ -314,7 +314,7 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
.map(
lambda x: (x[1], x[2], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15], x[16]))) \
.toDF("y", "z", "app_list", "level2_list", "level3_list", "tag1_list", "tag2_list", "tag3_list", "tag4_list",
"tag5_list", "tag6_list", "tag7_list", "ids").coalesce(1).write.format("tfrecords") \
"tag5_list", "tag6_list", "tag7_list", "ids").repartition(1).write.format("tfrecords") \
.save(path=path + "nearby/", mode="overwrite")
print("nearby tfrecord done")
......
......@@ -5,17 +5,6 @@ MODEL_PATH=/srv/apps/ffm-baseline_git/tensnsorflow
LOCAL_PATH=/home/gmuser/esmm
HDFS_PATH=hdfs://172.16.32.4:8020/strategy/esmm
echo "rm old file"
/opt/hadoop/bin/hadoop fs -rm ${DATA_PATH}/tr/*
/opt/hadoop/bin/hadoop fs -rm ${DATA_PATH}/va/*
/opt/hadoop/bin/hadoop fs -rm ${DATA_PATH}/native/*
/opt/hadoop/bin/hadoop fs -rm ${DATA_PATH}/nearby/*
rm ${LOCAL_PATH}/*.csv
rm ${LOCAL_PATH}/native/*
rm ${LOCAL_PATH}/nearby/*
rm -r ${LOCAL_PATH}/model_ckpt/DeepCvrMTL/20*
echo "train..."
CLASSPATH="$(hadoop classpath --glob)" ${PYTHON_PATH} ${MODEL_PATH}/train_multi.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=10000 --field_size=15 --feature_size=600000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${LOCAL_PATH}/model_ckpt/DeepCvrMTL/ --local_dir=${LOCAL_PATH} --task_type=train
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment