Commit b4bfaa68 authored by 张彦钊

change test file

parent 7eb0395f
# -*- coding: utf-8 -*-
import datetime

import pandas as pd
import pymysql
import tensorflow as tf
import pytispark.pytispark as pti
from pyspark.conf import SparkConf
from pyspark.sql import SparkSession
def con_sql(db,sql):
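# run `sql` on the pymysql connection `db` and return the rows as a pandas DataFrame
# (an empty DataFrame on failure); the connection is always closed afterwards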
cursor = db.cursor()
try:
cursor.execute(sql)
result = cursor.fetchall()
df = pd.DataFrame(list(result))
except Exception as e:
print("exception occurred:", e)
df = pd.DataFrame()
finally:
db.close()
return df
def app_list_func(x,l):
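# split the comma-separated string x and map every token through the dict l,
# falling back to 0 for tokens not in the vocabulary,
# e.g. app_list_func("a,b", {"a": 3}) -> [3, 0]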
b = str(x).split(",")
e = []
for i in b:
if i in l.keys():
e.append(l[i])
else:
e.append(0)
return e
def get_data():
db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
def multi_hot(df,column,n):
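# collect every distinct comma-separated token appearing in `column` and assign each one
# a consecutive integer id starting at n; returns (vocabulary size, token -> id dict)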
v = df.select(column).distinct().rdd.map(lambda x: x[0]).collect()
app_list_value = [str(i).split(",") for i in v]
app_list_unique = []
for i in app_list_value:
app_list_unique.extend(i)
app_list_unique = list(set(app_list_unique))
number = len(app_list_unique)
app_list_map = dict(zip(app_list_unique, list(range(n, number + n))))
return number,app_list_map
def feature_engineer():
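# build the training/validation feature set from TiDB and encode the categorical and
# multi-hot features into integer ids shared with the prediction path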
db = pymysql.connect(host='172.16.40.158', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select max(stat_date) from esmm_train_data"
validate_date = con_sql(db, sql)[0].values.tolist()[0]
print("validate_date:" + validate_date)
temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
# start = (temp - datetime.timedelta(days=30)).strftime("%Y-%m-%d")  # 30-day window, superseded by the 2-day window below
start = (temp - datetime.timedelta(days=2)).strftime("%Y-%m-%d")
print(start)
db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select e.y,e.z,e.stat_date,e.ucity_id,e.clevel1_id,e.ccity_name," \
"u.device_type,u.manufacturer,u.channel,c.top,cid_time.time " \
"from esmm_train_data e left join user_feature u on e.device_id = u.device_id " \
"left join cid_type_top c on e.device_id = c.device_id left join cid_time on e.cid_id = cid_time.cid_id " \
sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
"u.channel,c.top,cut.time,dl.app_list,feat.level3_ids,doctor.hospital_id," \
"wiki.tag as tag1,question.tag as tag2,search.tag as tag3,budan.tag as tag4," \
"ot.tag as tag5,sixin.tag as tag6,cart.tag as tag7," \
"k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time " \
"from jerry_test.esmm_train_data e left join jerry_test.user_feature u on e.device_id = u.device_id " \
"left join jerry_test.cid_type_top c on e.device_id = c.device_id " \
"left join jerry_test.cid_time_cut cut on e.cid_id = cut.cid " \
"left join jerry_test.device_app_list dl on e.device_id = dl.device_id " \
"left join jerry_test.diary_feat feat on e.cid_id = feat.diary_id " \
"left join jerry_test.train_Knowledge_network_data k on feat.level2 = k.level2_id " \
"left join jerry_test.wiki_tag wiki on e.device_id = wiki.device_id" \
"left join jerry_test.question_tag question on e.device_id = question.device_id " \
"left join jerry_test.search_tag search on e.device_id = search.device_id " \
"left join jerry_test.budan_tag budan on e.device_id = budan.device_id " \
"left join jerry_test.order_tag ot on e.device_id = ot.device_id " \
"left join jerry_test.sixin_tag sixin on e.device_id = sixin.device_id " \
"left join jerry_test.cart_tag cart on e.device_id = cart.device_id " \
"left join eagle.src_zhengxing_api_service service on e.diary_service_id = service.id " \
"left join eagle.src_zhengxing_api_doctor doctor on service.doctor_id = doctor.id " \
"where e.stat_date >= '{}'".format(start)
df = con_sql(db, sql)
print(df.shape)
df = df.rename(columns={0: "y", 1: "z", 2: "stat_date", 3: "ucity_id", 4: "clevel1_id", 5: "ccity_name",
6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "time"})
print("esmm data ok")
print(df.head(2))
df = df.fillna("na")
print(df.count())
ucity_id = {v: i for i, v in enumerate(df["ucity_id"].unique())}
clevel1_id = {v: i for i, v in enumerate(df["clevel1_id"].unique())}
ccity_name = {v: i for i, v in enumerate(df["ccity_name"].unique())}
device_type = {v: i for i, v in enumerate(df["device_type"].unique())}
manufacturer = {v: i for i, v in enumerate(df["manufacturer"].unique())}
channel = {v: i for i, v in enumerate(df["channel"].unique())}
top = {v: i for i, v in enumerate(df["top"].unique())}
time = {v: i for i, v in enumerate(df["time"].unique())}
df["ucity_id"] = df["ucity_id"].map(ucity_id)
df["clevel1_id"] = df["clevel1_id"].map(clevel1_id)
df["ccity_name"] = df["ccity_name"].map(ccity_name)
df["device_type"] = df["device_type"].map(device_type)
df["manufacturer"] = df["manufacturer"].map(manufacturer)
df["channel"] = df["channel"].map(channel)
df["top"] = df["top"].map(top)
df["time"] = df["time"].map(time)
train = df.loc[df["stat_date"] == validate_date]
test = df.loc[df["stat_date"] != validate_date]
features = ["ucity_id","clevel1_id","ccity_name","device_type","manufacturer","channel","top","time"]
train_values = train[features].values
train_labels = train[["y","z"]].values
test_values = test[features].values
test_labels = test[["y","z"]].values
ucity_id_max = len(ucity_id)
clevel1_id_max = len(clevel1_id)
ccity_name_max = len(ccity_name)
device_type_max = len(device_type)
manufacturer_max = len(manufacturer)
channel_max = len(channel)
top_max = len(top)
time_max = len(time)
return train_values,train_labels,test_values,test_labels,ucity_id_max,clevel1_id_max,ccity_name_max,\
device_type_max,manufacturer_max,channel_max,top_max,time_max
def get_inputs():
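# one int32 placeholder per categorical feature (shape [None, 1]), plus the [click, buy]
# targets and the learning-rate placeholder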
ucity_id = tf.placeholder(tf.int32, [None, 1], name="ucity_id")
clevel1_id = tf.placeholder(tf.int32, [None, 1], name="clevel1_id")
ccity_name = tf.placeholder(tf.int32, [None, 1], name="ccity_name")
device_type = tf.placeholder(tf.int32, [None, 1], name="device_type")
manufacturer = tf.placeholder(tf.int32, [None, 1], name="manufacturer")
channel = tf.placeholder(tf.int32, [None, 1], name="channel")
top = tf.placeholder(tf.int32, [None, 1], name="top")
time = tf.placeholder(tf.int32, [None, 1], name="time")
targets = tf.placeholder(tf.float32, [None, 2], name="targets")
LearningRate = tf.placeholder(tf.float32, name="LearningRate")
return ucity_id,clevel1_id,ccity_name,device_type,manufacturer,channel,top,time,targets,LearningRate
def define_embedding_layers(combiner,embed_dim,ucity_id, ucity_id_max, clevel1_id_max,clevel1_id,
ccity_name_max,ccity_name,device_type_max,device_type,manufacturer_max,
manufacturer,channel,channel_max,top,top_max,time,time_max):
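# one embedding matrix per categorical feature; each lookup is reduced over the sequence
# dimension when combiner == "sum", then all eight embeddings are concatenated and
# reshaped to [-1, embed_dim * 8]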
ucity_id_embed_matrix = tf.Variable(tf.random_normal([ucity_id_max, embed_dim], 0, 0.001))
ucity_id_embed_layer = tf.nn.embedding_lookup(ucity_id_embed_matrix, ucity_id)
if combiner == "sum":
ucity_id_embed_layer = tf.reduce_sum(ucity_id_embed_layer, axis=1, keep_dims=True)
clevel1_id_embed_matrix = tf.Variable(tf.random_uniform([clevel1_id_max, embed_dim], 0, 0.001))
clevel1_id_embed_layer = tf.nn.embedding_lookup(clevel1_id_embed_matrix, clevel1_id)
if combiner == "sum":
clevel1_id_embed_layer = tf.reduce_sum(clevel1_id_embed_layer, axis=1, keep_dims=True)
ccity_name_embed_matrix = tf.Variable(tf.random_uniform([ccity_name_max, embed_dim], 0, 0.001))
ccity_name_embed_layer = tf.nn.embedding_lookup(ccity_name_embed_matrix,ccity_name)
if combiner == "sum":
ccity_name_embed_layer = tf.reduce_sum(ccity_name_embed_layer, axis=1, keep_dims=True)
device_type_embed_matrix = tf.Variable(tf.random_uniform([device_type_max, embed_dim], 0, 0.001))
device_type_embed_layer = tf.nn.embedding_lookup(device_type_embed_matrix, device_type)
if combiner == "sum":
device_type_embed_layer = tf.reduce_sum(device_type_embed_layer, axis=1, keep_dims=True)
manufacturer_embed_matrix = tf.Variable(tf.random_uniform([manufacturer_max, embed_dim], 0, 0.001))
manufacturer_embed_layer = tf.nn.embedding_lookup(manufacturer_embed_matrix, manufacturer)
if combiner == "sum":
manufacturer_embed_layer = tf.reduce_sum(manufacturer_embed_layer, axis=1, keep_dims=True)
channel_embed_matrix = tf.Variable(tf.random_uniform([channel_max, embed_dim], 0, 0.001))
channel_embed_layer = tf.nn.embedding_lookup(channel_embed_matrix, channel)
if combiner == "sum":
channel_embed_layer = tf.reduce_sum(channel_embed_layer, axis=1, keep_dims=True)
top_embed_matrix = tf.Variable(tf.random_uniform([top_max, embed_dim], 0, 0.001))
top_embed_layer = tf.nn.embedding_lookup(top_embed_matrix, top)
if combiner == "sum":
top_embed_layer = tf.reduce_sum(top_embed_layer, axis=1, keep_dims=True)
time_embed_matrix = tf.Variable(tf.random_uniform([time_max, embed_dim], 0, 0.001))
time_embed_layer = tf.nn.embedding_lookup(time_embed_matrix, time)
if combiner == "sum":
time_embed_layer = tf.reduce_sum(time_embed_layer, axis=1, keep_dims=True)
esmm_embedding_layer = tf.concat([ucity_id_embed_layer, clevel1_id_embed_layer,ccity_name_embed_layer,
device_type_embed_layer,manufacturer_embed_layer,channel_embed_layer,
top_embed_layer,time_embed_layer], axis=1)
esmm_embedding_layer = tf.reshape(esmm_embedding_layer, [-1, embed_dim * 8])
return esmm_embedding_layer
def define_ctr_layer(esmm_embedding_layer):
ctr_layer_1 = tf.layers.dense(esmm_embedding_layer, 200, activation=tf.nn.relu)
ctr_layer_2 = tf.layers.dense(ctr_layer_1, 80, activation=tf.nn.relu)
ctr_layer_3 = tf.layers.dense(ctr_layer_2, 2) # [nonclick, click]
ctr_prob = tf.nn.softmax(ctr_layer_3) + 0.00000001
return ctr_prob
def define_cvr_layer(esmm_embedding_layer):
cvr_layer_1 = tf.layers.dense(esmm_embedding_layer, 200, activation=tf.nn.relu)
cvr_layer_2 = tf.layers.dense(cvr_layer_1, 80, activation=tf.nn.relu)
cvr_layer_3 = tf.layers.dense(cvr_layer_2, 2) # [nonbuy, buy]
cvr_prob = tf.nn.softmax(cvr_layer_3) + 0.00000001
return cvr_prob
def define_ctr_cvr_layer(esmm_embedding_layer):
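# shared-tower variant: both heads read the same layer_3 logits, so ctr_prob and cvr_prob
# are identical here; the small epsilon keeps the later tf.log calls away from log(0)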
layer_1 = tf.layers.dense(esmm_embedding_layer, 128 , activation=tf.nn.relu)
layer_2 = tf.layers.dense(layer_1, 16, activation=tf.nn.relu)
layer_3 = tf.layers.dense(layer_2, 2)
ctr_prob = tf.nn.softmax(layer_3) + 0.00000001
cvr_prob = tf.nn.softmax(layer_3) + 0.00000001
return ctr_prob, cvr_prob
df = spark.sql(sql)
df = df.drop_duplicates(["ucity_id", "level2_ids", "ccity_name", "device_type", "manufacturer",
"channel", "top", "time", "stat_date", "app_list", "hospital_id", "level3_ids",
"tag1","tag2","tag3","tag4","tag5","tag6","tag7"])
features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
"channel", "top", "time", "stat_date", "hospital_id",
"treatment_method", "price_min", "price_max", "treatment_time", "maintain_time", "recover_time"]
df = df.na.fill(dict(zip(features, features)))
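# fill NULLs in each categorical column with the column's own name, so missing values
# become one extra category per feature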
apps_number, app_list_map = multi_hot(df, "app_list", 1)
level2_number, leve2_map = multi_hot(df, "level2_ids", 1 + apps_number)
level3_number, leve3_map = multi_hot(df, "level3_ids", 1 + apps_number + level2_number)
unique_values = []
for i in features:
unique_values.extend(df.select(i).distinct().rdd.map(lambda x: x[0]).collect())
temp = list(range(2 + apps_number + level2_number + level3_number,
2 + apps_number + level2_number + level3_number + len(unique_values)))
value_map = dict(zip(unique_values, temp))
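# id layout: app_list tokens start at 1, level2 ids follow, then level3 ids, and the
# remaining categorical values get ids from 2 + apps_number + level2_number + level3_number
# onwards, so all features share one global id space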
rdd = df.select("stat_date","y", "z","app_list","level2_ids","level3_ids",
"tag1","tag2","tag3","tag4","tag5","tag6","tag7",
"ucity_id", "ccity_name","device_type", "manufacturer", "channel", "top", "time",
"hospital_id","treatment_method", "price_min", "price_max", "treatment_time",
"maintain_time","recover_time").rdd
rdd.persist()
# TODO: remove the train filter below once this goes live, because the most recent day's data should also be part of the training set
train = rdd.filter(lambda x: x[0] != validate_date) \
.map(lambda x: (float(x[1]),float(x[2]),app_list_func(x[3], app_list_map), app_list_func(x[4], leve2_map),
app_list_func(x[5], leve3_map), app_list_func(x[6], leve2_map),app_list_func(x[7], leve2_map),
app_list_func(x[8], leve2_map), app_list_func(x[9], leve2_map),app_list_func(x[10], leve2_map),
app_list_func(x[11], leve2_map),app_list_func(x[12], leve2_map),
[value_map[x[0]], value_map[x[13]],value_map[x[14]], value_map[x[15]], value_map[x[16]],
value_map[x[17]],value_map[x[18]], value_map[x[19]], value_map[x[20]],value_map[x[21]],
value_map[x[22]], value_map[x[23]], value_map[x[24]],value_map[x[25]],value_map[x[26]]]))
spark.createDataFrame(train).toDF("y","z","app_list","level2_list","level3_list",
"tag1_list","tag2_list","tag3_list","tag4_list",
"tag5_list","tag6_list","tag7_list","ids") \
.coalesce(1).write.format("tfrecords").save(path=path + "tr/", mode="overwrite")
print("train tfrecord done")
test = rdd.filter(lambda x: x[0] == validate_date) \
.map(lambda x: (float(x[1]), float(x[2]), app_list_func(x[3], app_list_map), app_list_func(x[4], leve2_map),
app_list_func(x[5], leve3_map), app_list_func(x[6], leve2_map), app_list_func(x[7], leve2_map),
app_list_func(x[8], leve2_map), app_list_func(x[9], leve2_map), app_list_func(x[10], leve2_map),
app_list_func(x[11], leve2_map), app_list_func(x[12], leve2_map),
[value_map[x[0]], value_map[x[13]], value_map[x[14]], value_map[x[15]], value_map[x[16]],
value_map[x[17]], value_map[x[18]], value_map[x[19]], value_map[x[20]], value_map[x[21]],
value_map[x[22]], value_map[x[23]], value_map[x[24]], value_map[x[25]], value_map[x[26]]]))
spark.createDataFrame(test).toDF("y", "z", "app_list", "level2_list", "level3_list",
"tag1_list", "tag2_list", "tag3_list", "tag4_list",
"tag5_list", "tag6_list", "tag7_list", "ids") \
.coalesce(1).write.format("tfrecords").save(path=path+"va/", mode="overwrite")
print("va tfrecord done")
rdd.unpersist()
return validate_date,value_map,app_list_map,leve2_map,leve3_map
def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
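# encode the prediction candidates with the vocabularies built during training, split them
# into native (label == 0) and nearby (label == 1) sets, and write each as a CSV of
# (city, uid, cid_id) plus a single TFRecord file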
sql = "select e.y,e.z,e.label,e.ucity_id,feat.level2_ids,e.ccity_name," \
"u.device_type,u.manufacturer,u.channel,c.top,e.device_id,e.cid_id,cut.time," \
"dl.app_list,e.hospital_id,feat.level3_ids," \
"wiki.tag as tag1,question.tag as tag2,search.tag as tag3,budan.tag as tag4," \
"ot.tag as tag5,sixin.tag as tag6,cart.tag as tag7," \
"k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time " \
"from jerry_test.esmm_pre_data e " \
"left join jerry_test.user_feature u on e.device_id = u.device_id " \
"left join jerry_test.cid_type_top c on e.device_id = c.device_id " \
"left join jerry_test.cid_time_cut cut on e.cid_id = cut.cid " \
"left join jerry_test.device_app_list dl on e.device_id = dl.device_id " \
"left join jerry_test.diary_feat feat on e.cid_id = feat.diary_id " \
"left join jerry_test.wiki_tag wiki on e.device_id = wiki.device_id" \
"left join jerry_test.question_tag question on e.device_id = question.device_id " \
"left join jerry_test.search_tag search on e.device_id = search.device_id " \
"left join jerry_test.budan_tag budan on e.device_id = budan.device_id " \
"left join jerry_test.order_tag ot on e.device_id = ot.device_id " \
"left join jerry_test.sixin_tag sixin on e.device_id = sixin.device_id " \
"left join jerry_test.cart_tag cart on e.device_id = cart.device_id " \
"left join jerry_test.train_Knowledge_network_data k on feat.level2 = k.level2_id"
features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
"channel", "top", "time", "hospital_id",
"treatment_method", "price_min", "price_max", "treatment_time", "maintain_time", "recover_time"]
df = spark.sql(sql)
df = df.na.fill(dict(zip(features, features)))
rdd = df.select("label", "y", "z","ucity_id","device_id","cid_id","app_list", "level2_ids", "level3_ids",
"tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7",
"ucity_id", "ccity_name", "device_type", "manufacturer", "channel", "top", "time",
"hospital_id", "treatment_method", "price_min", "price_max", "treatment_time",
"maintain_time", "recover_time") \
.rdd.map(lambda x: (x[0],float(x[1]),float(x[2]),x[3],x[4],x[5],
app_list_func(x[6], app_list_map),app_list_func(x[7], leve2_map),
app_list_func(x[8], leve3_map), app_list_func(x[9], leve2_map),
app_list_func(x[10], leve2_map),app_list_func(x[11], leve2_map),
app_list_func(x[12], leve2_map), app_list_func(x[13], leve2_map),
app_list_func(x[14], leve2_map), app_list_func(x[15], leve2_map),
[value_map.get(date, 299999),value_map.get(x[16], 299998),
value_map.get(x[17], 299997),value_map.get(x[18], 299996),
value_map.get(x[19], 299995), value_map.get(x[20], 299994),
value_map.get(x[21], 299993), value_map.get(x[22], 299992),
value_map.get(x[23], 299991), value_map.get(x[24], 299990),
value_map.get(x[25], 299989), value_map.get(x[26], 299988),
value_map.get(x[27], 299987), value_map.get(x[28], 299986),
value_map.get(x[29], 299985)
]))
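# values unseen during training fall back to the fixed ids 299985-299999 above, one
# distinct out-of-vocabulary bucket per field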
rdd.persist()
native_pre = spark.createDataFrame(rdd.filter(lambda x:x[0] == 0).map(lambda x:(x[3],x[4],x[5])))\
.toDF("city","uid","cid_id")
print("native csv")
native_pre.toPandas().to_csv(local_path+"native.csv", header=True)
# TODO: write the CSV like this instead:
# native_pre.coalesce(1).write.format('com.databricks.spark.csv').save(path+"native/",header = 'true')
# the prediction tfrecord must be written as a single file so that row order is preserved
spark.createDataFrame(rdd.filter(lambda x: x[0] == 0)
.map(lambda x: (x[1],x[2],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15],x[16]))) \
.toDF("y","z","app_list", "level2_list", "level3_list","tag1_list", "tag2_list", "tag3_list", "tag4_list",
"tag5_list", "tag6_list", "tag7_list", "ids").coalesce(1).write.format("tfrecords") \
.save(path=path+"native/", mode="overwrite")
print("native tfrecord done")
native_pre = spark.createDataFrame(rdd.filter(lambda x: x[0] == 1).map(lambda x: (x[3], x[4], x[5]))) \
.toDF("city", "uid", "cid_id")
print("nearby csv")
native_pre.toPandas().to_csv(local_path + "nearby.csv", header=True)
# TODO: write the CSV like this instead:
# nearby_pre.coalesce(1).write.format('com.databricks.spark.csv').save(path+"nearby/",header = 'true')
spark.createDataFrame(rdd.filter(lambda x: x[0] == 1)
.map(
lambda x: (x[1], x[2], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15], x[16]))) \
.toDF("y", "z", "app_list", "level2_list", "level3_list", "tag1_list", "tag2_list", "tag3_list", "tag4_list",
"tag5_list", "tag6_list", "tag7_list", "ids").coalesce(1).write.format("tfrecords") \
.save(path=path + "nearby/", mode="overwrite")
print("nearby tfrecord done")
rdd.unpersist()
def con_sql(db,sql):
cursor = db.cursor()
cursor.execute(sql)
result = cursor.fetchall()
df = pd.DataFrame(list(result))
db.close()
return df
if __name__ == '__main__':
embed_dim = 6
combiner = "sum"
train_values, train_labels, test_values, test_labels, ucity_id_max, clevel1_id_max, ccity_name_max, \
device_type_max, manufacturer_max, channel_max, top_max, time_max = get_data()
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
ucity_id, clevel1_id, ccity_name, device_type, manufacturer, channel, top, \
time, targets, LearningRate = get_inputs()
esmm_embedding_layer = define_embedding_layers(combiner,embed_dim,ucity_id, ucity_id_max, clevel1_id_max,clevel1_id,
ccity_name_max,ccity_name,device_type_max,device_type,manufacturer_max,
manufacturer,channel,channel_max,top,top_max,time,time_max)
ctr_prob, cvr_prob = define_ctr_cvr_layer(esmm_embedding_layer)
with tf.name_scope("loss"):
ctr_prob_one = tf.slice(ctr_prob, [0, 1], [-1, 1]) # [batch_size , 1]
cvr_prob_one = tf.slice(cvr_prob, [0, 1], [-1, 1]) # [batchsize, 1 ]
ctcvr_prob_one = ctr_prob_one * cvr_prob_one # [ctr*cvr]
ctcvr_prob = tf.concat([1 - ctcvr_prob_one, ctcvr_prob_one], axis=1)
ctr_label = tf.slice(targets, [0, 0], [-1, 1]) # target: [click, buy]
ctr_label = tf.concat([1 - ctr_label, ctr_label], axis=1) # [1-click, click]
cvr_label = tf.slice(targets, [0, 1], [-1, 1])
ctcvr_label = tf.concat([1 - cvr_label, cvr_label], axis=1)
# single column indicating whether click == 1
ctr_clk = tf.slice(targets, [0, 0], [-1, 1])
ctr_clk_dup = tf.concat([ctr_clk, ctr_clk], axis=1)
# clicked subset CVR loss
cvr_loss = - tf.multiply(tf.log(cvr_prob) * ctcvr_label, ctr_clk_dup)
# batch CTR loss
ctr_loss = - tf.log(ctr_prob) * ctr_label # -y*log(p)-(1-y)*log(1-p)
# batch CTCVR loss
ctcvr_loss = - tf.log(ctcvr_prob) * ctcvr_label
# loss = tf.reduce_mean(ctr_loss + ctcvr_loss + cvr_loss)
# loss = tf.reduce_mean(ctr_loss + ctcvr_loss)
# loss = tf.reduce_mean(ctr_loss + cvr_loss)
loss = tf.reduce_mean(cvr_loss)
ctr_loss = tf.reduce_mean(ctr_loss)
cvr_loss = tf.reduce_mean(cvr_loss)
ctcvr_loss = tf.reduce_mean(ctcvr_loss)
# optimize the loss
# train_op = tf.train.AdamOptimizer(lr).minimize(loss) #cost
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(lr)
gradients = optimizer.compute_gradients(loss) # cost
train_op = optimizer.apply_gradients(gradients, global_step=global_step)
sparkConf = SparkConf().set("spark.hive.mapred.supports.subdirectories", "true") \
.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", "true") \
.set("spark.tispark.plan.allow_index_double_read", "false") \
.set("spark.tispark.plan.allow_index_read", "true") \
.set("spark.sql.extensions", "org.apache.spark.sql.TiExtensions") \
.set("spark.tispark.pd.addresses", "172.16.40.158:2379").set("spark.io.compression.codec", "lzf")\
.set("spark.driver.maxResultSize", "8g").set("spark.sql.avro.compression.codec","snappy")
spark = SparkSession.builder.config(conf=sparkConf).enableHiveSupport().getOrCreate()
ti = pti.TiContext(spark)
ti.tidbMapDatabase("jerry_test")
ti.tidbMapDatabase("eagle")
spark.sparkContext.setLogLevel("WARN")
path = "hdfs:///strategy/esmm/"
local_path = "/home/gmuser/esmm/"
validate_date, value_map, app_list_map, leve2_map, leve3_map = feature_engineer()
get_predict(validate_date, value_map, app_list_map, leve2_map, leve3_map)
@@ -17,7 +17,6 @@ def app_list_func(x,l):
else:
e.append(0)
return e
# return ",".join([str(j) for j in e])
def multi_hot(df,column,n):
@@ -32,7 +31,7 @@ def multi_hot(df,column,n):
return number,app_list_map
def feature_engineer():
def feature():
db = pymysql.connect(host='172.16.40.158', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
sql = "select max(stat_date) from esmm_train_data"
validate_date = con_sql(db, sql)[0].values.tolist()[0]
@@ -99,9 +98,9 @@ def get_predict(date,value_map,app_list_map):
native_pre = spark.createDataFrame(rdd.filter(lambda x:x[4] == 0).map(lambda x:(x[1],x[2],x[3])))\
.toDF("city","uid","cid_id")
print("native")
# native_pre.toPandas().to_csv(local_path+"native.csv", header=True)
native_pre.coalesce(1).write.format('com.databricks.spark.csv').save(path+"hello.csv",header = 'true')
native_pre.toPandas().to_csv(local_path+"native.csv", header=True)
# TODO: write the CSV like this instead:
# native_pre.coalesce(1).write.format('com.databricks.spark.csv').save(path+"native/",header = 'true')
# the prediction tfrecord must be written as a single file so that row order is preserved
spark.createDataFrame(rdd.filter(lambda x: x[4] == 0).map(lambda x: (x[0],x[5],x[6],x[7]))) \
@@ -145,7 +144,7 @@ if __name__ == '__main__':
path = "hdfs:///strategy/esmm/"
local_path = "/home/gmuser/esmm/"
validate_date, value_map, app_list_map = feature_engineer()
validate_date, value_map, app_list_map = feature()
get_predict(validate_date, value_map, app_list_map)
# df = spark.read.format("tfrecords").option("recordType", "Example").load("/strategy/va.tfrecord")