Commit 4412a6cc authored by 宋柯

Model debugging

parent 6a2b8057
import sys
import os
from datetime import date, timedelta
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
import time
import redis
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import pyspark.sql as sql
from pyspark.sql.functions import when
from pyspark.sql.types import *
from pyspark.sql import functions as F
from collections import defaultdict
import json
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import utils.configUtils as configUtils
# import utils.connUtils as connUtils
import pandas as pd
import math
# os.environ["PYSPARK_PYTHON"]="/usr/bin/python3"
"""
Feature engineering
"""
NUMBER_PRECISION = 2
VERSION = configUtils.SERVICE_VERSION
FEATURE_USER_KEY = "Strategy:rec:feature:service:" + VERSION + ":user:"
FEATURE_ITEM_KEY = "Strategy:rec:feature:service:" + VERSION + ":item:"
FEATURE_VOCAB_KEY = "Strategy:rec:vocab:service:" + VERSION
FEATURE_COLUMN_KEY = "Strategy:rec:column:service:" + VERSION
ITEM_PREFIX = "ITEM_"
USER_PREFIX = "USER_"
CATEGORY_PREFIX = "CATEGORY_"
MULTI_CATEGORY_PREFIX = "MULTI_CATEGORY_"
NUMERIC_PREFIX = "NUMERIC_"
DATA_PATH_TRAIN = "/data/files/service_feature_{}_train.csv".format(VERSION)
def getRedisConn():
pool = redis.ConnectionPool(host="172.16.50.145",password="XfkMCCdWDIU%ls$h",port=6379,db=0)
conn = redis.Redis(connection_pool=pool)
# conn = redis.Redis(host="172.16.50.145", port=6379, password="XfkMCCdWDIU%ls$h",db=0)
# conn = redis.Redis(host="172.18.51.10", port=6379,db=0) #test
return conn
def parseTags(tags,i):
tags_arr = tags.split(",")
if len(tags_arr) >= i:
return tags_arr[i-1]
else:
return "-1"
def parseTagsFromArray(tagsArray,i):
if len(tagsArray) >= i:
return tagsArray[i - 1]
else:
return "-1"
def numberToBucket(num):
res = 0
if not num:
return str(res)
num = int(num)
if num >= 1000:
res = 1000//10
else:
res = int(num)//10
return str(res)
def priceToBucket(num):
res = 0
if not num:
return str(res)
if num >= 100000:
res = 100000//1000
else:
res = int(num)//1000
return str(res)
numberToBucketUdf = F.udf(numberToBucket, StringType())
priceToBucketUdf = F.udf(priceToBucket, StringType())
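# Illustrative bucket mappings (my own examples, assuming numeric-like inputs):
#   numberToBucket(57)  -> "5"    numberToBucket(2500)  -> "100"
#   priceToBucket(4999) -> "4"    priceToBucket(250000) -> "100"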
def getItemStaticFeatures(itemStatisticDays, startDay, endDay):
itemStatisticStartDay = addDays(-itemStatisticDays)
itemStatisticSql = getItemStatisticSql(itemStatisticStartDay, endDay)
itemStatisticDF = spark.sql(itemStatisticSql)
# itemStatisticDF.show(100, False)
partitionDatas = generatePartitionDates(itemStatisticDays)
partitionDatasBC = spark.sparkContext.broadcast(partitionDatas)
def splitPartitionDatesFlatMapFunc(row):
card_id = row.card_id
label = row.label
partition_date_label_count_list = row.partition_date_label_count_list
partition_date_label_count_dict = dict(map(lambda s: (s.split('_')[0], s.split('_')[1]), partition_date_label_count_list))
res = []
for partition_date in partitionDatasBC.value:
res.append((card_id, partition_date, label, partition_date_label_count_dict.get(partition_date, '0')))
return res
itemStatisticDF = itemStatisticDF.rdd.flatMap(splitPartitionDatesFlatMapFunc).toDF(["card_id", "partition_date", "label", "label_count"])
itemStatisticDF.createOrReplaceTempView("itemStatisticDF")
itemStatisticSql = """
SELECT
card_id,
label,
partition_date,
label_count,
COALESCE(SUM(label_count) OVER(PARTITION BY card_id, label ORDER BY partition_date ROWS BETWEEN {itemStatisticStartDays} PRECEDING AND 1 PRECEDING), 0) label_count_sum,
COALESCE(AVG(label_count) OVER(PARTITION BY card_id, label ORDER BY partition_date ROWS BETWEEN {itemStatisticStartDays} PRECEDING AND 1 PRECEDING), 0) label_count_avg,
COALESCE(STDDEV_POP(label_count) OVER(PARTITION BY card_id, label ORDER BY partition_date ROWS BETWEEN {itemStatisticStartDays} PRECEDING AND 1 PRECEDING), 0) label_count_stddev
FROM
itemStatisticDF
WHERE partition_date >= '{startDay}' and partition_date <= '{endDay}'
""".format(itemStatisticStartDays = itemStatisticStartDays, startDay = startDay, endDay = endDay)
print("itemStatisticSql: {}".format(itemStatisticSql))
staticFeatures = spark.sql(itemStatisticSql)
clickStaticFeatures = staticFeatures.where(F.col('label') == F.lit(1))\
.withColumnRenamed('label_count_sum', ITEM_PREFIX + NUMERIC_PREFIX + 'click_count_sum')\
.withColumnRenamed('label_count_avg', ITEM_PREFIX + NUMERIC_PREFIX + 'click_count_avg')\
.withColumnRenamed('label_count_stddev', ITEM_PREFIX + NUMERIC_PREFIX + 'click_count_stddev')
expStaticFeatures = staticFeatures.where(F.col('label') == F.lit(0))\
.withColumnRenamed('label_count_sum', ITEM_PREFIX + NUMERIC_PREFIX + 'exp_count_sum')\
.withColumnRenamed('label_count_avg', ITEM_PREFIX + NUMERIC_PREFIX + 'exp_count_avg')\
.withColumnRenamed('label_count_stddev', ITEM_PREFIX + NUMERIC_PREFIX + 'exp_count_stddev')
drop_columns = ['label', 'label_count']
clickStaticFeatures = clickStaticFeatures.drop(*drop_columns)
# clickStaticFeatures.show(20, truncate = False)
expStaticFeatures = expStaticFeatures.drop(*drop_columns)
# expStaticFeatures.show(20, truncate = False)
return clickStaticFeatures, expStaticFeatures
# ratingDF, itemEsFeatureDF, startDay, endDay
def itemStatisticFeaturesProcess(samples_iEsF_iStatisticF):
# Bucketize continuous features
bucket_suffix = "_Bucket"
for col in ["click_count_sum", "click_count_avg", "exp_count_sum", "exp_count_avg"]:
new_col = col + bucket_suffix
samples_iEsF_iStatisticF = samples_iEsF_iStatisticF.withColumn(new_col, numberToBucketUdf(F.col(col))) \
.drop(col) \
.withColumn(new_col, F.when(F.col(new_col).isNull(), "0").otherwise(F.col(new_col)))
# Stddev handling
number_suffix = "_number"
for col in ["click_count_stddev", "exp_count_stddev"]:
new_col = col + number_suffix
samples_iEsF_iStatisticF = samples_iEsF_iStatisticF.withColumn(new_col, F.when(F.col(col).isNull(), 0).otherwise(1 / (F.col(col) + 1))).drop(col)
samples_iEsF_iStatisticF.show(20, truncate=False)
return samples_iEsF_iStatisticF
def addUserStaticsFeatures(samples,dataVocab):
print("user统计特征处理...")
samples = samples \
.withColumn('userRatingCount',F.format_number(F.sum(F.lit(1)).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)), NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingAvg", F.format_number(F.avg(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)), NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingStddev", F.format_number(F.stddev(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),NUMBER_PRECISION).cast("float")) \
.withColumn("userClickCount", F.format_number(F.sum(when(F.col('label') == 1, F.lit(1)).otherwise(F.lit(0))).over(sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)),NUMBER_PRECISION).cast("float")) \
.withColumn("userExpCount", F.format_number(F.sum(when(F.col('label') == 0, F.lit(1)).otherwise(F.lit(0))).over(sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)),NUMBER_PRECISION).cast("float")) \
.withColumn("userCtr", F.format_number(F.col("userClickCount")/(F.col("userExpCount")+1),NUMBER_PRECISION).cast("float")) \
.filter(F.col("userRatingCount") > 1)
samples.show(20, truncate=False)
# Bucketize continuous features
bucket_vocab = [str(i) for i in range(101)]
bucket_suffix = "_Bucket"
for col in ["userRatingCount","userRatingAvg","userClickCount","userExpCount"]:
new_col = col + bucket_suffix
samples = samples.withColumn(new_col, numberToBucketUdf(F.col(col)))\
.drop(col)\
.withColumn(new_col,F.when(F.col(new_col).isNull(),"0").otherwise(F.col(new_col)))
dataVocab[new_col] = bucket_vocab
# Stddev handling
number_suffix = "_number"
for col in ["userRatingStddev"]:
new_col = col + number_suffix
samples = samples.withColumn(new_col,F.when(F.col(col).isNull(),0).otherwise(1/(F.col(col)+1))).drop(col)
for col in ["userCtr"]:
new_col = col + number_suffix
samples = samples.withColumn(col, F.when(F.col(col).isNull(), 0).otherwise(F.col(col))).withColumnRenamed(col, new_col)
samples.printSchema()
samples.show(20, truncate=False)
return samples
from collections.abc import Iterable
def flatten(items):
for x in items:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
yield from flatten(x)
else:
yield x
def itemEsFeaturesProcess(itemDF, spark):
print("item es 特征工程 ")
item_es_feature_start_time = int(round(time.time()))
item_categoty_cols = ['id', 'service_type', 'merchant_id', 'doctor_type', 'doctor_id',
'doctor_famous', 'hospital_id', 'hospital_city_tag_id', 'hospital_type', 'hospital_is_high_quality']
item_multi_categots_cols =['tags_v3', 'second_demands', 'second_solutions', 'second_positions']
for item_categoty_col in item_categoty_cols:
itemDF[ITEM_PREFIX + CATEGORY_PREFIX + item_categoty_col] = itemDF[item_categoty_col]
itemDF = itemDF.drop(columns = item_categoty_cols)
for item_multi_categots_col in item_multi_categots_cols:
itemDF[ITEM_PREFIX + MULTI_CATEGORY_PREFIX + item_multi_categots_col] = itemDF[item_multi_categots_col]
itemDF = itemDF.drop(columns = item_multi_categots_cols)
item_numeric_cols = ['case_count', 'sales_count', 'discount', 'sku_price']
for item_numeric_col in item_numeric_cols:
itemDF[ITEM_PREFIX + NUMERIC_PREFIX + item_numeric_col] = itemDF[item_numeric_col]
itemDF = itemDF.drop(columns = item_numeric_cols)
itemEsFeatureDF = spark.createDataFrame(itemDF)
itemEsFeatureDF.printSchema()
itemEsFeatureDF.show(10, truncate=False)
item_es_feature_end_time = int(round(time.time()))
print("item es 特征工程, 耗时: {}s".format(item_es_feature_end_time - item_es_feature_start_time))
return itemEsFeatureDF
def extractTags(genres_list):
# Weight tags by their position in the click list
genres_dict = defaultdict(int)
for i,genres in enumerate(genres_list):
for genre in genres.split(','):
genres_dict[genre] += i
sortedGenres = sorted(genres_dict.items(), key=lambda x: x[1], reverse=True)
return [x[0] for x in sortedGenres]
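# Illustrative example (my own): extractTags(["a,b", "b,c"]) accumulates weights a:0, b:1, c:1
# and returns ['b', 'c', 'a'], so tags that appear later (and more often) in the click list rank first.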
# This Spark SQL version does not support F.reverse
def arrayReverse(arr):
arr.reverse()
return arr
"""
p —— 概率,即点击的概率,也就是 CTR
n —— 样本总数,即曝光数
z —— 在正态分布里,均值 + z * 标准差会有一定的置信度。例如 z 取 1.96,就有 95% 的置信度。
Wilson区间的含义就是,就是指在一定置信度下,真实的 CTR 范围是多少
"""
def wilson_ctr(num_pv, num_click):
num_pv = float(num_pv)
num_click = float(num_click)
if num_pv * num_click == 0 or num_pv < num_click:
return 0.0
z = 1.96
n = num_pv
p = num_click / num_pv
score = (p + z*z/(2*n) - z*math.sqrt((p*(1.0 - p) + z*z /(4.0*n))/n)) / (1.0 + z*z/n)
return float(score)
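# Rough worked example (my own numbers): with z = 1.96 the Wilson lower bound shrinks low-volume
# CTRs, e.g. wilson_ctr(10, 5) ≈ 0.24 while wilson_ctr(1000, 500) ≈ 0.47, even though the raw CTR
# is 0.5 in both cases.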
def getUserProfileFeature(spark, startDay, endDay):
# Connect to the doris_olap database
userProfileFeatureDF = spark.read.jdbc('jdbc:mysql://172.16.30.136:3306/doris_olap', 'user_tag3_portrait', numPartitions = 100,
properties = { 'user': 'doris_olap', 'password': 'bA27hXasdfswuolap', 'driver': 'com.mysql.jdbc.Driver' })
userProfileFeatureDF.createOrReplaceTempView("userProfileFeatureDF")
table_query = """
select date as dt, cl_id as device_id, second_solutions, second_demands, second_positions, projects
from userProfileFeatureDF
where date >= '{startDay}' and date <= '{endDay}'
""".format(startDay = startDay, endDay = endDay)
print(table_query)
userProfileFeatureDF = spark.sql(table_query)
def addOneDay(dt):
return (date.fromisoformat(dt) + timedelta(days = 1)).strftime('%Y%m%d')
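# e.g. addOneDay('2021-06-30') -> '20210701': the day-dt profile snapshot is keyed by the next
# day's partition_date (my reading of the transformation below).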
addOneDay_UDF = F.udf(addOneDay, StringType())
userProfileFeatureDF = userProfileFeatureDF.withColumn('partition_date', addOneDay_UDF('dt'))\
.withColumnRenamed("second_solutions", USER_PREFIX + MULTI_CATEGORY_PREFIX + "second_solutions")\
.withColumnRenamed("second_demands", USER_PREFIX + MULTI_CATEGORY_PREFIX + "second_demands")\
.withColumnRenamed("second_positions", USER_PREFIX + MULTI_CATEGORY_PREFIX + "second_positions")\
.withColumnRenamed("projects", USER_PREFIX + MULTI_CATEGORY_PREFIX + "projects")\
.drop('dt')
userProfileFeatureDF.cache()
userProfileFeatureDF.show(20, False)
return userProfileFeatureDF
def addUserFeatures(samples):
extractTagsUdf = F.udf(extractTags, ArrayType(StringType()))
arrayReverseUdf = F.udf(arrayReverse, ArrayType(StringType()))
ctrUdf = F.udf(wilson_ctr, FloatType())
print("user历史数据处理...")
# user历史记录
samples = samples.withColumn('userPositiveHistory',F.collect_list(when(F.col('label') == 1, F.col('item_id')).otherwise(F.lit(None))).over(sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)))
samples = samples.withColumn("userPositiveHistory", arrayReverseUdf(F.col("userPositiveHistory")))
for i in range(1,11):
samples = samples.withColumn("userRatedHistory"+str(i), F.when(F.col("userPositiveHistory")[i-1].isNotNull(),F.col("userPositiveHistory")[i-1]).otherwise("-1"))
samples = samples.drop("userPositiveHistory")
# User preferences
print("User preference data")
for c,v in multiVocab.items():
new_col = "user" + "__" + c
samples = samples.withColumn(new_col, extractTagsUdf(F.collect_list(when(F.col('label') == 1, F.col(c)).otherwise(F.lit(None))).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1))))
for i in range(1, 6):
samples = samples.withColumn(new_col + "__" + str(i),F.when(F.col(new_col)[i - 1].isNotNull(), F.col(new_col)[i - 1]).otherwise("-1"))
dataVocab[new_col + "__" + str(i)] = v
samples = samples.drop(new_col).drop(c)
print("user统计特征处理...")
samples = samples \
.withColumn('userRatingCount', F.format_number(
F.sum(F.lit(1)).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),
NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingAvg", F.format_number(
F.avg(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),
NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingStddev", F.format_number(
F.stddev(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),
NUMBER_PRECISION).cast("float")) \
.withColumn("userClickCount", F.format_number(
F.sum(when(F.col('label') == 1, F.lit(1)).otherwise(F.lit(0))).over(
sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)), NUMBER_PRECISION).cast(
"float")) \
.withColumn("userExpCount", F.format_number(F.sum(when(F.col('label') == 0, F.lit(1)).otherwise(F.lit(0))).over(
sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)), NUMBER_PRECISION).cast(
"float")) \
.withColumn("userCtr",F.format_number(ctrUdf(F.col("userClickCount"),F.col("userExpCount")),NUMBER_PRECISION))\
.filter(F.col("userRatingCount") > 1)
samples.show(10, truncate=False)
# Bucketize continuous features
bucket_vocab = [str(i) for i in range(101)]
bucket_suffix = "_Bucket"
for col in ["userRatingCount", "userRatingAvg", "userClickCount", "userExpCount"]:
new_col = col + bucket_suffix
samples = samples.withColumn(new_col, numberToBucketUdf(F.col(col))) \
.drop(col) \
.withColumn(new_col, F.when(F.col(new_col).isNull(), "0").otherwise(F.col(new_col)))
dataVocab[new_col] = bucket_vocab
# Stddev handling
number_suffix = "_number"
for col in ["userRatingStddev"]:
new_col = col + number_suffix
samples = samples.withColumn(new_col, F.when(F.col(col).isNull(), 0).otherwise(1 / (F.col(col) + 1))).drop(col)
for col in ["userCtr"]:
new_col = col + number_suffix
samples = samples.withColumn(col, F.when(F.col(col).isNull(), 0).otherwise(F.col(col))).withColumnRenamed(col,
new_col)
samples.printSchema()
samples.show(10,truncate=False)
return samples
def addSampleLabel(ratingSamples):
ratingSamples = ratingSamples.withColumn('label', when(F.col('rating') >= 1, 1).otherwise(0))
ratingSamples.show(5, truncate=False)
ratingSamples.printSchema()
return ratingSamples
def samplesNegAndUnion(samplesPos,samplesNeg):
# Positive:negative sample ratio of 1:4
pos_count = samplesPos.count()
neg_count = samplesNeg.count()
print("before filter posSize:{},negSize:{}".format(str(pos_count), str(neg_count)))
samplesNeg = samplesNeg.sample(pos_count * 4 / neg_count)
samples = samplesNeg.union(samplesPos)
dataSize = samples.count()
print("dataSize:{}".format(str(dataSize)))
return samples
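# Example of the sampling above (my own numbers): pos_count=1000, neg_count=20000 gives
# fraction 0.2, keeping roughly 4000 negatives for an ~1:4 pos:neg ratio.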
def splitAndSaveTrainingTestSamplesByTimeStamp(samples,splitTimestamp, file_path):
samples = samples.withColumn("timestampLong", F.col("timestamp").cast(LongType()))
# quantile = smallSamples.stat.approxQuantile("timestampLong", [0.8], 0.05)
# splitTimestamp = quantile[0]
train = samples.where(F.col("timestampLong") <= splitTimestamp).drop("timestampLong")
test = samples.where(F.col("timestampLong") > splitTimestamp).drop("timestampLong")
print("split train size:{},test size:{}".format(str(train.count()),str(test.count())))
trainingSavePath = file_path + '_train'
testSavePath = file_path + '_test'
train.write.option("header", "true").option("delimiter", "|").mode('overwrite').csv(trainingSavePath)
test.write.option("header", "true").option("delimiter", "|").mode('overwrite').csv(testSavePath)
def collectColumnToVocab(samples,column):
datas = samples.select(column).distinct().collect()
vocabSet = set()
for d in datas:
if d[column]:
vocabSet.add(str(d[column]))
return list(vocabSet)
def collectMutiColumnToVocab(samples,column):
datas = samples.select(column).distinct().collect()
tagSet = set()
for d in datas:
if d[column]:
for tag in d[column].split(","):
tagSet.add(tag)
tagSet.add("-1") # 空值默认
return list(tagSet)
def dataVocabToRedis(dataVocab):
conn = getRedisConn()
conn.set(FEATURE_VOCAB_KEY,dataVocab)
conn.expire(FEATURE_VOCAB_KEY,60 * 60 * 24 * 7)
def saveVocab(key, vocab):
conn = getRedisConn()
conn.delete(key)
conn.lpush(key, *vocab)
conn.expire(key, 60 * 60 * 24)
def featureColumnsToRedis(columns):
conn = getRedisConn()
conn.set(FEATURE_COLUMN_KEY, json.dumps(columns))
conn.expire(FEATURE_COLUMN_KEY, 60 * 60 * 24 * 7)
def featureToRedis(key,datas):
conn = getRedisConn()
for k,v in datas.items():
newKey = key+k
conn.set(newKey,v)
conn.expire(newKey, 60 * 60 * 24 * 7)
def userFeaturesToRedis(samples,columns,prefix,redisKey):
idCol = prefix+"id"
timestampCol = idCol+"_timestamp"
def toRedis(datas):
conn = getRedisConn()
for d in datas:
k = d[idCol]
v = json.dumps(d.asDict(), ensure_ascii=False)
newKey = redisKey + k
conn.set(newKey, v)
conn.expire(newKey, 60 * 60 * 24 * 7)
# Use timestamp to keep the latest record for each user
prefixSamples = samples.groupBy(idCol).agg(F.max("timestamp").alias(timestampCol))
resDatas = prefixSamples.join(samples, on=[idCol], how='inner').where(F.col("timestamp") == F.col(timestampCol))
resDatas = resDatas.select(*columns).distinct()
resDatas.show(10,truncate=False)
print(prefix, resDatas.count())
resDatas.repartition(8).foreachPartition(toRedis)
def itemFeaturesToRedis(itemStaticDF,redisKey):
idCol = "item_id"
def toRedis(datas):
conn = getRedisConn()
for d in datas:
k = d[idCol]
v = json.dumps(d.asDict(), ensure_ascii=False)
newKey = redisKey + k
conn.set(newKey, v)
conn.expire(newKey, 60 * 60 * 24 * 7)
itemStaticDF.repartition(8).foreachPartition(toRedis)
"""
Data loading
"""
CONTENT_TYPE = "service"
SERVICE_HOSTS = [
{'host': "172.16.52.33", 'port': 9200},
{'host': "172.16.52.19", 'port': 9200},
{'host': "172.16.52.48", 'port': 9200},
{'host': "172.16.52.27", 'port': 9200},
{'host': "172.16.52.34", 'port': 9200}
]
ES_INDEX = "gm-dbmw-service-read"
ES_INDEX_TEST = "gm_test-service-read"
ACTION_REG = r"""^\\d+$"""
def getEsConn_test():
host_config = [{'host': '172.18.52.14', 'port': 9200}, {'host': '172.18.52.133', 'port': 9200},
{'host': '172.18.52.7', 'port': 9200}]
return Elasticsearch(host_config, http_auth=('elastic', 'gm_test'), timeout=3600)
def getEsConn():
return Elasticsearch(SERVICE_HOSTS, http_auth=('elastic', 'gengmei!@#'), timeout=3600)
def getClickSql(start, end):
sql = """
SELECT t1.partition_date, t1.cl_id device_id, t1.card_id, t1.cl_type as os, t1.city_id as user_city_id
FROM
(
select partition_date, city_id, cl_id, business_id as card_id, cl_type
from online.bl_hdfs_maidian_updates
where action = 'page_view'
AND partition_date>='{startDay}' and partition_date<='{endDay}'
AND page_name='welfare_detail'
AND page_stay >= 2
AND cl_id is not null
AND cl_id != ''
AND business_id is not null
AND business_id != ''
group by partition_date, city_id, cl_id, business_id, cl_type
) AS t1
join
( --channel, new/old users
SELECT distinct device_id
FROM online.ml_device_day_active_status
where partition_date>='{startDay}' and partition_date<='{endDay}'
AND active_type in ('1','2','4')
and first_channel_source_type not in ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei','','unknown')
AND first_channel_source_type not like 'promotion\_jf\_%'
) t2
on t1.cl_id = t2.device_id
LEFT JOIN
( --exclude blacklisted devices
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY =regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)t3
on t3.device_id=t2.device_id
WHERE t3.device_id is null
""".format(startDay=start,endDay=end)
print(sql)
return sql
def getExposureSql(start, end):
# t1.partition_date, t1.cl_id device_id, t1.card_id, t1.time_stamp, t1.cl_type as os, t1.city_id as user_city_id
sql = """
SELECT t1.partition_date, t1.cl_id device_id, t1.card_id, cl_type as os, t1.city_id as user_city_id
from
( --new homepage card exposures
SELECT partition_date,city_id,cl_type,cl_id,card_id
FROM online.ml_community_precise_exposure_detail
where partition_date>='{startDay}' and partition_date<='{endDay}'
and action in ('page_precise_exposure','home_choiceness_card_exposure')
and cl_id IS NOT NULL
and card_id IS NOT NULL
and is_exposure='1'
--and page_name='home'
--and tab_name='精选'
--and page_name in ('home','search_result_more')
--and ((page_name='home' and tab_name='精选') or (page_name='category' and tab_name = '商品'))
and card_type in ('card','video')
and card_content_type in ('service')
and (get_json_object(exposure_card,'$.in_page_pos') is null or get_json_object(exposure_card,'$.in_page_pos') != 'seckill')
group by partition_date, city_id, cl_type, cl_id, card_id, app_session_id
) t1
join
( --channel, new/old users
SELECT distinct device_id
FROM online.ml_device_day_active_status
where partition_date>='{startDay}' and partition_date<='{endDay}'
AND active_type in ('1','2','4')
and first_channel_source_type not in ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei','','unknown')
AND first_channel_source_type not like 'promotion\_jf\_%'
) t2
on t1.cl_id = t2.device_id
LEFT JOIN
( --exclude blacklisted devices
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY =regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)t3
on t3.device_id=t2.device_id
WHERE t3.device_id is null
""".format(startDay=start,endDay=end)
print(sql)
return sql
def getItemStatisticSql(start, end):
sql = """
SELECT TTT.card_id, TTT.label, COLLECT_LIST(CONCAT(TTT.partition_date, '_', TTT.label_count)) partition_date_label_count_list
FROM
(
SELECT TT.card_id, TT.partition_date, TT.label, count(1) as label_count
FROM
(
SELECT T.partition_date, T.card_id, T.label
FROM
(
SELECT t1.partition_date, t1.cl_id device_id, t1.card_id, t1.cl_type as os, t1.city_id as user_city_id, 1 as label
FROM
(
select partition_date, city_id, cl_id, business_id as card_id, cl_type
from online.bl_hdfs_maidian_updates
where action = 'page_view'
AND partition_date>='{startDay}' and partition_date<='{endDay}'
AND page_name='welfare_detail'
AND page_stay >= 2
AND cl_id is not null
AND cl_id != ''
AND business_id is not null
AND business_id != ''
group by partition_date, city_id, cl_id, business_id, cl_type
) AS t1
join
( --channel, new/old users
SELECT distinct device_id
FROM online.ml_device_day_active_status
where partition_date>='{startDay}' and partition_date<='{endDay}'
AND active_type in ('1','2','4')
and first_channel_source_type not in ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei','','unknown')
AND first_channel_source_type not like 'promotion\_jf\_%'
) t2
on t1.cl_id = t2.device_id
LEFT JOIN
( --exclude blacklisted devices
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY =regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)t3
on t3.device_id=t2.device_id
WHERE t3.device_id is null
UNION
SELECT t1.partition_date, t1.cl_id device_id, t1.card_id, cl_type as os, t1.city_id as user_city_id, 0 as label
from
( --new homepage card exposures
SELECT partition_date, city_id, cl_type, cl_id, card_id
FROM online.ml_community_precise_exposure_detail
where partition_date>='{startDay}' and partition_date<='{endDay}'
and action in ('page_precise_exposure','home_choiceness_card_exposure')
and cl_id IS NOT NULL
and card_id IS NOT NULL
and is_exposure='1'
--and page_name='home'
--and tab_name='精选'
--and page_name in ('home','search_result_more')
--and ((page_name='home' and tab_name='精选') or (page_name='category' and tab_name = '商品'))
and card_type in ('card','video')
and card_content_type in ('service')
and (get_json_object(exposure_card,'$.in_page_pos') is null or get_json_object(exposure_card,'$.in_page_pos') != 'seckill')
group by partition_date, city_id, cl_type, cl_id, card_id, app_session_id
) t1
join
( --channel, new/old users
SELECT distinct device_id
FROM online.ml_device_day_active_status
where partition_date>='{startDay}' and partition_date<='{endDay}'
AND active_type in ('1','2','4')
and first_channel_source_type not in ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei','','unknown')
AND first_channel_source_type not like 'promotion\_jf\_%'
) t2
on t1.cl_id = t2.device_id
LEFT JOIN
( --exclude blacklisted devices
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY =regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)t3
on t3.device_id=t2.device_id
WHERE t3.device_id is null
) T
) TT
GROUP BY TT.card_id, TT.partition_date, TT.label
) TTT
GROUP BY TTT.card_id, TTT.label
""".format(startDay = start,endDay = end)
print(sql)
return sql
def connectDoris(spark, table):
return spark.read \
.format("jdbc") \
.option("driver", "com.mysql.jdbc.Driver") \
.option("url", "jdbc:mysql://172.16.30.136:3306/doris_prod") \
.option("dbtable", table) \
.option("user", "doris") \
.option("password", "o5gbA27hXHHm") \
.load()
def get_spark(appName):
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
spark = (SparkSession
.builder
.config(conf=sparkConf)
.appName(appName)
.enableHiveSupport()
.getOrCreate())
return spark
def init_es_query():
q = {
"_source": {
"includes":[]
},
"query": {
"bool": {
"must": [],
"must_not": [],
"should": []
}
}
}
return q
def parseSource(_source):
id = str(_source.setdefault("id",-1))
discount = _source.setdefault("discount",0)
case_count = _source.setdefault("case_count",0)
sales_count = _source.setdefault("sales_count",0)
service_type = str(_source.setdefault("service_type",-1))
second_demands = ','.join(_source.setdefault("second_demands",["-1"]))
second_solutions = ','.join(_source.setdefault("second_solutions",["-1"]))
second_positions = ','.join(_source.setdefault("second_positions",["-1"]))
tags_v3 = ','.join(_source.setdefault("tags_v3", ["-1"]))
# sku
sku_list = _source.setdefault("sku_list",[])
sku_tags_list = []
sku_show_tags_list = []
sku_price_list = []
for sku in sku_list:
sku_tags_list += sku.setdefault("sku_tags",[])
# sku_tags_list += sku.setdefault("sku_tags_id",[])
sku_show_tags_list.append(sku.setdefault("show_project_type_name",""))
price = sku.setdefault("price", 0.0)
if price > 0:
sku_price_list.append(price)
# sku_tags = ",".join([str(i) for i in sku_tags_list]) if len(sku_tags_list) > 0 else "-1"
# sku_show_tags = ",".join(sku_show_tags_list) if len(sku_show_tags_list) > 0 else "-1"
sku_price = min(sku_price_list) if len(sku_price_list) > 0 else 0.0
#merchant_id
merchant_id = str(_source.setdefault("merchant_id","-1"))
# doctor_type id famous_doctor
doctor = _source.setdefault("doctor",{})
doctor_type = str(doctor.setdefault("doctor_type","-1"))
doctor_id = str(doctor.setdefault("id","-1"))
doctor_famous = str(int(doctor.setdefault("famous_doctor",False)))
# hospital id city_tag_id hospital_type is_high_quality
hospital = doctor.setdefault("hospital", {})
hospital_id = str(hospital.setdefault("id", "-1"))
hospital_city_tag_id = str(hospital.setdefault("city_tag_id", -1))
hospital_type = str(hospital.setdefault("hospital_type", "-1"))
hospital_is_high_quality = str(int(hospital.setdefault("is_high_quality", False)))
data = [id,
discount,
case_count,
sales_count,
service_type,
merchant_id,
doctor_type,
doctor_id,
doctor_famous,
hospital_id,
hospital_city_tag_id,
hospital_type,
hospital_is_high_quality,
second_demands,
second_solutions,
second_positions,
tags_v3,
# sku_show_tags,
sku_price
]
return data
# Fetch item features from ES
def get_item_es_feature_df():
es_columns = ["id","discount", "sales_count", "doctor", "case_count", "service_type","merchant_id","second_demands", "second_solutions", "second_positions", "sku_list","tags_v3"]
query = init_es_query()
query["_source"]["includes"] = es_columns
print(json.dumps(query), flush=True)
es_cli = getEsConn()
scan_re = scan(client=es_cli, index=ES_INDEX, query=query, scroll='3m')
datas = []
for res in scan_re:
_source = res['_source']
data = parseSource(_source)
datas.append(data)
print("card size: ",len(datas))
itemColumns = ['card_id', ITEM_PREFIX + NUMERIC_PREFIX + 'discount',
ITEM_PREFIX + NUMERIC_PREFIX + 'case_count', ITEM_PREFIX + NUMERIC_PREFIX + 'sales_count',
ITEM_PREFIX + CATEGORY_PREFIX + 'service_type',ITEM_PREFIX + CATEGORY_PREFIX + 'merchant_id',
ITEM_PREFIX + CATEGORY_PREFIX + 'doctor_type', ITEM_PREFIX + CATEGORY_PREFIX + 'doctor_id',
ITEM_PREFIX + CATEGORY_PREFIX + 'doctor_famous', ITEM_PREFIX + CATEGORY_PREFIX + 'hospital_id',
ITEM_PREFIX + CATEGORY_PREFIX + 'hospital_city_tag_id', ITEM_PREFIX + CATEGORY_PREFIX + 'hospital_type',
ITEM_PREFIX + CATEGORY_PREFIX + 'hospital_is_high_quality', ITEM_PREFIX + MULTI_CATEGORY_PREFIX + 'second_demands',
ITEM_PREFIX + MULTI_CATEGORY_PREFIX + 'second_solutions', ITEM_PREFIX + MULTI_CATEGORY_PREFIX + 'second_positions',
ITEM_PREFIX + MULTI_CATEGORY_PREFIX + 'projects', ITEM_PREFIX + NUMERIC_PREFIX + 'sku_price']
itemEsFeatureDF = pd.DataFrame(datas,columns=itemColumns)
itemEsFeatureDF = spark.createDataFrame(itemEsFeatureDF)
itemEsFeatureDF.printSchema()
# itemEsFeatureDF.show(10, truncate=False)
return itemEsFeatureDF
def addDays(n, format="%Y%m%d"):
return (date.today() + timedelta(days=n)).strftime(format)
def generatePartitionDates(partitionDates):
return [addDays(-trainDay - 1) for trainDay in range(partitionDates)]
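# e.g. with today = 2021-07-10, generatePartitionDates(3) -> ['20210709', '20210708', '20210707']
# (the most recent N days, excluding today; the dates are my own illustration).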
# Show all columns
pd.set_option('display.max_columns', None)
# Show all rows
pd.set_option('display.max_rows', None)
# Set the display width of values to 100 (default is 50)
pd.set_option('display.max_colwidth', 100)
def get_click_exp_start_end_time(trainDays):
startDay = addDays(-int(trainDays) - 1)
endDay = addDays(-1)
print("click_exp_start_end_time: {}, {}".format(startDay, endDay), flush=True)
return startDay, endDay
def get_click_exp_rating_df(trainDays, spark):
# Start/end dates of the behavior data
startDay, endDay = get_click_exp_start_end_time(trainDays)
# Fetch exposure and click behavior data
clickSql = getClickSql(startDay,endDay)
expSql = getExposureSql(startDay,endDay)
clickDF = spark.sql(clickSql)
clickDF.createOrReplaceTempView("clickDF")
clickDF.cache()
print("click count: ", clickDF.count())
expDF = spark.sql(expSql)
expDF.createOrReplaceTempView("expDF")
expDF.cache()
# Filter clicked records out of the exposure data
print("expDF count before removing clicks: ", expDF.count())
expDF = spark.sql("""
SELECT t1.partition_date, t1.device_id, t1.card_id, t1.os, t1.user_city_id
FROM expDF t1
LEFT JOIN clickDF t2
ON t1.partition_date = t2.partition_date
AND t1.device_id = t2.device_id
AND t1.card_id = t2.card_id
AND t1.os = t2.os
AND t1.user_city_id = t2.user_city_id
WHERE t2.device_id is NULL
""")
print("expDF 过滤点击数据后 count: ", expDF.count())
#添加label并且规范字段命名
clickDF = clickDF.withColumn("label", F.lit(1))
expDF = expDF.withColumn("label", F.lit(0))
ratingDF = clickDF.union(expDF)
ratingDF = ratingDF.withColumn("user_city_id", F.when(F.col("user_city_id").isNull(), "-1").otherwise(F.col("user_city_id")))
ratingDF.cache()
print("ratingDF.columns: {}".format(ratingDF.columns))
ratingDF.show(20, truncate=False)
expDF.unpersist(True)
clickDF.unpersist(True)
return clickDF, expDF, ratingDF, startDay, endDay
if __name__ == '__main__':
start = time.time()
# Input arguments
trainDays = int(sys.argv[1])
itemStatisticStartDays = int(sys.argv[2])
print('trainDays:{}'.format(trainDays),flush=True)
spark = get_spark("SERVICE_FEATURE_CSV_EXPORT_SK")
spark.sparkContext.setLogLevel("ERROR")
# Fetch click and exposure data
clickDF, expDF, ratingDF, startDay, endDay = get_click_exp_rating_df(trainDays, spark)
#item Es Feature
itemEsFeatureDF = get_item_es_feature_df()
# Compute item statistical features
clickStaticFeatures, expStaticFeatures = getItemStaticFeatures(itemStatisticStartDays + trainDays + 1, startDay, endDay)
#user Profile Feature
userProfileFeatureDF = getUserProfileFeature(spark, addDays(-trainDays - 1, format = "%Y-%m-%d"), addDays(-1, format = "%Y-%m-%d"))
# Join item ES features and item statistical features onto the samples
samples = ratingDF.join(userProfileFeatureDF, on = ['device_id', "partition_date"], how = 'left')\
.join(clickStaticFeatures, on = ["card_id", "partition_date"], how = 'left')\
.join(expStaticFeatures, on = ["card_id", "partition_date"], how = 'left')\
.join(itemEsFeatureDF, on = ["card_id"], how = 'left')
samples = samples.withColumnRenamed("card_id", ITEM_PREFIX + CATEGORY_PREFIX + "card_id")\
.withColumnRenamed("device_id", USER_PREFIX + CATEGORY_PREFIX + "device_id") \
.withColumnRenamed("os", USER_PREFIX + CATEGORY_PREFIX + "os") \
.withColumnRenamed("user_city_id", USER_PREFIX + CATEGORY_PREFIX + "user_city_id") \
.drop("timestamp")
# | -- ITEM_CATEGORY_card_id: string(nullable=false)
# | -- partition_date: string(nullable=true)
# | -- USER_CATEGORY_device_id: string(nullable=false)
# | -- USER_CATEGORY_os: string(nullable=false)
# | -- USER_CATEGORY_user_city_id: string(nullable=false)
# | -- label: integer(nullable=false)
# | -- USER_MULTI_CATEGORY_second_solutions: string(nullable=false)
# | -- USER_MULTI_CATEGORY_second_demands: string(nullable=false)
# | -- USER_MULTI_CATEGORY_second_positions: string(nullable=false)
# | -- USER_MULTI_CATEGORY_projects: string(nullable=false)
# | -- ITEM_NUMERIC_click_count_sum: double(nullable=false)
# | -- ITEM_NUMERIC_click_count_avg: double(nullable=false)
# | -- ITEM_NUMERIC_click_count_stddev: double(nullable=false)
# | -- ITEM_NUMERIC_exp_count_sum: double(nullable=false)
# | -- ITEM_NUMERIC_exp_count_avg: double(nullable=false)
# | -- ITEM_NUMERIC_exp_count_stddev: double(nullable=false)
# | -- ITEM_NUMERIC_discount: double(nullable=false)
# | -- ITEM_NUMERIC_case_count: long(nullable=false)
# | -- ITEM_NUMERIC_sales_count: long(nullable=false)
# | -- ITEM_CATEGORY_service_type: string(nullable=false)
# | -- ITEM_CATEGORY_merchant_id: string(nullable=false)
# | -- ITEM_CATEGORY_doctor_type: string(nullable=false)
# | -- ITEM_CATEGORY_doctor_id: string(nullable=false)
# | -- ITEM_CATEGORY_doctor_famous: string(nullable=false)
# | -- ITEM_CATEGORY_hospital_id: string(nullable=false)
# | -- ITEM_CATEGORY_hospital_city_tag_id: string(nullable=false)
# | -- ITEM_CATEGORY_hospital_type: string(nullable=false)
# | -- ITEM_CATEGORY_hospital_is_high_quality: string(nullable=false)
# | -- ITEM_MULTI_CATEGORY_second_demands: string(nullable=false)
# | -- ITEM_MULTI_CATEGORY_second_solutions: string(nullable=false)
# | -- ITEM_MULTI_CATEGORY_second_positions: string(nullable=false)
# | -- ITEM_MULTI_CATEGORY_projects: string(nullable=false)
# | -- ITEM_NUMERIC_sku_price: double(nullable=false)
#
fields = [field.name for field in samples.schema.fields]
multi_categoty_fields = []
categoty_fields = []
fields_na_value_dict = {}
for field in fields:
if field.startswith(ITEM_PREFIX + CATEGORY_PREFIX) or field.startswith(USER_PREFIX + CATEGORY_PREFIX):
fields_na_value_dict[field] = '-1'
categoty_fields.append(field)
elif field.startswith(ITEM_PREFIX + MULTI_CATEGORY_PREFIX) or field.startswith(USER_PREFIX + MULTI_CATEGORY_PREFIX):
fields_na_value_dict[field] = '-1'
multi_categoty_fields.append(field)
elif field.startswith(ITEM_PREFIX + NUMERIC_PREFIX) or field.startswith(USER_PREFIX + NUMERIC_PREFIX):
fields_na_value_dict[field] = 0
samples = samples.na.fill(fields_na_value_dict).coalesce(1)
samples.printSchema()
test_samples = samples.where("partition_date = '{}'".format(endDay))
train_samples = samples.where("partition_date <> '{}'".format(endDay))
train_samples.cache()
train_samples.show(20, False)
write_time_start = time.time()
vocab_redis_keys = []
for categoty_field in categoty_fields:
output_file = "file:///home/gmuser/" + categoty_field + "_vocab"
output_file = "/strategy/" + categoty_field + "_vocab"
# train_samples.select(categoty_field).where(F.col(categoty_field) != '-1').where(F.col(categoty_field) != '').distinct().write.mode("overwrite").options(header="false").csv(output_file)
categoty_field_rows = train_samples.select(categoty_field).where(F.col(categoty_field) != '-1').where(F.col(categoty_field) != '').distinct().collect()
vocab_redis_keys.append("strategy:" + categoty_field + ":vocab")
saveVocab(vocab_redis_keys[-1], list(map(lambda row: row[categoty_field], categoty_field_rows)))
for multi_categoty_field in multi_categoty_fields:
output_file = "file:///home/gmuser/" + multi_categoty_field + "_vocab"
output_file = "/strategy/" + multi_categoty_field + "_vocab"
# train_samples.selectExpr("explode(split({multi_categoty_field},','))".format(multi_categoty_field = multi_categoty_field)).where(F.col(multi_categoty_field) != '-1').distinct().write.mode("overwrite").options(header="false").csv(output_file)
multi_categoty_field_rows = train_samples.selectExpr("explode(split({multi_categoty_field},',')) as {multi_categoty_field}".format(multi_categoty_field = multi_categoty_field)).where(F.col(multi_categoty_field) != '-1').where(F.col(multi_categoty_field) != '').distinct().collect()
vocab_redis_keys.append("strategy:" + multi_categoty_field + ":vocab")
saveVocab(vocab_redis_keys[-1], list(map(lambda row: row[multi_categoty_field], multi_categoty_field_rows)))
saveVocab("strategy:all:vocab", vocab_redis_keys)
output_file = "file:///home/gmuser/train_samples"
output_file = "/strategy/train_samples"
train_samples.write.mode("overwrite").options(header="false", sep='|').csv(output_file)
import tensorflow as tf
def get_example_string(line):
splits = line.split('|')
features = {
'ITEM_CATEGORY_card_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[splits[0].encode()])),
'USER_CATEGORY_device_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[splits[2].encode()])),
'USER_CATEGORY_os': tf.train.Feature(bytes_list=tf.train.BytesList(value=[splits[3].encode()])),
'USER_CATEGORY_user_city_id': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[4].encode()])),
'USER_MULTI_CATEGORY_second_solutions': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[6].split(','))))),
'USER_MULTI_CATEGORY_second_demands': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[7].split(','))))),
'USER_MULTI_CATEGORY_second_positions': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[8].split(','))))),
'USER_MULTI_CATEGORY_projects': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[9].split(','))))),
'ITEM_NUMERIC_click_count_sum': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[10])])),
'ITEM_NUMERIC_click_count_avg': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[11])])),
'ITEM_NUMERIC_click_count_stddev': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[12])])),
'ITEM_NUMERIC_exp_count_sum': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[13])])),
'ITEM_NUMERIC_exp_count_avg': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[14])])),
'ITEM_NUMERIC_exp_count_stddev': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[15])])),
'ITEM_NUMERIC_discount': tf.train.Feature(float_list=tf.train.FloatList(value=[float(splits[16])])),
'ITEM_NUMERIC_case_count': tf.train.Feature(float_list=tf.train.FloatList(value=[float(splits[17])])),
'ITEM_NUMERIC_sales_count': tf.train.Feature(float_list=tf.train.FloatList(value=[float(splits[18])])),
'ITEM_CATEGORY_service_type': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[19].encode()])),
'ITEM_CATEGORY_merchant_id': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[20].encode()])),
'ITEM_CATEGORY_doctor_type': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[21].encode()])),
'ITEM_CATEGORY_doctor_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[splits[22].encode()])),
'ITEM_CATEGORY_doctor_famous': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[23].encode()])),
'ITEM_CATEGORY_hospital_id': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[24].encode()])),
'ITEM_CATEGORY_hospital_city_tag_id': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[25].encode()])),
'ITEM_CATEGORY_hospital_type': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[26].encode()])),
'ITEM_CATEGORY_hospital_is_high_quality': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[27].encode()])),
'ITEM_MULTI_CATEGORY_second_demands': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[28].split(','))))),
'ITEM_MULTI_CATEGORY_second_solutions': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[29].split(','))))),
'ITEM_MULTI_CATEGORY_second_positions': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[30].split(','))))),
'ITEM_MULTI_CATEGORY_projects': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[31].split(','))))),
'ITEM_NUMERIC_sku_price': tf.train.Feature(float_list=tf.train.FloatList(value=[float(splits[32])])),
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(splits[5])])),
}
# print(features)
# print(splits[32])
tf_features = tf.train.Features(feature=features)
tf_example = tf.train.Example(features=tf_features)
tf_serialized = tf_example.SerializeToString()
return tf_serialized
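# get_example_string is not invoked below; a minimal usage sketch (paths are hypothetical) for
# turning the '|'-delimited CSV rows written above into TFRecords could be:
# with tf.io.TFRecordWriter("/home/gmuser/train_samples.tfrecord") as writer:
#     for line in open("/home/gmuser/train_samples_part.csv"):
#         writer.write(get_example_string(line.strip()))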
output_file = "file:///home/gmuser/eval_samples"
output_file = "/strategy/eval_samples"
test_samples.write.mode("overwrite").options(header="false", sep='|').csv(output_file)
print("训练数据写入 耗时s:{}".format(time.time() - write_time_start))
print("总耗时:{} mins".format((time.time() - start)/60))
spark.stop()
\ No newline at end of file
import sys
import os
from datetime import date, timedelta
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
import time
import redis
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import pyspark.sql as sql
from pyspark.sql.functions import when
from pyspark.sql.types import *
from pyspark.sql import functions as F
from collections import defaultdict
import json
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import utils.configUtils as configUtils
# import utils.connUtils as connUtils
import pandas as pd
import math
# os.environ["PYSPARK_PYTHON"]="/usr/bin/python3"
"""
特征工程
"""
NUMBER_PRECISION = 2
VERSION = configUtils.SERVICE_VERSION
FEATURE_USER_KEY = "Strategy:rec:feature:service:" + VERSION + ":user:"
FEATURE_ITEM_KEY = "Strategy:rec:feature:service:" + VERSION + ":item:"
FEATURE_VOCAB_KEY = "Strategy:rec:vocab:service:" + VERSION
FEATURE_COLUMN_KEY = "Strategy:rec:column:service:" + VERSION
ITEM_PREFIX = "ITEM_"
USER_PREFIX = "USER_"
CATEGORY_PREFIX = "CATEGORY_"
MULTI_CATEGORY_PREFIX = "MULTI_CATEGORY_"
NUMERIC_PREFIX = "NUMERIC_"
DATA_PATH_TRAIN = "/data/files/service_feature_{}_train.csv".format(VERSION)
def getRedisConn():
pool = redis.ConnectionPool(host="172.16.50.145",password="XfkMCCdWDIU%ls$h",port=6379,db=0)
conn = redis.Redis(connection_pool=pool)
# conn = redis.Redis(host="172.16.50.145", port=6379, password="XfkMCCdWDIU%ls$h",db=0)
# conn = redis.Redis(host="172.18.51.10", port=6379,db=0) #test
return conn
def parseTags(tags,i):
tags_arr = tags.split(",")
if len(tags_arr) >= i:
return tags_arr[i-1]
else:
return "-1"
def parseTagsFromArray(tagsArray,i):
if len(tagsArray) >= i:
return tagsArray[i - 1]
else:
return "-1"
def numberToBucket(num):
res = 0
if not num:
return str(res)
num = int(num)
if num >= 1000:
res = 1000//10
else:
res = int(num)//10
return str(res)
def priceToBucket(num):
res = 0
if not num:
return str(res)
if num >= 100000:
res = 100000//1000
else:
res = int(num)//1000
return str(res)
numberToBucketUdf = F.udf(numberToBucket, StringType())
priceToBucketUdf = F.udf(priceToBucket, StringType())
def getItemStaticFeatures(itemStatisticDays, startDay, endDay):
itemStatisticStartDay = addDays(-itemStatisticDays)
itemStatisticSql = getItemStatisticSql(itemStatisticStartDay, endDay)
itemStatisticDF = spark.sql(itemStatisticSql)
# itemStatisticDF.show(100, False)
partitionDatas = generatePartitionDates(itemStatisticDays)
partitionDatasBC = spark.sparkContext.broadcast(partitionDatas)
def splitPatitionDatasFlatMapFunc(row):
card_id = row.card_id
label = row.label
partition_date_label_count_list = row.partition_date_label_count_list
partition_date_label_count_dcit = dict(map(lambda s: (s.split('_')[0], s.split('_')[1]), partition_date_label_count_list))
res = []
for partition_date in partitionDatasBC.value:
res.append((card_id, partition_date, label, partition_date_label_count_dcit.get(partition_date, '0')))
return res
itemStatisticDF = itemStatisticDF.rdd.flatMap(splitPatitionDatasFlatMapFunc).toDF(["card_id", "partition_date", "label", "label_count"])
itemStatisticDF.createOrReplaceTempView("itemStatisticDF")
itemStatisticSql = """
SELECT
card_id,
label,
partition_date,
label_count,
COALESCE(SUM(label_count) OVER(PARTITION BY card_id, label ORDER BY partition_date ROWS BETWEEN {itemStatisticStartDays} PRECEDING AND 1 PRECEDING), 0) label_count_sum,
COALESCE(AVG(label_count) OVER(PARTITION BY card_id, label ORDER BY partition_date ROWS BETWEEN {itemStatisticStartDays} PRECEDING AND 1 PRECEDING), 0) label_count_avg,
COALESCE(STDDEV_POP(label_count) OVER(PARTITION BY card_id, label ORDER BY partition_date ROWS BETWEEN {itemStatisticStartDays} PRECEDING AND 1 PRECEDING), 0) label_count_stddev
FROM
itemStatisticDF
WHERE partition_date >= '{startDay}' and partition_date <= '{endDay}'
""".format(itemStatisticStartDays = itemStatisticStartDays, startDay = startDay, endDay = endDay)
print("itemStatisticSql: {}".format(itemStatisticSql))
staticFeatures = spark.sql(itemStatisticSql)
clickStaticFeatures = staticFeatures.where(F.col('label') == F.lit(1))\
.withColumnRenamed('label_count_sum', ITEM_PREFIX + NUMERIC_PREFIX + 'click_count_sum')\
.withColumnRenamed('label_count_avg', ITEM_PREFIX + NUMERIC_PREFIX + 'click_count_avg')\
.withColumnRenamed('label_count_stddev', ITEM_PREFIX + NUMERIC_PREFIX + 'click_count_stddev')
expStaticFeatures = staticFeatures.where(F.col('label') == F.lit(0))\
.withColumnRenamed('label_count_sum', ITEM_PREFIX + NUMERIC_PREFIX + 'exp_count_sum')\
.withColumnRenamed('label_count_avg', ITEM_PREFIX + NUMERIC_PREFIX + 'exp_count_avg')\
.withColumnRenamed('label_count_stddev', ITEM_PREFIX + NUMERIC_PREFIX + 'exp_count_stddev')
drop_columns = ['label', 'label_count']
clickStaticFeatures = clickStaticFeatures.drop(*drop_columns)
# clickStaticFeatures.show(20, truncate = False)
expStaticFeatures = expStaticFeatures.drop(*drop_columns)
# expStaticFeatures.show(20, truncate = False)
return clickStaticFeatures, expStaticFeatures
# ratingDF, itemEsFeatureDF, startDay, endDay
def itemStatisticFeaturesProcess(samples_iEsF_iStatisticF):
# 连续特征分桶
bucket_suffix = "_Bucket"
for col in ["click_count_sum", "click_count_avg", "exp_count_sum", "exp_count_avg"]:
new_col = col + bucket_suffix
samples_iEsF_iStatisticF = samples_iEsF_iStatisticF.withColumn(new_col, numberToBucketUdf(F.col(col))) \
.drop(col) \
.withColumn(new_col, F.when(F.col(new_col).isNull(), "0").otherwise(F.col(new_col)))
# 方差处理
number_suffix = "_number"
for col in ["click_count_stddev", "exp_count_stddev"]:
new_col = col + number_suffix
samples_iEsF_iStatisticF = samples_iEsF_iStatisticF.withColumn(new_col, F.when(F.col(col).isNull(), 0).otherwise(1 / (F.col(col) + 1))).drop(col)
samples_iEsF_iStatisticF.show(20, truncate=False)
return samples_iEsF_iStatisticF
def addUserStaticsFeatures(samples,dataVocab):
print("user统计特征处理...")
samples = samples \
.withColumn('userRatingCount',F.format_number(F.sum(F.lit(1)).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)), NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingAvg", F.format_number(F.avg(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)), NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingStddev", F.format_number(F.stddev(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),NUMBER_PRECISION).cast("float")) \
.withColumn("userClickCount", F.format_number(F.sum(when(F.col('label') == 1, F.lit(1)).otherwise(F.lit(0))).over(sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)),NUMBER_PRECISION).cast("float")) \
.withColumn("userExpCount", F.format_number(F.sum(when(F.col('label') == 0, F.lit(1)).otherwise(F.lit(0))).over(sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)),NUMBER_PRECISION).cast("float")) \
.withColumn("userCtr", F.format_number(F.col("userClickCount")/(F.col("userExpCount")+1),NUMBER_PRECISION).cast("float")) \
.filter(F.col("userRatingCount") > 1)
samples.show(20, truncate=False)
# 连续特征分桶
bucket_vocab = [str(i) for i in range(101)]
bucket_suffix = "_Bucket"
for col in ["userRatingCount","userRatingAvg","userClickCount","userExpCount"]:
new_col = col + bucket_suffix
samples = samples.withColumn(new_col, numberToBucketUdf(F.col(col)))\
.drop(col)\
.withColumn(new_col,F.when(F.col(new_col).isNull(),"0").otherwise(F.col(new_col)))
dataVocab[new_col] = bucket_vocab
# 方差处理
number_suffix = "_number"
for col in ["userRatingStddev"]:
new_col = col + number_suffix
samples = samples.withColumn(new_col,F.when(F.col(col).isNull(),0).otherwise(1/(F.col(col)+1))).drop(col)
for col in ["userCtr"]:
new_col = col + number_suffix
samples = samples.withColumn(col, F.when(F.col(col).isNull(), 0).otherwise(F.col(col))).withColumnRenamed(col, new_col)
samples.printSchema()
samples.show(20, truncate=False)
return samples
from collections import Iterable
def flatten(items):
for x in items:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
yield from flatten(x)
else:
yield x
def itemEsFeaturesProcess(itemDF, spark):
print("item es 特征工程 ")
item_es_feature_start_time = int(round(time.time()))
item_categoty_cols = ['id', 'service_type', 'merchant_id', 'doctor_type', 'doctor_id',
'doctor_famous', 'hospital_id', 'hospital_city_tag_id', 'hospital_type', 'hospital_is_high_quality']
item_multi_categots_cols =['tags_v3', 'second_demands', 'second_solutions', 'second_positions']
for item_categoty_col in item_categoty_cols:
itemDF[ITEM_PREFIX + CATEGORY_PREFIX + item_categoty_col] = itemDF[item_categoty_col]
itemDF = itemDF.drop(columns = item_categoty_cols)
for item_multi_categots_col in item_multi_categots_cols:
itemDF[ITEM_PREFIX + MULTI_CATEGORY_PREFIX + item_multi_categots_col] = itemDF[item_multi_categots_col]
itemDF = itemDF.drop(columns = item_multi_categots_cols)
item_numeric_cols = ['case_count', 'sales_count', 'discount', 'sku_price']
for item_numeric_col in item_numeric_cols:
itemDF[ITEM_PREFIX + NUMERIC_PREFIX + item_numeric_col] = itemDF[item_numeric_col]
itemDF = itemDF.drop(columns = [item_numeric_cols])
itemEsFeatureDF = spark.createDataFrame(itemDF)
itemEsFeatureDF.printSchema()
itemEsFeatureDF.show(10, truncate=False)
item_es_feature_end_time = int(round(time.time()))
print("item es 特征工程, 耗时: {}s".format(item_es_feature_end_time - item_es_feature_start_time))
return itemEsFeatureDF
def extractTags(genres_list):
# 根据点击列表顺序加权
genres_dict = defaultdict(int)
for i,genres in enumerate(genres_list):
for genre in genres.split(','):
genres_dict[genre] += i
sortedGenres = sorted(genres_dict.items(), key=lambda x: x[1], reverse=True)
return [x[0] for x in sortedGenres]
# sql版本不支持F.reverse
def arrayReverse(arr):
arr.reverse()
return arr
"""
p —— 概率,即点击的概率,也就是 CTR
n —— 样本总数,即曝光数
z —— 在正态分布里,均值 + z * 标准差会有一定的置信度。例如 z 取 1.96,就有 95% 的置信度。
Wilson区间的含义就是,就是指在一定置信度下,真实的 CTR 范围是多少
"""
def wilson_ctr(num_pv, num_click):
num_pv = float(num_pv)
num_click = float(num_click)
if num_pv * num_click == 0 or num_pv < num_click:
return 0.0
z = 1.96;
n = num_pv;
p = num_click / num_pv;
score = (p + z*z/(2*n) - z*math.sqrt((p*(1.0 - p) + z*z /(4.0*n))/n)) / (1.0 + z*z/n);
return float(score);
def getUserProfileFeature(spark, startDay, endDay):
#连接doris_olap库
userProfileFeatureDF = spark.read.jdbc('jdbc:mysql://172.16.30.136:3306/doris_olap', 'user_tag3_portrait', numPartitions = 100,
properties = { 'user': 'doris_olap', 'password': 'bA27hXasdfswuolap', 'driver': 'com.mysql.jdbc.Driver' })
userProfileFeatureDF.createOrReplaceTempView("userProfileFeatureDF")
table_query = """
select date as dt, cl_id as device_id, second_solutions, second_demands, second_positions, projects
from userProfileFeatureDF
where date >= '{startDay}' and date <= '{endDay}'
""".format(startDay = startDay, endDay = endDay)
print(table_query)
userProfileFeatureDF = spark.sql(table_query)
def addOneDay(dt):
return (date.fromisoformat(dt) + timedelta(days = 1)).strftime('%Y%m%d')
addOneDay_UDF = F.udf(addOneDay, StringType())
userProfileFeatureDF = userProfileFeatureDF.withColumn('partition_date', addOneDay_UDF('dt'))\
.withColumnRenamed("second_solutions", USER_PREFIX + MULTI_CATEGORY_PREFIX + "second_solutions")\
.withColumnRenamed("second_demands", USER_PREFIX + MULTI_CATEGORY_PREFIX + "second_demands")\
.withColumnRenamed("second_positions", USER_PREFIX + MULTI_CATEGORY_PREFIX + "second_positions")\
.withColumnRenamed("projects", USER_PREFIX + MULTI_CATEGORY_PREFIX + "projects")\
.drop('dt')
userProfileFeatureDF.cache()
userProfileFeatureDF.show(20, False)
return userProfileFeatureDF
def addUserFeatures(samples):
extractTagsUdf = F.udf(extractTags, ArrayType(StringType()))
arrayReverseUdf = F.udf(arrayReverse, ArrayType(StringType()))
ctrUdf = F.udf(wilson_ctr, FloatType())
print("user历史数据处理...")
# user历史记录
samples = samples.withColumn('userPositiveHistory',F.collect_list(when(F.col('label') == 1, F.col('item_id')).otherwise(F.lit(None))).over(sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)))
samples = samples.withColumn("userPositiveHistory", arrayReverseUdf(F.col("userPositiveHistory")))
for i in range(1,11):
samples = samples.withColumn("userRatedHistory"+str(i), F.when(F.col("userPositiveHistory")[i-1].isNotNull(),F.col("userPositiveHistory")[i-1]).otherwise("-1"))
samples = samples.drop("userPositiveHistory")
# user preferences
print("user preference data")
for c,v in multiVocab.items():
new_col = "user" + "__" + c
samples = samples.withColumn(new_col, extractTagsUdf(F.collect_list(when(F.col('label') == 1, F.col(c)).otherwise(F.lit(None))).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1))))
for i in range(1, 6):
samples = samples.withColumn(new_col + "__" + str(i),F.when(F.col(new_col)[i - 1].isNotNull(), F.col(new_col)[i - 1]).otherwise("-1"))
dataVocab[new_col + "__" + str(i)] = v
samples = samples.drop(new_col).drop(c)
print("user统计特征处理...")
samples = samples \
.withColumn('userRatingCount', F.format_number(
F.sum(F.lit(1)).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),
NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingAvg", F.format_number(
F.avg(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),
NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingStddev", F.format_number(
F.stddev(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),
NUMBER_PRECISION).cast("float")) \
.withColumn("userClickCount", F.format_number(
F.sum(when(F.col('label') == 1, F.lit(1)).otherwise(F.lit(0))).over(
sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)), NUMBER_PRECISION).cast(
"float")) \
.withColumn("userExpCount", F.format_number(F.sum(when(F.col('label') == 0, F.lit(1)).otherwise(F.lit(0))).over(
sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)), NUMBER_PRECISION).cast(
"float")) \
.withColumn("userCtr",F.format_number(ctrUdf(F.col("userClickCount"),F.col("userExpCount")),NUMBER_PRECISION))\
.filter(F.col("userRatingCount") > 1)
samples.show(10, truncate=False)
# bucketize continuous features
bucket_vocab = [str(i) for i in range(101)]
bucket_suffix = "_Bucket"
for col in ["userRatingCount", "userRatingAvg", "userClickCount", "userExpCount"]:
new_col = col + bucket_suffix
samples = samples.withColumn(new_col, numberToBucketUdf(F.col(col))) \
.drop(col) \
.withColumn(new_col, F.when(F.col(new_col).isNull(), "0").otherwise(F.col(new_col)))
dataVocab[new_col] = bucket_vocab
# stddev features: map to 1 / (stddev + 1), with nulls defaulting to 0
number_suffix = "_number"
for col in ["userRatingStddev"]:
new_col = col + number_suffix
samples = samples.withColumn(new_col, F.when(F.col(col).isNull(), 0).otherwise(1 / (F.col(col) + 1))).drop(col)
for col in ["userCtr"]:
new_col = col + number_suffix
samples = samples.withColumn(col, F.when(F.col(col).isNull(), 0).otherwise(F.col(col))).withColumnRenamed(col,
new_col)
samples.printSchema()
samples.show(10,truncate=False)
return samples
def addSampleLabel(ratingSamples):
ratingSamples = ratingSamples.withColumn('label', when(F.col('rating') >= 1, 1).otherwise(0))
ratingSamples.show(5, truncate=False)
ratingSamples.printSchema()
return ratingSamples
def samplesNegAndUnion(samplesPos,samplesNeg):
# downsample negatives to a positive:negative ratio of 1:4
pos_count = samplesPos.count()
neg_count = samplesNeg.count()
print("before filter posSize:{},negSize:{}".format(str(pos_count), str(neg_count)))
samplesNeg = samplesNeg.sample(pos_count * 4 / neg_count)
samples = samplesNeg.union(samplesPos)
dataSize = samples.count()
print("dataSize:{}".format(str(dataSize)))
return samples
def splitAndSaveTrainingTestSamplesByTimeStamp(samples,splitTimestamp, file_path):
samples = samples.withColumn("timestampLong", F.col("timestamp").cast(LongType()))
# quantile = smallSamples.stat.approxQuantile("timestampLong", [0.8], 0.05)
# splitTimestamp = quantile[0]
train = samples.where(F.col("timestampLong") <= splitTimestamp).drop("timestampLong")
test = samples.where(F.col("timestampLong") > splitTimestamp).drop("timestampLong")
print("split train size:{},test size:{}".format(str(train.count()),str(test.count())))
trainingSavePath = file_path + '_train'
testSavePath = file_path + '_test'
train.write.option("header", "true").option("delimiter", "|").mode('overwrite').csv(trainingSavePath)
test.write.option("header", "true").option("delimiter", "|").mode('overwrite').csv(testSavePath)
def collectColumnToVocab(samples,column):
datas = samples.select(column).distinct().collect()
vocabSet = set()
for d in datas:
if d[column]:
vocabSet.add(str(d[column]))
return list(vocabSet)
def collectMutiColumnToVocab(samples,column):
datas = samples.select(column).distinct().collect()
tagSet = set()
for d in datas:
if d[column]:
for tag in d[column].split(","):
tagSet.add(tag)
tagSet.add("-1") # 空值默认
return list(tagSet)
def dataVocabToRedis(dataVocab):
conn = getRedisConn()
conn.set(FEATURE_VOCAB_KEY,dataVocab)
conn.expire(FEATURE_VOCAB_KEY,60 * 60 * 24 * 7)
def saveVocab(key, vocab):
conn = getRedisConn()
conn.delete(key)
conn.lpush(key,vocab)
conn.expire(key, 60 * 60 * 24)
def featureColumnsToRedis(columns):
conn = getRedisConn()
conn.set(FEATURE_COLUMN_KEY, json.dumps(columns))
conn.expire(FEATURE_COLUMN_KEY, 60 * 60 * 24 * 7)
def featureToRedis(key,datas):
conn = getRedisConn()
for k,v in datas.items():
newKey = key+k
conn.set(newKey,v)
conn.expire(newKey, 60 * 60 * 24 * 7)
def userFeaturesToRedis(samples,columns,prefix,redisKey):
idCol = prefix+"id"
timestampCol = idCol+"_timestamp"
def toRedis(datas):
conn = getRedisConn()
for d in datas:
k = d[idCol]
v = json.dumps(d.asDict(), ensure_ascii=False)
newKey = redisKey + k
conn.set(newKey, v)
conn.expire(newKey, 60 * 60 * 24 * 7)
# keep each user's most recent record according to timestamp
prefixSamples = samples.groupBy(idCol).agg(F.max("timestamp").alias(timestampCol))
resDatas = prefixSamples.join(samples, on=[idCol], how='inner').where(F.col("timestamp") == F.col(timestampCol))
resDatas = resDatas.select(*columns).distinct()
resDatas.show(10,truncate=False)
print(prefix, resDatas.count())
resDatas.repartition(8).foreachPartition(toRedis)
def itemFeaturesToRedis(itemStaticDF,redisKey):
idCol = "item_id"
def toRedis(datas):
conn = getRedisConn()
for d in datas:
k = d[idCol]
v = json.dumps(d.asDict(), ensure_ascii=False)
newKey = redisKey + k
conn.set(newKey, v)
conn.expire(newKey, 60 * 60 * 24 * 7)
itemStaticDF.repartition(8).foreachPartition(toRedis)
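# Illustrative sketch only: each row ends up in Redis as key = redisKey + item_id with a
# JSON value of all of the row's feature columns and a 7-day TTL, e.g. (hypothetical data)
# SET "<redisKey>123" '{"item_id": "123", "ITEM_NUMERIC_sku_price": 2999.0, ...}'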
"""
Data loading
"""
CONTENT_TYPE = "service"
SERVICE_HOSTS = [
{'host': "172.16.52.33", 'port': 9200},
{'host': "172.16.52.19", 'port': 9200},
{'host': "172.16.52.48", 'port': 9200},
{'host': "172.16.52.27", 'port': 9200},
{'host': "172.16.52.34", 'port': 9200}
]
ES_INDEX = "gm-dbmw-service-read"
ES_INDEX_TEST = "gm_test-service-read"
ACTION_REG = r"""^\d+$"""
def getEsConn_test():
host_config = [{'host': '172.18.52.14', 'port': 9200}, {'host': '172.18.52.133', 'port': 9200},
{'host': '172.18.52.7', 'port': 9200}]
return Elasticsearch(host_config, http_auth=('elastic', 'gm_test'), timeout=3600)
def getEsConn():
return Elasticsearch(SERVICE_HOSTS, http_auth=('elastic', 'gengmei!@#'), timeout=3600)
def getClickSql(start, end):
sql = """
SELECT t1.partition_date, t1.cl_id device_id, t1.card_id, t1.cl_type as os, t1.city_id as user_city_id
FROM
(
select partition_date, city_id, cl_id, business_id as card_id, cl_type
from online.bl_hdfs_maidian_updates
where action = 'page_view'
AND partition_date>='{startDay}' and partition_date<='{endDay}'
AND page_name='welfare_detail'
AND page_stay >= 2
AND cl_id is not null
AND cl_id != ''
AND business_id is not null
AND business_id != ''
group by partition_date, city_id, cl_id, business_id, cl_type
) AS t1
join
( --channel, new/old users
SELECT distinct device_id
FROM online.ml_device_day_active_status
where partition_date>='{startDay}' and partition_date<='{endDay}'
AND active_type in ('1','2','4')
and first_channel_source_type not in ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei','','unknown')
AND first_channel_source_type not like 'promotion\_jf\_%'
) t2
on t1.cl_id = t2.device_id
LEFT JOIN
( --exclude blacklisted devices
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY =regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)t3
on t3.device_id=t2.device_id
WHERE t3.device_id is null
""".format(startDay=start,endDay=end)
print(sql)
return sql
def getExposureSql(start, end):
# t1.partition_date, t1.cl_id device_id, t1.card_id, t1.time_stamp, t1.cl_type as os, t1.city_id as user_city_id
sql = """
SELECT t1.partition_date, t1.cl_id device_id, t1.card_id, cl_type as os, t1.city_id as user_city_id
from
( --new home page card exposures
SELECT partition_date,city_id,cl_type,cl_id,card_id
FROM online.ml_community_precise_exposure_detail
where partition_date>='{startDay}' and partition_date<='{endDay}'
and action in ('page_precise_exposure','home_choiceness_card_exposure')
and cl_id IS NOT NULL
and card_id IS NOT NULL
and is_exposure='1'
--and page_name='home'
--and tab_name='精选'
--and page_name in ('home','search_result_more')
--and ((page_name='home' and tab_name='精选') or (page_name='category' and tab_name = '商品'))
and card_type in ('card','video')
and card_content_type in ('service')
and (get_json_object(exposure_card,'$.in_page_pos') is null or get_json_object(exposure_card,'$.in_page_pos') != 'seckill')
group by partition_date, city_id, cl_type, cl_id, card_id, app_session_id
) t1
join
( --channel, new/old users
SELECT distinct device_id
FROM online.ml_device_day_active_status
where partition_date>='{startDay}' and partition_date<='{endDay}'
AND active_type in ('1','2','4')
and first_channel_source_type not in ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei','','unknown')
AND first_channel_source_type not like 'promotion\_jf\_%'
) t2
on t1.cl_id = t2.device_id
LEFT JOIN
( --exclude blacklisted devices
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY =regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)t3
on t3.device_id=t2.device_id
WHERE t3.device_id is null
""".format(startDay=start,endDay=end)
print(sql)
return sql
def getItemStatisticSql(start, end):
sql = """
SELECT TTT.card_id, TTT.label, COLLECT_LIST(CONCAT(TTT.partition_date, '_', TTT.label_count)) partition_date_label_count_list
FROM
(
SELECT TT.card_id, TT.partition_date, TT.label, count(1) as label_count
FROM
(
SELECT T.partition_date, T.card_id, T.label
FROM
(
SELECT t1.partition_date, t1.cl_id device_id, t1.card_id, t1.cl_type as os, t1.city_id as user_city_id, 1 as label
FROM
(
select partition_date, city_id, cl_id, business_id as card_id, cl_type
from online.bl_hdfs_maidian_updates
where action = 'page_view'
AND partition_date>='{startDay}' and partition_date<='{endDay}'
AND page_name='welfare_detail'
AND page_stay >= 2
AND cl_id is not null
AND cl_id != ''
AND business_id is not null
AND business_id != ''
group by partition_date, city_id, cl_id, business_id, cl_type
) AS t1
join
( --channel, new/old users
SELECT distinct device_id
FROM online.ml_device_day_active_status
where partition_date>='{startDay}' and partition_date<='{endDay}'
AND active_type in ('1','2','4')
and first_channel_source_type not in ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei','','unknown')
AND first_channel_source_type not like 'promotion\_jf\_%'
) t2
on t1.cl_id = t2.device_id
LEFT JOIN
( --exclude blacklisted devices
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY =regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)t3
on t3.device_id=t2.device_id
WHERE t3.device_id is null
UNION
SELECT t1.partition_date, t1.cl_id device_id, t1.card_id, cl_type as os, t1.city_id as user_city_id, 0 as label
from
( --new home page card exposures
SELECT partition_date, city_id, cl_type, cl_id, card_id
FROM online.ml_community_precise_exposure_detail
where partition_date>='{startDay}' and partition_date<='{endDay}'
and action in ('page_precise_exposure','home_choiceness_card_exposure')
and cl_id IS NOT NULL
and card_id IS NOT NULL
and is_exposure='1'
--and page_name='home'
--and tab_name='精选'
--and page_name in ('home','search_result_more')
--and ((page_name='home' and tab_name='精选') or (page_name='category' and tab_name = '商品'))
and card_type in ('card','video')
and card_content_type in ('service')
and (get_json_object(exposure_card,'$.in_page_pos') is null or get_json_object(exposure_card,'$.in_page_pos') != 'seckill')
group by partition_date, city_id, cl_type, cl_id, card_id, app_session_id
) t1
join
( --channel, new/old users
SELECT distinct device_id
FROM online.ml_device_day_active_status
where partition_date>='{startDay}' and partition_date<='{endDay}'
AND active_type in ('1','2','4')
and first_channel_source_type not in ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei','','unknown')
AND first_channel_source_type not like 'promotion\_jf\_%'
) t2
on t1.cl_id = t2.device_id
LEFT JOIN
( --exclude blacklisted devices
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY =regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)t3
on t3.device_id=t2.device_id
WHERE t3.device_id is null
) T
) TT
GROUP BY TT.card_id, TT.partition_date, TT.label
) TTT
GROUP BY TTT.card_id, TTT.label
""".format(startDay = start,endDay = end)
print(sql)
return sql
def connectDoris(spark, table):
return spark.read \
.format("jdbc") \
.option("driver", "com.mysql.jdbc.Driver") \
.option("url", "jdbc:mysql://172.16.30.136:3306/doris_prod") \
.option("dbtable", table) \
.option("user", "doris") \
.option("password", "o5gbA27hXHHm") \
.load()
def get_spark(appName):
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
spark = (SparkSession
.builder
.config(conf=sparkConf)
.appName(appName)
.enableHiveSupport()
.getOrCreate())
return spark
def init_es_query():
q = {
"_source": {
"includes":[]
},
"query": {
"bool": {
"must": [],
"must_not": [],
"should": []
}
}
}
return q
def parseSource(_source):
id = str(_source.setdefault("id",-1))
discount = _source.setdefault("discount",0)
case_count = _source.setdefault("case_count",0)
sales_count = _source.setdefault("sales_count",0)
service_type = str(_source.setdefault("service_type",-1))
second_demands = ','.join(_source.setdefault("second_demands",["-1"]))
second_solutions = ','.join(_source.setdefault("second_solutions",["-1"]))
second_positions = ','.join(_source.setdefault("second_positions",["-1"]))
tags_v3 = ','.join(_source.setdefault("tags_v3", ["-1"]))
# sku
sku_list = _source.setdefault("sku_list",[])
sku_tags_list = []
sku_show_tags_list = []
sku_price_list = []
for sku in sku_list:
sku_tags_list += sku.setdefault("sku_tags",[])
# sku_tags_list += sku.setdefault("sku_tags_id",[])
sku_show_tags_list.append(sku.setdefault("show_project_type_name",""))
price = sku.setdefault("price", 0.0)
if price > 0:
sku_price_list.append(price)
# sku_tags = ",".join([str(i) for i in sku_tags_list]) if len(sku_tags_list) > 0 else "-1"
# sku_show_tags = ",".join(sku_show_tags_list) if len(sku_show_tags_list) > 0 else "-1"
sku_price = min(sku_price_list) if len(sku_price_list) > 0 else 0.0
#merchant_id
merchant_id = str(_source.setdefault("merchant_id","-1"))
# doctor_type id famous_doctor
doctor = _source.setdefault("doctor",{})
doctor_type = str(doctor.setdefault("doctor_type","-1"))
doctor_id = str(doctor.setdefault("id","-1"))
doctor_famous = str(int(doctor.setdefault("famous_doctor",False)))
# hospital id city_tag_id hospital_type is_high_quality
hospital = doctor.setdefault("hospital", {})
hospital_id = str(hospital.setdefault("id", "-1"))
hospital_city_tag_id = str(hospital.setdefault("city_tag_id", -1))
hospital_type = str(hospital.setdefault("hospital_type", "-1"))
hospital_is_high_quality = str(int(hospital.setdefault("is_high_quality", False)))
data = [id,
discount,
case_count,
sales_count,
service_type,
merchant_id,
doctor_type,
doctor_id,
doctor_famous,
hospital_id,
hospital_city_tag_id,
hospital_type,
hospital_is_high_quality,
second_demands,
second_solutions,
second_positions,
tags_v3,
# sku_show_tags,
sku_price
]
return data
# fetch item features from ES
def get_item_es_feature_df():
es_columns = ["id","discount", "sales_count", "doctor", "case_count", "service_type","merchant_id","second_demands", "second_solutions", "second_positions", "sku_list","tags_v3"]
query = init_es_query()
query["_source"]["includes"] = es_columns
print(json.dumps(query), flush=True)
es_cli = getEsConn()
scan_re = scan(client=es_cli, index=ES_INDEX, query=query, scroll='3m')
datas = []
for res in scan_re:
_source = res['_source']
data = parseSource(_source)
datas.append(data)
print("card size: ",len(datas))
itemColumns = ['card_id', ITEM_PREFIX + NUMERIC_PREFIX + 'discount',
ITEM_PREFIX + NUMERIC_PREFIX + 'case_count', ITEM_PREFIX + NUMERIC_PREFIX + 'sales_count',
ITEM_PREFIX + CATEGORY_PREFIX + 'service_type',ITEM_PREFIX + CATEGORY_PREFIX + 'merchant_id',
ITEM_PREFIX + CATEGORY_PREFIX + 'doctor_type', ITEM_PREFIX + CATEGORY_PREFIX + 'doctor_id',
ITEM_PREFIX + CATEGORY_PREFIX + 'doctor_famous', ITEM_PREFIX + CATEGORY_PREFIX + 'hospital_id',
ITEM_PREFIX + CATEGORY_PREFIX + 'hospital_city_tag_id', ITEM_PREFIX + CATEGORY_PREFIX + 'hospital_type',
ITEM_PREFIX + CATEGORY_PREFIX + 'hospital_is_high_quality', ITEM_PREFIX + MULTI_CATEGORY_PREFIX + 'second_demands',
ITEM_PREFIX + MULTI_CATEGORY_PREFIX + 'second_solutions', ITEM_PREFIX + MULTI_CATEGORY_PREFIX + 'second_positions',
ITEM_PREFIX + MULTI_CATEGORY_PREFIX + 'projects', ITEM_PREFIX + NUMERIC_PREFIX + 'sku_price']
itemEsFeatureDF = pd.DataFrame(datas,columns=itemColumns)
itemEsFeatureDF = spark.createDataFrame(itemEsFeatureDF)
itemEsFeatureDF.printSchema()
# itemEsFeatureDF.show(10, truncate=False)
return itemEsFeatureDF
def addDays(n, format="%Y%m%d"):
return (date.today() + timedelta(days=n)).strftime(format)
def generatePartitionDates(partitionDates):
return [addDays(-trainDay - 1) for trainDay in range(partitionDates)]
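# Illustrative sketch only: addDays(-1) returns yesterday as 'YYYYMMDD', and
# generatePartitionDates(3) returns the last three partition dates ending yesterday,
# e.g. ['20220309', '20220308', '20220307'] if today were (hypothetically) 20220310.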
# show all columns
pd.set_option('display.max_columns', None)
# show all rows
pd.set_option('display.max_rows', None)
# set the display width of values to 100 (default is 50)
pd.set_option('max_colwidth',100)
def get_click_exp_start_end_time(trainDays):
startDay = addDays(-int(trainDays) - 1)
endDay = addDays(-1)
print("click_exp_start_end_time: {}, {}".format(startDay, endDay), flush=True)
return startDay, endDay
def get_click_exp_rating_df(trainDays, spark):
# start and end dates of the behavior data
startDay, endDay = get_click_exp_start_end_time(trainDays)
# fetch exposure and click behavior data
clickSql = getClickSql(startDay,endDay)
expSql = getExposureSql(startDay,endDay)
clickDF = spark.sql(clickSql)
clickDF.createOrReplaceTempView("clickDF")
clickDF.cache()
print("click count: ", clickDF.count())
expDF = spark.sql(expSql)
expDF.createOrReplaceTempView("expDF")
expDF.cache()
# remove clicked records from the exposure data
print("expDF count before removing clicks: ", expDF.count())
expDF = spark.sql("""
SELECT t1.partition_date, t1.device_id, t1.card_id, t1.os, t1.user_city_id
FROM expDF t1
LEFT JOIN clickDF t2
ON t1.partition_date = t2.partition_date
AND t1.device_id = t2.device_id
AND t1.card_id = t2.card_id
AND t1.os = t2.os
AND t1.user_city_id = t2.user_city_id
WHERE t2.device_id is NULL
""")
print("expDF 过滤点击数据后 count: ", expDF.count())
#添加label并且规范字段命名
clickDF = clickDF.withColumn("label", F.lit(1))
expDF = expDF.withColumn("label", F.lit(0))
ratingDF = clickDF.union(expDF)
ratingDF = ratingDF.withColumn("user_city_id", F.when(F.col("user_city_id").isNull(), "-1").otherwise(F.col("user_city_id")))
ratingDF.cache()
print("ratingDF.columns: {}".format(ratingDF.columns))
print(ratingDF.show(20, truncate=False))
expDF.unpersist(True)
clickDF.unpersist(True)
return clickDF, expDF, ratingDF, startDay, endDay
if __name__ == '__main__':
start = time.time()
spark = get_spark("SERVICE_FEATURE_CSV_EXPORT_SK")
spark.sparkContext.setLogLevel("ERROR")
output_file = "file:///home/gmuser/train_samples"
output_file = "/strategy/train_samples"
train_samples = spark.read.csv(output_file)
train_samples.show(100, False)
# train_samples.write.mode("overwrite").options(header="false", sep='|').csv(output_file)
import tensorflow as tf
def get_example_string(line):
splits = line.split('|')
features = {
'ITEM_CATEGORY_card_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[splits[0].encode()])),
'USER_CATEGORY_device_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[splits[2].encode()])),
'USER_CATEGORY_os': tf.train.Feature(bytes_list=tf.train.BytesList(value=[splits[3].encode()])),
'USER_CATEGORY_user_city_id': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[4].encode()])),
'USER_MULTI_CATEGORY_second_solutions': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[6].split(','))))),
'USER_MULTI_CATEGORY_second_demands': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[7].split(','))))),
'USER_MULTI_CATEGORY_second_positions': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[8].split(','))))),
'USER_MULTI_CATEGORY_projects': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[9].split(','))))),
'ITEM_NUMERIC_click_count_sum': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[10])])),
'ITEM_NUMERIC_click_count_avg': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[11])])),
'ITEM_NUMERIC_click_count_stddev': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[12])])),
'ITEM_NUMERIC_exp_count_sum': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[13])])),
'ITEM_NUMERIC_exp_count_avg': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[14])])),
'ITEM_NUMERIC_exp_count_stddev': tf.train.Feature(
float_list=tf.train.FloatList(value=[float(splits[15])])),
'ITEM_NUMERIC_discount': tf.train.Feature(float_list=tf.train.FloatList(value=[float(splits[16])])),
'ITEM_NUMERIC_case_count': tf.train.Feature(float_list=tf.train.FloatList(value=[float(splits[17])])),
'ITEM_NUMERIC_sales_count': tf.train.Feature(float_list=tf.train.FloatList(value=[float(splits[18])])),
'ITEM_CATEGORY_service_type': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[19].encode()])),
'ITEM_CATEGORY_merchant_id': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[20].encode()])),
'ITEM_CATEGORY_doctor_type': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[21].encode()])),
'ITEM_CATEGORY_doctor_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[splits[22].encode()])),
'ITEM_CATEGORY_doctor_famous': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[23].encode()])),
'ITEM_CATEGORY_hospital_id': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[24].encode()])),
'ITEM_CATEGORY_hospital_city_tag_id': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[25].encode()])),
'ITEM_CATEGORY_hospital_type': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[26].encode()])),
'ITEM_CATEGORY_hospital_is_high_quality': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[splits[27].encode()])),
'ITEM_MULTI_CATEGORY_second_demands': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[28].split(','))))),
'ITEM_MULTI_CATEGORY_second_solutions': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[29].split(','))))),
'ITEM_MULTI_CATEGORY_second_positions': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[30].split(','))))),
'ITEM_MULTI_CATEGORY_projects': tf.train.Feature(
bytes_list=tf.train.BytesList(value=list(map(lambda s: s.encode(), splits[31].split(','))))),
'ITEM_NUMERIC_sku_price': tf.train.Feature(float_list=tf.train.FloatList(value=[float(splits[32])])),
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(splits[5])])),
}
# print(features)
# print(splits[32])
tf_features = tf.train.Features(feature=features)
tf_example = tf.train.Example(features=tf_features)
tf_serialized = tf_example.SerializeToString()
return tf_serialized
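# Illustrative sketch only (commented out; assumes TF >= 1.14 and that each text line is
# the '|'-delimited record expected by get_example_string). It shows how the serialized
# Examples could be written to a ZLIB-compressed TFRecord, matching the compression_type
# used by the TFRecordDataset reader in the training script. The local path is a placeholder.
# options = tf.io.TFRecordOptions(compression_type='ZLIB')
# with tf.io.TFRecordWriter('/home/gmuser/train_samples.tfrecord', options) as writer:
#     for row in spark.read.text(output_file).toLocalIterator():
#         writer.write(get_example_string(row.value))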
# output_file = "file:///home/gmuser/eval_samples"
# output_file = "/strategy/eval_samples"
# test_samples.write.mode("overwrite").options(header="false", sep='|').csv(output_file)
# print("训练数据写入 耗时s:{}".format(time.time() - write_time_start))
print("总耗时:{} mins".format((time.time() - start)/60))
spark.stop()
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
import sys
import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
BASE_DIR = '/Users/edz/software/Recommend/'
def input_fn(csv_path, epoch, shuffle, batch_size):
dataset = tf.data.TextLineDataset(csv_path)
def parse_line(line_tensor):
splits = tf.compat.v1.string_split([line_tensor], delimiter='|', skip_empty=False).values
return {
'ITEM_CATEGORY_card_id': splits[0],
'USER_CATEGORY_device_id': splits[2],
'USER_CATEGORY_os': splits[3],
'USER_CATEGORY_user_city_id': splits[4],
'USER_MULTI_CATEGORY_second_solutions': tf.compat.v1.string_split([splits[6]], delimiter=',').values,
'USER_MULTI_CATEGORY_second_demands': tf.compat.v1.string_split([splits[7]], delimiter=',').values,
'USER_MULTI_CATEGORY_second_positions': tf.compat.v1.string_split([splits[8]], delimiter=',').values,
'USER_MULTI_CATEGORY_projects': tf.compat.v1.string_split([splits[9]], delimiter=',').values,
'ITEM_NUMERIC_click_count_sum': tf.compat.v1.string_to_number(splits[10]),
'ITEM_NUMERIC_click_count_avg': tf.compat.v1.string_to_number(splits[11]),
'ITEM_NUMERIC_click_count_stddev': tf.compat.v1.string_to_number(splits[12]),
'ITEM_NUMERIC_exp_count_sum': tf.compat.v1.string_to_number(splits[13]),
'ITEM_NUMERIC_exp_count_avg': tf.compat.v1.string_to_number(splits[14]),
'ITEM_NUMERIC_exp_count_stddev': tf.compat.v1.string_to_number(splits[15]),
'ITEM_NUMERIC_discount': tf.compat.v1.string_to_number(splits[16]),
'ITEM_NUMERIC_case_count': tf.compat.v1.string_to_number(splits[17]),
'ITEM_NUMERIC_sales_count': tf.compat.v1.string_to_number(splits[18]),
'ITEM_CATEGORY_service_type': splits[19],
'ITEM_CATEGORY_merchant_id': splits[20],
'ITEM_CATEGORY_doctor_type': splits[21],
'ITEM_CATEGORY_doctor_id': splits[22],
'ITEM_CATEGORY_doctor_famous': splits[23],
'ITEM_CATEGORY_hospital_id': splits[24],
'ITEM_CATEGORY_hospital_city_tag_id': splits[25],
'ITEM_CATEGORY_hospital_type': splits[26],
'ITEM_CATEGORY_hospital_is_high_quality': splits[27],
'ITEM_MULTI_CATEGORY_second_demands': tf.compat.v1.string_split([splits[28]], delimiter=',').values,
'ITEM_MULTI_CATEGORY_second_solutions': tf.compat.v1.string_split([splits[29]],
delimiter=',').values,
'ITEM_MULTI_CATEGORY_second_positions': tf.compat.v1.string_split([splits[30]],
delimiter=',').values,
'ITEM_MULTI_CATEGORY_projects': tf.compat.v1.string_split([splits[31]], delimiter=',').values,
'ITEM_NUMERIC_sku_price': tf.compat.v1.string_to_number(splits[32]),
# 'label': tf.compat.v1.string_to_number(splits[5])
}, tf.compat.v1.string_to_number(splits[5])
padded_shapes = ({'ITEM_CATEGORY_card_id': (), 'USER_CATEGORY_device_id': (), 'USER_CATEGORY_os': (),
'USER_CATEGORY_user_city_id': (), 'USER_MULTI_CATEGORY_second_solutions': [-1],
'USER_MULTI_CATEGORY_second_demands': [-1], 'USER_MULTI_CATEGORY_second_positions': [-1],
'USER_MULTI_CATEGORY_projects': [-1], 'ITEM_NUMERIC_click_count_sum': (),
'ITEM_NUMERIC_click_count_avg': (), 'ITEM_NUMERIC_click_count_stddev': (),
'ITEM_NUMERIC_exp_count_sum': (), 'ITEM_NUMERIC_exp_count_avg': (),
'ITEM_NUMERIC_exp_count_stddev': (), 'ITEM_NUMERIC_discount': (), 'ITEM_NUMERIC_case_count': (),
'ITEM_NUMERIC_sales_count': (), 'ITEM_CATEGORY_service_type': (), 'ITEM_CATEGORY_merchant_id': (),
'ITEM_CATEGORY_doctor_type': (), 'ITEM_CATEGORY_doctor_id': (), 'ITEM_CATEGORY_doctor_famous': (),
'ITEM_CATEGORY_hospital_id': (), 'ITEM_CATEGORY_hospital_city_tag_id': (),
'ITEM_CATEGORY_hospital_type': (), 'ITEM_CATEGORY_hospital_is_high_quality': (),
'ITEM_MULTI_CATEGORY_second_demands': [-1], 'ITEM_MULTI_CATEGORY_second_solutions': [-1],
'ITEM_MULTI_CATEGORY_second_positions': [-1], 'ITEM_MULTI_CATEGORY_projects': [-1],
'ITEM_NUMERIC_sku_price': ()}, ())
padding_values = ({'ITEM_CATEGORY_card_id': '-1', 'USER_CATEGORY_device_id': '-1', 'USER_CATEGORY_os': '-1',
'USER_CATEGORY_user_city_id': '-1', 'USER_MULTI_CATEGORY_second_solutions': '-1',
'USER_MULTI_CATEGORY_second_demands': '-1', 'USER_MULTI_CATEGORY_second_positions': '-1',
'USER_MULTI_CATEGORY_projects': '-1', 'ITEM_NUMERIC_click_count_sum': 0.0,
'ITEM_NUMERIC_click_count_avg': 0.0, 'ITEM_NUMERIC_click_count_stddev': 0.0,
'ITEM_NUMERIC_exp_count_sum': 0.0, 'ITEM_NUMERIC_exp_count_avg': 0.0,
'ITEM_NUMERIC_exp_count_stddev': 0.0, 'ITEM_NUMERIC_discount': 0.0,
'ITEM_NUMERIC_case_count': 0.0, 'ITEM_NUMERIC_sales_count': 0.0,
'ITEM_CATEGORY_service_type': '-1', 'ITEM_CATEGORY_merchant_id': '-1',
'ITEM_CATEGORY_doctor_type': '-1', 'ITEM_CATEGORY_doctor_id': '-1',
'ITEM_CATEGORY_doctor_famous': '-1', 'ITEM_CATEGORY_hospital_id': '-1',
'ITEM_CATEGORY_hospital_city_tag_id': '-1', 'ITEM_CATEGORY_hospital_type': '-1',
'ITEM_CATEGORY_hospital_is_high_quality': '-1', 'ITEM_MULTI_CATEGORY_second_demands': '-1',
'ITEM_MULTI_CATEGORY_second_solutions': '-1', 'ITEM_MULTI_CATEGORY_second_positions': '-1',
'ITEM_MULTI_CATEGORY_projects': '-1', 'ITEM_NUMERIC_sku_price': 0.0}, 0.0)
dataset = dataset.map(parse_line, num_parallel_calls = tf.data.experimental.AUTOTUNE)
if shuffle:
dataset = dataset.shuffle(1024)
dataset = dataset.padded_batch(batch_size, padded_shapes, padding_values = padding_values)
# prefetch and repeat return new datasets, so reassign them or they have no effect
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
dataset = dataset.repeat(epoch)
return dataset
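# Illustrative sketch only (commented out; assumes TF 1.x graph mode, as used by the
# Estimator below): peeking at one batch produced by input_fn.
# ds = input_fn(BASE_DIR + 'train_samples.csv', epoch = 1, shuffle = False, batch_size = 2)
# features, label = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()
# 'features' is a dict of dense tensors keyed by the feature names above; multi-category
# columns are padded with '-1' to the longest list in the batch.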
boundaries = [0, 10, 100]
ITEM_NUMERIC_click_count_sum_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_click_count_sum'), boundaries)
ITEM_NUMERIC_exp_count_sum_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_exp_count_sum'), boundaries)
ITEM_NUMERIC_click_count_avg_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_click_count_avg'), boundaries)
ITEM_NUMERIC_exp_count_avg_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_exp_count_avg'), boundaries)
boundaries = [0, 0.01, 0.1]
ITEM_NUMERIC_click_count_stddev_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_click_count_stddev'), boundaries)
ITEM_NUMERIC_exp_count_stddev_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_exp_count_stddev'), boundaries)
boundaries = [0, 0.01, 0.1, 1]
ITEM_NUMERIC_discount_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_discount'), boundaries)
boundaries = [0, 10, 100]
ITEM_NUMERIC_case_count_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_case_count'), boundaries)
ITEM_NUMERIC_sales_count_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_sales_count'), boundaries)
ITEM_NUMERIC_sku_price_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_sku_price'), boundaries)
USER_CATEGORY_device_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_CATEGORY_device_id', BASE_DIR + 'USER_CATEGORY_device_id_vocab.csv')
USER_CATEGORY_os_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_CATEGORY_os', BASE_DIR + 'USER_CATEGORY_os_vocab.csv')
USER_CATEGORY_user_city_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_CATEGORY_user_city_id', BASE_DIR + 'USER_CATEGORY_user_city_id_vocab.csv')
USER_MULTI_CATEGORY__second_solutions_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_MULTI_CATEGORY_second_solutions', BASE_DIR + 'USER_MULTI_CATEGORY_second_solutions_vocab.csv')
USER_MULTI_CATEGORY__second_positions_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_MULTI_CATEGORY_second_positions', BASE_DIR + 'USER_MULTI_CATEGORY_second_positions_vocab.csv')
USER_MULTI_CATEGORY__second_demands_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_MULTI_CATEGORY_second_demands', BASE_DIR + 'USER_MULTI_CATEGORY_second_demands_vocab.csv')
USER_MULTI_CATEGORY__projects_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_MULTI_CATEGORY_projects', BASE_DIR + 'USER_MULTI_CATEGORY_projects_vocab.csv')
ITEM_CATEGORY_card_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_card_id', BASE_DIR + 'ITEM_CATEGORY_card_id_vocab.csv')
ITEM_CATEGORY_service_type_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_service_type', BASE_DIR + 'ITEM_CATEGORY_service_type_vocab.csv')
ITEM_CATEGORY_merchant_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_merchant_id', BASE_DIR + 'ITEM_CATEGORY_merchant_id_vocab.csv')
ITEM_CATEGORY_doctor_type_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_doctor_type', BASE_DIR + 'ITEM_CATEGORY_doctor_type_vocab.csv')
ITEM_CATEGORY_doctor_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_doctor_id', BASE_DIR + 'ITEM_CATEGORY_doctor_id_vocab.csv')
ITEM_CATEGORY_doctor_famous_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_doctor_famous', BASE_DIR + 'ITEM_CATEGORY_doctor_famous_vocab.csv')
ITEM_CATEGORY_hospital_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_hospital_id', BASE_DIR + 'ITEM_CATEGORY_hospital_id_vocab.csv')
ITEM_CATEGORY_hospital_city_tag_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_hospital_city_tag_id', BASE_DIR + 'ITEM_CATEGORY_hospital_city_tag_id_vocab.csv')
ITEM_CATEGORY_hospital_type_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_hospital_type', BASE_DIR + 'ITEM_CATEGORY_hospital_type_vocab.csv')
ITEM_CATEGORY_hospital_is_high_quality_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_hospital_is_high_quality', BASE_DIR + 'ITEM_CATEGORY_hospital_is_high_quality_vocab.csv')
ITEM_MULTI_CATEGORY__second_solutions_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_MULTI_CATEGORY_second_solutions', BASE_DIR + 'ITEM_MULTI_CATEGORY_second_solutions_vocab.csv')
ITEM_MULTI_CATEGORY__second_positions_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_MULTI_CATEGORY_second_positions', BASE_DIR + 'ITEM_MULTI_CATEGORY_second_positions_vocab.csv')
ITEM_MULTI_CATEGORY__second_demands_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_MULTI_CATEGORY_second_demands', BASE_DIR + 'ITEM_MULTI_CATEGORY_second_demands_vocab.csv')
ITEM_MULTI_CATEGORY__projects_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_MULTI_CATEGORY_projects', BASE_DIR + 'ITEM_MULTI_CATEGORY_projects_vocab.csv')
def embedding_fc(categorical_column, dim):
return tf.feature_column.embedding_column(categorical_column, dim)
linear_feature_columns = [
ITEM_NUMERIC_click_count_sum_fc,
ITEM_NUMERIC_exp_count_sum_fc,
ITEM_NUMERIC_click_count_avg_fc,
ITEM_NUMERIC_exp_count_avg_fc,
ITEM_NUMERIC_click_count_stddev_fc,
ITEM_NUMERIC_exp_count_stddev_fc,
ITEM_NUMERIC_discount_fc,
ITEM_NUMERIC_case_count_fc,
ITEM_NUMERIC_sales_count_fc,
ITEM_NUMERIC_sku_price_fc,
embedding_fc(ITEM_CATEGORY_card_id_fc, 1),
embedding_fc(ITEM_CATEGORY_service_type_fc, 1),
embedding_fc(ITEM_CATEGORY_merchant_id_fc, 1),
embedding_fc(ITEM_CATEGORY_doctor_type_fc, 1),
embedding_fc(ITEM_CATEGORY_doctor_id_fc, 1),
embedding_fc(ITEM_CATEGORY_doctor_famous_fc, 1),
embedding_fc(ITEM_CATEGORY_hospital_id_fc, 1),
embedding_fc(ITEM_CATEGORY_hospital_city_tag_id_fc, 1),
embedding_fc(ITEM_CATEGORY_hospital_type_fc, 1),
embedding_fc(ITEM_CATEGORY_hospital_is_high_quality_fc, 1),
embedding_fc(ITEM_MULTI_CATEGORY__projects_fc, 1),
embedding_fc(ITEM_MULTI_CATEGORY__second_demands_fc, 1),
embedding_fc(ITEM_MULTI_CATEGORY__second_positions_fc, 1),
embedding_fc(ITEM_MULTI_CATEGORY__second_solutions_fc, 1),
]
dnn_feature_columns = [
embedding_fc(USER_CATEGORY_device_id_fc, 8),
embedding_fc(USER_CATEGORY_os_fc, 8),
embedding_fc(USER_CATEGORY_user_city_id_fc, 8),
embedding_fc(USER_MULTI_CATEGORY__second_solutions_fc, 8),
embedding_fc(USER_MULTI_CATEGORY__second_positions_fc, 8),
embedding_fc(USER_MULTI_CATEGORY__second_demands_fc, 8),
embedding_fc(USER_MULTI_CATEGORY__projects_fc, 8),
embedding_fc(ITEM_NUMERIC_click_count_sum_fc, 8),
embedding_fc(ITEM_NUMERIC_exp_count_sum_fc, 8),
embedding_fc(ITEM_NUMERIC_click_count_avg_fc, 8),
embedding_fc(ITEM_NUMERIC_exp_count_avg_fc, 8),
embedding_fc(ITEM_NUMERIC_click_count_stddev_fc, 8),
embedding_fc(ITEM_NUMERIC_exp_count_stddev_fc, 8),
embedding_fc(ITEM_NUMERIC_discount_fc, 8),
embedding_fc(ITEM_NUMERIC_case_count_fc, 8),
embedding_fc(ITEM_NUMERIC_sales_count_fc, 8),
embedding_fc(ITEM_NUMERIC_sku_price_fc, 8),
embedding_fc(ITEM_CATEGORY_card_id_fc, 8),
embedding_fc(ITEM_CATEGORY_service_type_fc, 8),
embedding_fc(ITEM_CATEGORY_merchant_id_fc, 8),
embedding_fc(ITEM_CATEGORY_doctor_type_fc, 8),
embedding_fc(ITEM_CATEGORY_doctor_id_fc, 8),
embedding_fc(ITEM_CATEGORY_doctor_famous_fc, 8),
embedding_fc(ITEM_CATEGORY_hospital_id_fc, 8),
embedding_fc(ITEM_CATEGORY_hospital_city_tag_id_fc, 8),
embedding_fc(ITEM_CATEGORY_hospital_type_fc, 8),
embedding_fc(ITEM_CATEGORY_hospital_is_high_quality_fc, 8),
embedding_fc(ITEM_MULTI_CATEGORY__projects_fc, 8),
embedding_fc(ITEM_MULTI_CATEGORY__second_demands_fc, 8),
embedding_fc(ITEM_MULTI_CATEGORY__second_positions_fc, 8),
embedding_fc(ITEM_MULTI_CATEGORY__second_solutions_fc, 8),
]
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0, 1, 2"
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
distribution = tf.distribute.MirroredStrategy()
# session_config = tf.compat.v1.ConfigProto(log_device_placement = True, allow_soft_placement = True)
session_config = tf.compat.v1.ConfigProto(allow_soft_placement = True)
session_config.gpu_options.allow_growth = True
# config = tf.estimator.RunConfig(save_checkpoints_steps = 10000, train_distribute = distribution, eval_distribute = distribution)
config = tf.estimator.RunConfig(save_checkpoints_steps = 10000, session_config = session_config)
wideAndDeepModel = tf.estimator.DNNLinearCombinedClassifier(model_dir = BASE_DIR + 'model',
linear_feature_columns = linear_feature_columns,
dnn_feature_columns = dnn_feature_columns,
dnn_hidden_units = [128, 32],
dnn_dropout = 0.5,
config = config)
# early_stopping = tf.contrib.estimator.stop_if_no_decrease_hook(wideAndDeepModel, eval_dir = wideAndDeepModel.eval_dir(), metric_name='auc', max_steps_without_decrease=1000, min_steps = 100)
# early_stopping = tf.contrib.estimator.stop_if_no_increase_hook(wideAndDeepModel, metric_name = 'auc', max_steps_without_increase = 1000, min_steps = 1000)
hooks = [tf.train.ProfilerHook(save_steps=100, output_dir='./profile/')]
train_spec = tf.estimator.TrainSpec(input_fn = lambda: input_fn(BASE_DIR + 'train_samples.csv', 20, True, 512), hooks = hooks)
serving_feature_spec = tf.feature_column.make_parse_example_spec(
linear_feature_columns + dnn_feature_columns)
serving_input_receiver_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
serving_feature_spec))
exporter = tf.estimator.BestExporter(
name = "best_exporter",
compare_fn = lambda best_eval_result, current_eval_result: current_eval_result['auc'] > best_eval_result['auc'],
serving_input_receiver_fn = serving_input_receiver_fn,
exports_to_keep = 3)
eval_spec = tf.estimator.EvalSpec(input_fn = lambda: input_fn(BASE_DIR + 'eval_samples.csv', 1, False, 2 ** 15), steps = None, throttle_secs = 120, exporters = exporter)
# def my_auc(labels, predictions):
# return {'auc_pr_careful_interpolation': tf.metrics.auc(labels, predictions['logistic'], curve='ROC',
# summation_method='careful_interpolation')}
# wideAndDeepModel = tf.contrib.estimator.add_metrics(wideAndDeepModel, my_auc)
tf.estimator.train_and_evaluate(wideAndDeepModel, train_spec, eval_spec)
wideAndDeepModel.evaluate(lambda: input_fn(BASE_DIR + 'eval_samples.csv', 1, False, 2 ** 15))
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
import sys
import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
BASE_DIR = '/Users/edz/software/Recommend/'
def input_fn(csv_path, epoch, shuffle, batch_size):
dataset = tf.data.TFRecordDataset(csv_path, buffer_size = 1024, num_parallel_reads = 3, compression_type = 'ZLIB')
dics = {
'ITEM_CATEGORY_card_id': tf.FixedLenFeature((), tf.string, default_value='-1'),
'USER_CATEGORY_device_id': tf.FixedLenFeature((), tf.string, default_value='-1'),
'USER_CATEGORY_os': tf.FixedLenFeature((), tf.string, default_value='-1'),
'USER_CATEGORY_user_city_id': tf.FixedLenFeature((), tf.string, default_value='-1'),
'USER_MULTI_CATEGORY_second_solutions': tf.VarLenFeature(tf.string),
'USER_MULTI_CATEGORY_second_demands': tf.VarLenFeature(tf.string),
'USER_MULTI_CATEGORY_second_positions': tf.VarLenFeature(tf.string),
'USER_MULTI_CATEGORY_projects': tf.VarLenFeature(tf.string),
'ITEM_NUMERIC_click_count_sum': tf.FixedLenFeature((), tf.float32, default_value=0),
'ITEM_NUMERIC_click_count_avg': tf.FixedLenFeature((), tf.float32, default_value=0),
'ITEM_NUMERIC_click_count_stddev': tf.FixedLenFeature((), tf.float32, default_value=0),
'ITEM_NUMERIC_exp_count_sum': tf.FixedLenFeature((), tf.float32, default_value=0),
'ITEM_NUMERIC_exp_count_avg': tf.FixedLenFeature((), tf.float32, default_value=0),
'ITEM_NUMERIC_exp_count_stddev': tf.FixedLenFeature((), tf.float32, default_value=0),
'ITEM_NUMERIC_discount': tf.FixedLenFeature((), tf.float32, default_value=0),
'ITEM_NUMERIC_case_count': tf.FixedLenFeature((), tf.float32, default_value=0),
'ITEM_NUMERIC_sales_count': tf.FixedLenFeature((), tf.float32, default_value=0),
'ITEM_CATEGORY_service_type': tf.FixedLenFeature((), tf.string, default_value='-1'),
'ITEM_CATEGORY_merchant_id': tf.FixedLenFeature((), tf.string, default_value='-1'),
'ITEM_CATEGORY_doctor_type': tf.FixedLenFeature((), tf.string, default_value='-1'),
'ITEM_CATEGORY_doctor_id': tf.FixedLenFeature((), tf.string, default_value='-1'),
'ITEM_CATEGORY_doctor_famous': tf.FixedLenFeature((), tf.string, default_value='-1'),
'ITEM_CATEGORY_hospital_id': tf.FixedLenFeature((), tf.string, default_value='-1'),
'ITEM_CATEGORY_hospital_city_tag_id': tf.FixedLenFeature((), tf.string, default_value='-1'),
'ITEM_CATEGORY_hospital_type': tf.FixedLenFeature((), tf.string, default_value='-1'),
'ITEM_CATEGORY_hospital_is_high_quality': tf.FixedLenFeature((), tf.string, default_value='-1'),
'ITEM_MULTI_CATEGORY_second_demands': tf.VarLenFeature(tf.string),
'ITEM_MULTI_CATEGORY_second_solutions': tf.VarLenFeature(tf.string),
'ITEM_MULTI_CATEGORY_second_positions': tf.VarLenFeature(tf.string),
'ITEM_MULTI_CATEGORY_projects': tf.VarLenFeature(tf.string),
'ITEM_NUMERIC_sku_price': tf.FixedLenFeature((), tf.float32, default_value=0),
'label': tf.FixedLenFeature((), tf.int64, default_value=0),
}
def parse_serialized_example(serialized_example):
parsed_example = tf.parse_single_example(serialized_example, dics)
parsed_example['USER_MULTI_CATEGORY_second_solutions'] = tf.sparse_tensor_to_dense(
parsed_example['USER_MULTI_CATEGORY_second_solutions'], default_value='-1')
parsed_example['USER_MULTI_CATEGORY_second_demands'] = tf.sparse_tensor_to_dense(
parsed_example['USER_MULTI_CATEGORY_second_demands'], default_value='-1')
parsed_example['USER_MULTI_CATEGORY_second_positions'] = tf.sparse_tensor_to_dense(
parsed_example['USER_MULTI_CATEGORY_second_positions'], default_value='-1')
parsed_example['USER_MULTI_CATEGORY_projects'] = tf.sparse_tensor_to_dense(
parsed_example['USER_MULTI_CATEGORY_projects'], default_value='-1')
parsed_example['ITEM_MULTI_CATEGORY_second_demands'] = tf.sparse_tensor_to_dense(
parsed_example['ITEM_MULTI_CATEGORY_second_demands'], default_value='-1')
parsed_example['ITEM_MULTI_CATEGORY_second_solutions'] = tf.sparse_tensor_to_dense(
parsed_example['ITEM_MULTI_CATEGORY_second_solutions'], default_value='-1')
parsed_example['ITEM_MULTI_CATEGORY_second_positions'] = tf.sparse_tensor_to_dense(
parsed_example['ITEM_MULTI_CATEGORY_second_positions'], default_value='-1')
parsed_example['ITEM_MULTI_CATEGORY_projects'] = tf.sparse_tensor_to_dense(
parsed_example['ITEM_MULTI_CATEGORY_projects'], default_value='-1')
return parsed_example, parsed_example.pop('label')
padded_shapes = ({'ITEM_CATEGORY_card_id': (), 'USER_CATEGORY_device_id': (), 'USER_CATEGORY_os': (),
'USER_CATEGORY_user_city_id': (), 'USER_MULTI_CATEGORY_second_solutions': [-1],
'USER_MULTI_CATEGORY_second_demands': [-1], 'USER_MULTI_CATEGORY_second_positions': [-1],
'USER_MULTI_CATEGORY_projects': [-1], 'ITEM_NUMERIC_click_count_sum': (),
'ITEM_NUMERIC_click_count_avg': (), 'ITEM_NUMERIC_click_count_stddev': (),
'ITEM_NUMERIC_exp_count_sum': (), 'ITEM_NUMERIC_exp_count_avg': (),
'ITEM_NUMERIC_exp_count_stddev': (), 'ITEM_NUMERIC_discount': (), 'ITEM_NUMERIC_case_count': (),
'ITEM_NUMERIC_sales_count': (), 'ITEM_CATEGORY_service_type': (), 'ITEM_CATEGORY_merchant_id': (),
'ITEM_CATEGORY_doctor_type': (), 'ITEM_CATEGORY_doctor_id': (), 'ITEM_CATEGORY_doctor_famous': (),
'ITEM_CATEGORY_hospital_id': (), 'ITEM_CATEGORY_hospital_city_tag_id': (),
'ITEM_CATEGORY_hospital_type': (), 'ITEM_CATEGORY_hospital_is_high_quality': (),
'ITEM_MULTI_CATEGORY_second_demands': [-1], 'ITEM_MULTI_CATEGORY_second_solutions': [-1],
'ITEM_MULTI_CATEGORY_second_positions': [-1], 'ITEM_MULTI_CATEGORY_projects': [-1],
'ITEM_NUMERIC_sku_price': ()}, ())
padding_values = ({'ITEM_CATEGORY_card_id': '-1', 'USER_CATEGORY_device_id': '-1', 'USER_CATEGORY_os': '-1',
'USER_CATEGORY_user_city_id': '-1', 'USER_MULTI_CATEGORY_second_solutions': '-1',
'USER_MULTI_CATEGORY_second_demands': '-1', 'USER_MULTI_CATEGORY_second_positions': '-1',
'USER_MULTI_CATEGORY_projects': '-1', 'ITEM_NUMERIC_click_count_sum': 0.0,
'ITEM_NUMERIC_click_count_avg': 0.0, 'ITEM_NUMERIC_click_count_stddev': 0.0,
'ITEM_NUMERIC_exp_count_sum': 0.0, 'ITEM_NUMERIC_exp_count_avg': 0.0,
'ITEM_NUMERIC_exp_count_stddev': 0.0, 'ITEM_NUMERIC_discount': 0.0,
'ITEM_NUMERIC_case_count': 0.0, 'ITEM_NUMERIC_sales_count': 0.0,
'ITEM_CATEGORY_service_type': '-1', 'ITEM_CATEGORY_merchant_id': '-1',
'ITEM_CATEGORY_doctor_type': '-1', 'ITEM_CATEGORY_doctor_id': '-1',
'ITEM_CATEGORY_doctor_famous': '-1', 'ITEM_CATEGORY_hospital_id': '-1',
'ITEM_CATEGORY_hospital_city_tag_id': '-1', 'ITEM_CATEGORY_hospital_type': '-1',
'ITEM_CATEGORY_hospital_is_high_quality': '-1', 'ITEM_MULTI_CATEGORY_second_demands': '-1',
'ITEM_MULTI_CATEGORY_second_solutions': '-1', 'ITEM_MULTI_CATEGORY_second_positions': '-1',
'ITEM_MULTI_CATEGORY_projects': '-1', 'ITEM_NUMERIC_sku_price': 0.0},
tf.constant(0, dtype=tf.int64))
dataset = dataset.map(parse_serialized_example, num_parallel_calls = tf.data.experimental.AUTOTUNE)
if shuffle:
dataset = dataset.shuffle(1024)
dataset = dataset.padded_batch(batch_size, padded_shapes, padding_values = padding_values)
# prefetch and repeat return new datasets, so reassign them or they have no effect
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
dataset = dataset.repeat(epoch)
return dataset
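# Illustrative note: this input_fn reads ZLIB-compressed TFRecord files whose Examples
# carry the same feature keys as get_example_string in the export script, e.g.
# input_fn(BASE_DIR + 'train_samples.tfrecord', 1, False, 512) as used in train_spec below.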
boundaries = [0, 10, 100]
ITEM_NUMERIC_click_count_sum_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_click_count_sum'), boundaries)
ITEM_NUMERIC_exp_count_sum_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_exp_count_sum'), boundaries)
ITEM_NUMERIC_click_count_avg_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_click_count_avg'), boundaries)
ITEM_NUMERIC_exp_count_avg_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_exp_count_avg'), boundaries)
boundaries = [0, 0.01, 0.1]
ITEM_NUMERIC_click_count_stddev_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_click_count_stddev'), boundaries)
ITEM_NUMERIC_exp_count_stddev_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_exp_count_stddev'), boundaries)
boundaries = [0, 0.01, 0.1, 1]
ITEM_NUMERIC_discount_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_discount'), boundaries)
boundaries = [0, 10, 100]
ITEM_NUMERIC_case_count_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_case_count'), boundaries)
ITEM_NUMERIC_sales_count_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_sales_count'), boundaries)
ITEM_NUMERIC_sku_price_fc = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('ITEM_NUMERIC_sku_price'), boundaries)
USER_CATEGORY_device_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_CATEGORY_device_id', BASE_DIR + 'USER_CATEGORY_device_id_vocab.csv')
USER_CATEGORY_os_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_CATEGORY_os', BASE_DIR + 'USER_CATEGORY_os_vocab.csv')
USER_CATEGORY_user_city_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_CATEGORY_user_city_id', BASE_DIR + 'USER_CATEGORY_user_city_id_vocab.csv')
USER_MULTI_CATEGORY__second_solutions_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_MULTI_CATEGORY_second_solutions', BASE_DIR + 'USER_MULTI_CATEGORY_second_solutions_vocab.csv')
USER_MULTI_CATEGORY__second_positions_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_MULTI_CATEGORY_second_positions', BASE_DIR + 'USER_MULTI_CATEGORY_second_positions_vocab.csv')
USER_MULTI_CATEGORY__second_demands_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_MULTI_CATEGORY_second_demands', BASE_DIR + 'USER_MULTI_CATEGORY_second_demands_vocab.csv')
USER_MULTI_CATEGORY__projects_fc = tf.feature_column.categorical_column_with_vocabulary_file('USER_MULTI_CATEGORY_projects', BASE_DIR + 'USER_MULTI_CATEGORY_projects_vocab.csv')
ITEM_CATEGORY_card_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_card_id', BASE_DIR + 'ITEM_CATEGORY_card_id_vocab.csv')
ITEM_CATEGORY_service_type_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_service_type', BASE_DIR + 'ITEM_CATEGORY_service_type_vocab.csv')
ITEM_CATEGORY_merchant_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_merchant_id', BASE_DIR + 'ITEM_CATEGORY_merchant_id_vocab.csv')
ITEM_CATEGORY_doctor_type_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_doctor_type', BASE_DIR + 'ITEM_CATEGORY_doctor_type_vocab.csv')
ITEM_CATEGORY_doctor_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_doctor_id', BASE_DIR + 'ITEM_CATEGORY_doctor_id_vocab.csv')
ITEM_CATEGORY_doctor_famous_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_doctor_famous', BASE_DIR + 'ITEM_CATEGORY_doctor_famous_vocab.csv')
ITEM_CATEGORY_hospital_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_hospital_id', BASE_DIR + 'ITEM_CATEGORY_hospital_id_vocab.csv')
ITEM_CATEGORY_hospital_city_tag_id_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_hospital_city_tag_id', BASE_DIR + 'ITEM_CATEGORY_hospital_city_tag_id_vocab.csv')
ITEM_CATEGORY_hospital_type_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_hospital_type', BASE_DIR + 'ITEM_CATEGORY_hospital_type_vocab.csv')
ITEM_CATEGORY_hospital_is_high_quality_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_CATEGORY_hospital_is_high_quality', BASE_DIR + 'ITEM_CATEGORY_hospital_is_high_quality_vocab.csv')
ITEM_MULTI_CATEGORY__second_solutions_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_MULTI_CATEGORY_second_solutions', BASE_DIR + 'ITEM_MULTI_CATEGORY_second_solutions_vocab.csv')
ITEM_MULTI_CATEGORY__second_positions_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_MULTI_CATEGORY_second_positions', BASE_DIR + 'ITEM_MULTI_CATEGORY_second_positions_vocab.csv')
ITEM_MULTI_CATEGORY__second_demands_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_MULTI_CATEGORY_second_demands', BASE_DIR + 'ITEM_MULTI_CATEGORY_second_demands_vocab.csv')
ITEM_MULTI_CATEGORY__projects_fc = tf.feature_column.categorical_column_with_vocabulary_file('ITEM_MULTI_CATEGORY_projects', BASE_DIR + 'ITEM_MULTI_CATEGORY_projects_vocab.csv')
def embedding_fc(categorical_column, dim):
return tf.feature_column.embedding_column(categorical_column, dim)
linear_feature_columns = [
ITEM_NUMERIC_click_count_sum_fc,
ITEM_NUMERIC_exp_count_sum_fc,
ITEM_NUMERIC_click_count_avg_fc,
ITEM_NUMERIC_exp_count_avg_fc,
ITEM_NUMERIC_click_count_stddev_fc,
ITEM_NUMERIC_exp_count_stddev_fc,
ITEM_NUMERIC_discount_fc,
ITEM_NUMERIC_case_count_fc,
ITEM_NUMERIC_sales_count_fc,
ITEM_NUMERIC_sku_price_fc,
embedding_fc(ITEM_CATEGORY_card_id_fc, 1),
embedding_fc(ITEM_CATEGORY_service_type_fc, 1),
embedding_fc(ITEM_CATEGORY_merchant_id_fc, 1),
embedding_fc(ITEM_CATEGORY_doctor_type_fc, 1),
embedding_fc(ITEM_CATEGORY_doctor_id_fc, 1),
embedding_fc(ITEM_CATEGORY_doctor_famous_fc, 1),
embedding_fc(ITEM_CATEGORY_hospital_id_fc, 1),
embedding_fc(ITEM_CATEGORY_hospital_city_tag_id_fc, 1),
embedding_fc(ITEM_CATEGORY_hospital_type_fc, 1),
embedding_fc(ITEM_CATEGORY_hospital_is_high_quality_fc, 1),
embedding_fc(ITEM_MULTI_CATEGORY__projects_fc, 1),
embedding_fc(ITEM_MULTI_CATEGORY__second_demands_fc, 1),
embedding_fc(ITEM_MULTI_CATEGORY__second_positions_fc, 1),
embedding_fc(ITEM_MULTI_CATEGORY__second_solutions_fc, 1),
]
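# Deep (DNN) side: 8-dim embeddings of the user/device columns, the user and item
# multi-category tag columns, and the same item columns used on the wide side.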
dnn_feature_columns = [
embedding_fc(USER_CATEGORY_device_id_fc, 8),
embedding_fc(USER_CATEGORY_os_fc, 8),
embedding_fc(USER_CATEGORY_user_city_id_fc, 8),
embedding_fc(USER_MULTI_CATEGORY__second_solutions_fc, 8),
embedding_fc(USER_MULTI_CATEGORY__second_positions_fc, 8),
embedding_fc(USER_MULTI_CATEGORY__second_demands_fc, 8),
embedding_fc(USER_MULTI_CATEGORY__projects_fc, 8),
embedding_fc(ITEM_NUMERIC_click_count_sum_fc, 8),
embedding_fc(ITEM_NUMERIC_exp_count_sum_fc, 8),
embedding_fc(ITEM_NUMERIC_click_count_avg_fc, 8),
embedding_fc(ITEM_NUMERIC_exp_count_avg_fc, 8),
embedding_fc(ITEM_NUMERIC_click_count_stddev_fc, 8),
embedding_fc(ITEM_NUMERIC_exp_count_stddev_fc, 8),
embedding_fc(ITEM_NUMERIC_discount_fc, 8),
embedding_fc(ITEM_NUMERIC_case_count_fc, 8),
embedding_fc(ITEM_NUMERIC_sales_count_fc, 8),
embedding_fc(ITEM_NUMERIC_sku_price_fc, 8),
embedding_fc(ITEM_CATEGORY_card_id_fc, 8),
embedding_fc(ITEM_CATEGORY_service_type_fc, 8),
embedding_fc(ITEM_CATEGORY_merchant_id_fc, 8),
embedding_fc(ITEM_CATEGORY_doctor_type_fc, 8),
embedding_fc(ITEM_CATEGORY_doctor_id_fc, 8),
embedding_fc(ITEM_CATEGORY_doctor_famous_fc, 8),
embedding_fc(ITEM_CATEGORY_hospital_id_fc, 8),
embedding_fc(ITEM_CATEGORY_hospital_city_tag_id_fc, 8),
embedding_fc(ITEM_CATEGORY_hospital_type_fc, 8),
embedding_fc(ITEM_CATEGORY_hospital_is_high_quality_fc, 8),
embedding_fc(ITEM_MULTI_CATEGORY__projects_fc, 8),
embedding_fc(ITEM_MULTI_CATEGORY__second_demands_fc, 8),
embedding_fc(ITEM_MULTI_CATEGORY__second_positions_fc, 8),
embedding_fc(ITEM_MULTI_CATEGORY__second_solutions_fc, 8),
]
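# Limit TensorFlow to GPUs 0-2 and print the devices it can actually see. The
# environment variable only takes effect if it is set before TensorFlow
# initializes its CUDA context (i.e. before any session or estimator work).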
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0, 1, 2"
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
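# MirroredStrategy is instantiated below, but train_distribute/eval_distribute are
# left commented out in the RunConfig, so this debug run trains without
# distribution; allow_growth avoids grabbing all GPU memory up front.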
distribution = tf.distribute.MirroredStrategy()
# session_config = tf.compat.v1.ConfigProto(log_device_placement = True, allow_soft_placement = True)
session_config = tf.compat.v1.ConfigProto(allow_soft_placement = True)
session_config.gpu_options.allow_growth = True
# config = tf.estimator.RunConfig(save_checkpoints_steps = 10000, train_distribute = distribution, eval_distribute = distribution)
config = tf.estimator.RunConfig(save_checkpoints_steps = 10000, session_config = session_config)
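# Wide & Deep classifier: the linear ("wide") part gets linear_feature_columns and
# the DNN ("deep") part gets dnn_feature_columns with hidden layers (128, 32) and
# dropout 0.5. Checkpoints go to BASE_DIR + 'model' every 10000 steps.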
wideAndDeepModel = tf.estimator.DNNLinearCombinedClassifier(model_dir = BASE_DIR + 'model',
linear_feature_columns = linear_feature_columns,
dnn_feature_columns = dnn_feature_columns,
dnn_hidden_units = [128, 32],
dnn_dropout = 0.5,
config = config)
# early_stopping = tf.contrib.estimator.stop_if_no_decrease_hook(wideAndDeepModel, eval_dir = wideAndDeepModel.eval_dir(), metric_name='auc', max_steps_without_decrease=1000, min_steps = 100)
# early_stopping = tf.contrib.estimator.stop_if_no_increase_hook(wideAndDeepModel, metric_name = 'auc', max_steps_without_increase = 1000, min_steps = 1000)
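# The contrib early-stopping hooks above are kept commented out for reference; on
# newer TF releases the equivalent is expected to live under
# tf.estimator.experimental (e.g. stop_if_no_increase_hook), though that depends
# on the TF version in use. ProfilerHook writes a Chrome-trace timeline to
# ./profile/ every 100 steps.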
hooks = [tf.train.ProfilerHook(save_steps=100, output_dir='./profile/')]
train_spec = tf.estimator.TrainSpec(input_fn = lambda: input_fn(BASE_DIR + 'train_samples.tfrecord', 20, True, 512), hooks = hooks)
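# Serving signature: build_parsing_serving_input_receiver_fn creates a receiver
# that parses serialized tf.train.Example protos according to the feature spec
# derived from all wide and deep feature columns.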
serving_feature_spec = tf.feature_column.make_parse_example_spec(
linear_feature_columns + dnn_feature_columns)
serving_input_receiver_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
serving_feature_spec))
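# Keep the 3 best SavedModel exports, where "best" means a strictly higher eval
# AUC than the previous best evaluation.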
exporter = tf.estimator.BestExporter(
name = "best_exporter",
compare_fn = lambda best_eval_result, current_eval_result: current_eval_result['auc'] > best_eval_result['auc'],
serving_input_receiver_fn = serving_input_receiver_fn,
exports_to_keep = 3)
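# steps=None evaluates the full eval set on each evaluation; throttle_secs=120
# enforces at least 120 seconds between consecutive evaluations.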
eval_spec = tf.estimator.EvalSpec(input_fn = lambda: input_fn(BASE_DIR + 'eval_samples.tfrecord', 1, False, 2 ** 15), steps = None, throttle_secs = 120, exporters = exporter)
# def my_auc(labels, predictions):
# return {'auc_pr_careful_interpolation': tf.metrics.auc(labels, predictions['logistic'], curve='ROC',
# summation_method='careful_interpolation')}
# wideAndDeepModel = tf.contrib.estimator.add_metrics(wideAndDeepModel, my_auc)
tf.estimator.train_and_evaluate(wideAndDeepModel, train_spec, eval_spec)
wideAndDeepModel.evaluate(lambda: input_fn(BASE_DIR + 'eval_samples.tfrecord', 1, False, 2 ** 15))
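# Sketch (not part of the original training flow): the serving input receiver above
# parses serialized tf.train.Example protos, so a scoring request payload can be
# assembled roughly as below. The feature keys and values are illustrative
# assumptions (only two columns shown) and must match the real feature columns and
# the upstream bucketing.
def make_serialized_example():
    feature = {
        # Categorical features are string-valued, matching the vocabulary files above.
        'ITEM_CATEGORY_card_id': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[b'1234'])),
        'ITEM_CATEGORY_service_type': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[b'0'])),
    }
    return tf.train.Example(
        features=tf.train.Features(feature=feature)).SerializeToString()
# The exported SavedModels end up under BASE_DIR + 'model/export/best_exporter/<timestamp>'
# and can be inspected with `saved_model_cli show --dir <export_dir> --all`.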