Commit 5f911d88 authored by 宋柯

Model debugging

parent 6a2815e9
@@ -174,6 +174,7 @@ def addItemFeatures(itemDF,dataVocab,multi_col_vocab):
itemDF = itemDF.drop(columns=onehot_col)
for c in multi_col:
# TODO: multi-label values here should be split into separate tags
multi_col_vocab[c] = list(set(itemDF[c].tolist()))
for i in range(1, 6):
@@ -884,15 +885,13 @@ if __name__ == '__main__':
print("dataVocab:")
for k, v in dataVocab.items():
print(k, len(v), v)
print(k, len(v))
itemDF_spark = spark.createDataFrame(itemDF)
itemDF_spark.printSchema()
itemDF_spark.show(10, truncate=False)
sys.exit(1)
# process item statistics features
itemStaticDF = addItemStaticFeatures(ratingSamplesWithLabel,itemDF_spark,dataVocab)
......
import sys
import os
from datetime import date, timedelta
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
import time
import redis
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import pyspark.sql as sql
from pyspark.sql.functions import when
from pyspark.sql.types import *
from pyspark.sql import functions as F
from collections import defaultdict
import json
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import utils.configUtils as configUtils
# import utils.connUtils as connUtils
import pandas as pd
import math
# os.environ["PYSPARK_PYTHON"]="/usr/bin/python3"
"""
Feature engineering
"""
NUMBER_PRECISION = 2
VERSION = configUtils.SERVICE_VERSION
FEATURE_USER_KEY = "Strategy:rec:feature:service:" + VERSION + ":user:"
FEATURE_ITEM_KEY = "Strategy:rec:feature:service:" + VERSION + ":item:"
FEATURE_VOCAB_KEY = "Strategy:rec:vocab:service:" + VERSION
FEATURE_COLUMN_KEY = "Strategy:rec:column:service:" + VERSION
ITEM_PREFIX = "item_"
DATA_PATH_TRAIN = "/data/files/service_feature_{}_train.csv".format(VERSION)
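# Illustrative key layout (assuming VERSION resolves to e.g. "v1" via configUtils.SERVICE_VERSION):
#   FEATURE_USER_KEY + "<userid>"  -> "Strategy:rec:feature:service:v1:user:<userid>"
#   FEATURE_ITEM_KEY + "<item_id>" -> "Strategy:rec:feature:service:v1:item:<item_id>"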
def getRedisConn():
pool = redis.ConnectionPool(host="172.16.50.145",password="XfkMCCdWDIU%ls$h",port=6379,db=0)
conn = redis.Redis(connection_pool=pool)
# conn = redis.Redis(host="172.16.50.145", port=6379, password="XfkMCCdWDIU%ls$h",db=0)
# conn = redis.Redis(host="172.18.51.10", port=6379,db=0) #test
return conn
def parseTags(tags,i):
tags_arr = tags.split(",")
if len(tags_arr) >= i:
return tags_arr[i-1]
else:
return "-1"
def numberToBucket(num):
res = 0
if not num:
return str(res)
if num >= 1000:
res = 1000//10
else:
res = int(num)//10
return str(res)
def priceToBucket(num):
res = 0
if not num:
return str(res)
if num >= 100000:
res = 100000//1000
else:
res = int(num)//1000
return str(res)
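# Sanity-check examples for the bucketizers above (hypothetical inputs):
#   numberToBucket(25)    -> "2"    (25 // 10)
#   numberToBucket(5000)  -> "100"  (values >= 1000 are clamped into bucket 100)
#   numberToBucket(None)  -> "0"    (missing values fall into bucket 0)
#   priceToBucket(2500)   -> "2"    (2500 // 1000)
#   priceToBucket(200000) -> "100"  (prices >= 100000 are clamped into bucket 100)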
numberToBucketUdf = F.udf(numberToBucket, StringType())
priceToBucketUdf = F.udf(priceToBucket, StringType())
def addItemStaticFeatures(samples,itemDF,dataVocab):
ctrUdf = F.udf(wilson_ctr, FloatType())
# No over() window for items: an item can exist indefinitely, so the latest overall statistics are sufficient
print("Processing item statistics features...")
staticFeatures = samples.groupBy('item_id').agg(F.count(F.lit(1)).alias('itemRatingCount'),
F.avg(F.col('rating')).alias('itemRatingAvg'),
F.stddev(F.col('rating')).alias('itemRatingStddev'),
F.sum(when(F.col('label') == 1, F.lit(1)).otherwise(F.lit(0))).alias("itemClickCount"),
F.sum(when(F.col('label') == 0, F.lit(1)).otherwise(F.lit(0))).alias("itemExpCount")
).fillna(0) \
.withColumn('itemRatingStddev', F.format_number(F.col('itemRatingStddev'), NUMBER_PRECISION).cast("float")) \
.withColumn('itemRatingAvg', F.format_number(F.col('itemRatingAvg'), NUMBER_PRECISION).cast("float")) \
.withColumn('itemCtr',F.format_number(ctrUdf(F.col("itemExpCount"),F.col("itemClickCount")), NUMBER_PRECISION).cast("float")) # wilson_ctr(num_pv, num_click) expects exposures first, clicks second
staticFeatures.show(20, truncate=False)
staticFeatures = itemDF.join(staticFeatures, on=["item_id"], how='left')
# Bucketize continuous features
bucket_vocab = [str(i) for i in range(101)]
bucket_suffix = "_Bucket"
for col in ["itemRatingCount","itemRatingAvg", "itemClickCount", "itemExpCount"]:
new_col = col + bucket_suffix
staticFeatures = staticFeatures.withColumn(new_col, numberToBucketUdf(F.col(col))) \
.drop(col) \
.withColumn(new_col, F.when(F.col(new_col).isNull(), "0").otherwise(F.col(new_col)))
dataVocab[new_col] = bucket_vocab
# Stddev handling: map stddev to 1/(stddev+1)
number_suffix = "_number"
for col in ["itemRatingStddev"]:
new_col = col + number_suffix
staticFeatures = staticFeatures.withColumn(new_col, F.when(F.col(col).isNull(), 0).otherwise(1 / (F.col(col) + 1))).drop(col)
for col in ["itemCtr"]:
new_col = col + number_suffix
staticFeatures = staticFeatures.withColumn(col, F.when(F.col(col).isNull(), 0).otherwise(F.col(col))).withColumnRenamed(col,new_col)
print("item size:", staticFeatures.count())
staticFeatures.show(5, truncate=False)
return staticFeatures
def addUserStaticsFeatures(samples,dataVocab):
print("user统计特征处理...")
samples = samples \
.withColumn('userRatingCount',F.format_number(F.sum(F.lit(1)).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)), NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingAvg", F.format_number(F.avg(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)), NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingStddev", F.format_number(F.stddev(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),NUMBER_PRECISION).cast("float")) \
.withColumn("userClickCount", F.format_number(F.sum(when(F.col('label') == 1, F.lit(1)).otherwise(F.lit(0))).over(sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)),NUMBER_PRECISION).cast("float")) \
.withColumn("userExpCount", F.format_number(F.sum(when(F.col('label') == 0, F.lit(1)).otherwise(F.lit(0))).over(sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)),NUMBER_PRECISION).cast("float")) \
.withColumn("userCtr", F.format_number(F.col("userClickCount")/(F.col("userExpCount")+1),NUMBER_PRECISION).cast("float")) \
.filter(F.col("userRatingCount") > 1)
samples.show(20, truncate=False)
# Bucketize continuous features
bucket_vocab = [str(i) for i in range(101)]
bucket_suffix = "_Bucket"
for col in ["userRatingCount","userRatingAvg","userClickCount","userExpCount"]:
new_col = col + bucket_suffix
samples = samples.withColumn(new_col, numberToBucketUdf(F.col(col)))\
.drop(col)\
.withColumn(new_col,F.when(F.col(new_col).isNull(),"0").otherwise(F.col(new_col)))
dataVocab[new_col] = bucket_vocab
# Stddev handling: map stddev to 1/(stddev+1)
number_suffix = "_number"
for col in ["userRatingStddev"]:
new_col = col + number_suffix
samples = samples.withColumn(new_col,F.when(F.col(col).isNull(),0).otherwise(1/(F.col(col)+1))).drop(col)
for col in ["userCtr"]:
new_col = col + number_suffix
samples = samples.withColumn(col, F.when(F.col(col).isNull(), 0).otherwise(F.col(col))).withColumnRenamed(col, new_col)
samples.printSchema()
samples.show(20, truncate=False)
return samples
def addItemFeatures(itemDF,dataVocab,multi_col_vocab):
# multi_col = ['sku_tags', 'sku_show_tags','second_demands', 'second_solutions', 'second_positions']
multi_col = ['tags_v3','second_demands', 'second_solutions', 'second_positions']
onehot_col = ['id','service_type', 'merchant_id','doctor_type', 'doctor_id', 'doctor_famous', 'hospital_id', 'hospital_city_tag_id', 'hospital_type','hospital_is_high_quality']
for col in onehot_col:
new_c = ITEM_PREFIX + col
dataVocab[new_c] = list(set(itemDF[col].tolist()))
itemDF[new_c] = itemDF[col]
itemDF = itemDF.drop(columns=onehot_col)
for c in multi_col:
# TODO: multi-label values here should be split into separate tags
multi_col_vocab[c] = list(set(itemDF[c].tolist()))
for i in range(1, 6):
new_c = ITEM_PREFIX + c + "__" + str(i)
itemDF[new_c] = itemDF[c].map(lambda x:parseTags(x,i))
dataVocab[new_c] = multi_col_vocab[c]
# Bucketize continuous features
bucket_vocab = [str(i) for i in range(101)]
bucket_suffix = "_Bucket"
for col in ['case_count', 'sales_count']:
new_col = ITEM_PREFIX + col + bucket_suffix
itemDF[new_col] = itemDF[col].map(numberToBucket)
itemDF = itemDF.drop(columns=[col])
dataVocab[new_col] = bucket_vocab
for col in ['sku_price']:
new_col = ITEM_PREFIX + col + bucket_suffix
itemDF[new_col] = itemDF[col].map(priceToBucket)
itemDF = itemDF.drop(columns=[col])
dataVocab[new_col] = bucket_vocab
# Continuous value handling
number_suffix = "_number"
for col in ["discount"]:
new_col = ITEM_PREFIX + col + number_suffix
itemDF[new_col] = itemDF[col]
itemDF = itemDF.drop(columns=[col])
return itemDF
def extractTags(genres_list):
# Weight tags by their position in the click list (later, more recent clicks weigh more)
genres_dict = defaultdict(int)
for i,genres in enumerate(genres_list):
for genre in genres.split(','):
genres_dict[genre] += i
sortedGenres = sorted(genres_dict.items(), key=lambda x: x[1], reverse=True)
return [x[0] for x in sortedGenres]
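# Worked example (hypothetical input): extractTags(["a,b", "b,c"]) weights the
# first click list 0 and the second 1, yielding counts {a: 0, b: 1, c: 1} and
# the result ["b", "c", "a"] -- tags from later positions rank higher, and the
# first element always contributes weight 0.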
# This Spark SQL version does not support F.reverse
def arrayReverse(arr):
arr.reverse()
return arr
"""
p -- the click probability, i.e. the CTR
n -- the total sample size, i.e. the number of exposures
z -- in a normal distribution, mean + z * stddev covers a given confidence level; e.g. z = 1.96 gives 95% confidence
The Wilson interval is the range the true CTR falls within at the chosen confidence level; the lower bound is used as a smoothed CTR.
"""
def wilson_ctr(num_pv, num_click):
num_pv = float(num_pv)
num_click = float(num_click)
if num_pv * num_click == 0 or num_pv < num_click:
return 0.0
z = 1.96
n = num_pv
p = num_click / num_pv
score = (p + z*z/(2*n) - z*math.sqrt((p*(1.0 - p) + z*z/(4.0*n))/n)) / (1.0 + z*z/n)
return float(score)
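# Worked example (hypothetical numbers): with 1000 exposures and 50 clicks the
# raw CTR is p = 0.05, but the 95% Wilson lower bound is smaller:
#   wilson_ctr(1000, 50) ~= 0.038
#   wilson_ctr(1, 1)     ~= 0.207  (a 1-click/1-exposure item no longer scores 1.0)
# so sparsely exposed items are pulled toward 0 instead of dominating the CTR ranking.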
def addUserFeatures(samples,dataVocab,multiVocab):
dataVocab["userid"] = collectColumnToVocab(samples, "userid")
dataVocab["user_city_id"] = collectColumnToVocab(samples, "user_city_id")
dataVocab["user_os"] = ["ios","android"]
extractTagsUdf = F.udf(extractTags, ArrayType(StringType()))
arrayReverseUdf = F.udf(arrayReverse, ArrayType(StringType()))
ctrUdf = F.udf(wilson_ctr, FloatType())
print("user历史数据处理...")
# user历史记录
samples = samples.withColumn('userPositiveHistory',F.collect_list(when(F.col('label') == 1, F.col('item_id')).otherwise(F.lit(None))).over(sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)))
samples = samples.withColumn("userPositiveHistory", arrayReverseUdf(F.col("userPositiveHistory")))
for i in range(1,11):
samples = samples.withColumn("userRatedHistory"+str(i), F.when(F.col("userPositiveHistory")[i-1].isNotNull(),F.col("userPositiveHistory")[i-1]).otherwise("-1"))
dataVocab["userRatedHistory"+str(i)] = dataVocab["item_id"]
samples = samples.drop("userPositiveHistory")
# user preferences
print("Processing user preference data...")
for c,v in multiVocab.items():
new_col = "user" + "__" + c
samples = samples.withColumn(new_col, extractTagsUdf(F.collect_list(when(F.col('label') == 1, F.col(c)).otherwise(F.lit(None))).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1))))
for i in range(1, 6):
samples = samples.withColumn(new_col + "__" + str(i),F.when(F.col(new_col)[i - 1].isNotNull(), F.col(new_col)[i - 1]).otherwise("-1"))
dataVocab[new_col + "__" + str(i)] = v
samples = samples.drop(new_col).drop(c)
print("user统计特征处理...")
samples = samples \
.withColumn('userRatingCount', F.format_number(
F.sum(F.lit(1)).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),
NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingAvg", F.format_number(
F.avg(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),
NUMBER_PRECISION).cast("float")) \
.withColumn("userRatingStddev", F.format_number(
F.stddev(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),
NUMBER_PRECISION).cast("float")) \
.withColumn("userClickCount", F.format_number(
F.sum(when(F.col('label') == 1, F.lit(1)).otherwise(F.lit(0))).over(
sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)), NUMBER_PRECISION).cast(
"float")) \
.withColumn("userExpCount", F.format_number(F.sum(when(F.col('label') == 0, F.lit(1)).otherwise(F.lit(0))).over(
sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1)), NUMBER_PRECISION).cast(
"float")) \
.withColumn("userCtr",F.format_number(ctrUdf(F.col("userClickCount"),F.col("userExpCount")),NUMBER_PRECISION))\
.filter(F.col("userRatingCount") > 1)
samples.show(10, truncate=False)
# Bucketize continuous features
bucket_vocab = [str(i) for i in range(101)]
bucket_suffix = "_Bucket"
for col in ["userRatingCount", "userRatingAvg", "userClickCount", "userExpCount"]:
new_col = col + bucket_suffix
samples = samples.withColumn(new_col, numberToBucketUdf(F.col(col))) \
.drop(col) \
.withColumn(new_col, F.when(F.col(new_col).isNull(), "0").otherwise(F.col(new_col)))
dataVocab[new_col] = bucket_vocab
# Stddev handling: map stddev to 1/(stddev+1)
number_suffix = "_number"
for col in ["userRatingStddev"]:
new_col = col + number_suffix
samples = samples.withColumn(new_col, F.when(F.col(col).isNull(), 0).otherwise(1 / (F.col(col) + 1))).drop(col)
for col in ["userCtr"]:
new_col = col + number_suffix
samples = samples.withColumn(col, F.when(F.col(col).isNull(), 0).otherwise(F.col(col))).withColumnRenamed(col, new_col)
samples.printSchema()
samples.show(10,truncate=False)
return samples
def addSampleLabel(ratingSamples):
ratingSamples = ratingSamples.withColumn('label', when(F.col('rating') >= 1, 1).otherwise(0))
ratingSamples.show(5, truncate=False)
ratingSamples.printSchema()
return ratingSamples
def samplesNegAndUnion(samplesPos,samplesNeg):
# positive:negative ratio of 1:4
pos_count = samplesPos.count()
neg_count = samplesNeg.count()
print("before filter posSize:{},negSize:{}".format(str(pos_count), str(neg_count)))
samplesNeg = samplesNeg.sample(pos_count * 4 / neg_count)
samples = samplesNeg.union(samplesPos)
dataSize = samples.count()
print("dataSize:{}".format(str(dataSize)))
return samples
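# Example of the sampling fraction above (hypothetical counts): with
# pos_count = 1000 and neg_count = 10000, sample(4000/10000 = 0.4) keeps
# roughly 4000 negatives, approximating the intended 1:4 ratio; sample() is
# probabilistic, so the resulting ratio is approximate.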
def splitAndSaveTrainingTestSamplesByTimeStamp(samples,splitTimestamp, file_path):
samples = samples.withColumn("timestampLong", F.col("timestamp").cast(LongType()))
# quantile = smallSamples.stat.approxQuantile("timestampLong", [0.8], 0.05)
# splitTimestamp = quantile[0]
train = samples.where(F.col("timestampLong") <= splitTimestamp).drop("timestampLong")
test = samples.where(F.col("timestampLong") > splitTimestamp).drop("timestampLong")
print("split train size:{},test size:{}".format(str(train.count()),str(test.count())))
trainingSavePath = file_path + '_train'
testSavePath = file_path + '_test'
train.write.option("header", "true").option("delimiter", "|").mode('overwrite').csv(trainingSavePath)
test.write.option("header", "true").option("delimiter", "|").mode('overwrite').csv(testSavePath)
def collectColumnToVocab(samples,column):
datas = samples.select(column).distinct().collect()
vocabSet = set()
for d in datas:
if d[column]:
vocabSet.add(str(d[column]))
return list(vocabSet)
def collectMutiColumnToVocab(samples,column):
datas = samples.select(column).distinct().collect()
tagSet = set()
for d in datas:
if d[column]:
for tag in d[column].split(","):
tagSet.add(tag)
tagSet.add("-1") # 空值默认
return list(tagSet)
def dataVocabToRedis(dataVocab):
conn = getRedisConn()
conn.set(FEATURE_VOCAB_KEY, json.dumps(dataVocab, ensure_ascii=False)) # a dict cannot be stored directly; serialize to JSON
conn.expire(FEATURE_VOCAB_KEY,60 * 60 * 24 * 7)
def featureColumnsToRedis(columns):
conn = getRedisConn()
conn.set(FEATURE_COLUMN_KEY, json.dumps(columns))
conn.expire(FEATURE_COLUMN_KEY, 60 * 60 * 24 * 7)
def featureToRedis(key,datas):
conn = getRedisConn()
for k,v in datas.items():
newKey = key+k
conn.set(newKey,v)
conn.expire(newKey, 60 * 60 * 24 * 7)
def userFeaturesToRedis(samples,columns,prefix,redisKey):
idCol = prefix+"id"
timestampCol = idCol+"_timestamp"
def toRedis(datas):
conn = getRedisConn()
for d in datas:
k = d[idCol]
v = json.dumps(d.asDict(), ensure_ascii=False)
newKey = redisKey + k
conn.set(newKey, v)
conn.expire(newKey, 60 * 60 * 24 * 7)
# take each user's latest record by timestamp
prefixSamples = samples.groupBy(idCol).agg(F.max("timestamp").alias(timestampCol))
resDatas = prefixSamples.join(samples, on=[idCol], how='inner').where(F.col("timestamp") == F.col(timestampCol))
resDatas = resDatas.select(*columns).distinct()
resDatas.show(10,truncate=False)
print(prefix, resDatas.count())
resDatas.repartition(8).foreachPartition(toRedis)
def itemFeaturesToRedis(itemStaticDF,redisKey):
idCol = "item_id"
def toRedis(datas):
conn = getRedisConn()
for d in datas:
k = d[idCol]
v = json.dumps(d.asDict(), ensure_ascii=False)
newKey = redisKey + k
conn.set(newKey, v)
conn.expire(newKey, 60 * 60 * 24 * 7)
itemStaticDF.repartition(8).foreachPartition(toRedis)
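# Illustrative stored record (hypothetical id): each row is serialized with
# json.dumps(d.asDict()), so an entry looks like
#   key:   FEATURE_ITEM_KEY + "123"
#   value: {"item_id": "123", "itemRatingCount_Bucket": "2", ...}
# and expires after 7 days.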
"""
Data loading
"""
CONTENT_TYPE = "service"
SERVICE_HOSTS = [
{'host': "172.16.52.33", 'port': 9200},
{'host': "172.16.52.19", 'port': 9200},
{'host': "172.16.52.48", 'port': 9200},
{'host': "172.16.52.27", 'port': 9200},
{'host': "172.16.52.34", 'port': 9200}
]
ES_INDEX = "gm-dbmw-service-read"
ES_INDEX_TEST = "gm_test-service-read"
ACTION_REG = r"""^\\d+$""" # after Hive string-literal unescaping this becomes the regex ^\d+$ (numeric ids only)
def getEsConn_test():
host_config = [{'host': '172.18.52.14', 'port': 9200}, {'host': '172.18.52.133', 'port': 9200},
{'host': '172.18.52.7', 'port': 9200}]
return Elasticsearch(host_config, http_auth=('elastic', 'gm_test'), timeout=3600)
def getEsConn():
return Elasticsearch(SERVICE_HOSTS, http_auth=('elastic', 'gengmei!@#'), timeout=3600)
def getClickSql(start, end):
sql = """
SELECT DISTINCT t1.partition_date, t1.cl_id device_id, t1.card_id,t1.time_stamp,t1.cl_type as os,t1.city_id as user_city_id
FROM
(
select partition_date,city_id,cl_id,business_id as card_id,time_stamp,page_stay,cl_type
from online.bl_hdfs_maidian_updates
where action = 'page_view'
AND partition_date>='{startDay}' and partition_date<='{endDay}'
AND page_name='welfare_detail'
-- AND page_stay>=1
AND cl_id is not null
AND cl_id != ''
AND business_id is not null
AND business_id != ''
group by partition_date,city_id,cl_id,business_id,time_stamp,page_stay,cl_type
) AS t1
join
( -- channel filter, new/old users
SELECT distinct device_id
FROM online.ml_device_day_active_status
where partition_date>='{startDay}' and partition_date<='{endDay}'
AND active_type in ('1','2','4')
and first_channel_source_type not in ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei','','unknown')
AND first_channel_source_type not like 'promotion\_jf\_%'
) t2
on t1.cl_id = t2.device_id
LEFT JOIN
( -- remove blacklisted devices
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY =regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)t3
on t3.device_id=t2.device_id
WHERE t3.device_id is null
""".format(startDay=start,endDay=end)
print(sql)
return sql
def getExposureSql(start, end):
# t1.partition_date, t1.cl_id device_id, t1.card_id, t1.time_stamp, t1.cl_type as os, t1.city_id as user_city_id
sql = """
SELECT DISTINCT t1.partition_date,t1.cl_id device_id,t1.card_id,t1.time_stamp,cl_type as os,t1.city_id as user_city_id
from
( -- new homepage card exposures
SELECT partition_date,city_id,cl_type,cl_id,card_id,max(time_stamp) as time_stamp
FROM online.ml_community_precise_exposure_detail
where partition_date>='{startDay}' and partition_date<='{endDay}'
and action in ('page_precise_exposure','home_choiceness_card_exposure')
and cl_id IS NOT NULL
and card_id IS NOT NULL
and is_exposure='1'
--and page_name='home'
--and tab_name='精选'
--and page_name in ('home','search_result_more')
and ((page_name='home' and tab_name='精选') or (page_name='category' and tab_name = '商品'))
and card_type in ('card','video')
and card_content_type in ('service')
and (get_json_object(exposure_card,'$.in_page_pos') is null or get_json_object(exposure_card,'$.in_page_pos') != 'seckill')
group by partition_date,city_id,cl_type,cl_id,card_id,app_session_id
) t1
join
( -- channel filter, new/old users
SELECT distinct device_id
FROM online.ml_device_day_active_status
where partition_date>='{startDay}' and partition_date<='{endDay}'
AND active_type in ('1','2','4')
and first_channel_source_type not in ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei','','unknown')
AND first_channel_source_type not like 'promotion\_jf\_%'
) t2
on t1.cl_id = t2.device_id
LEFT JOIN
( -- remove blacklisted devices
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY =regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)t3
on t3.device_id=t2.device_id
WHERE t3.device_id is null
""".format(startDay=start,endDay=end)
print(sql)
return sql
def getClickSql2(start, end):
sql = """
SELECT DISTINCT t1.partition_date, t1.cl_id device_id, t1.business_id card_id,t1.time_stamp time_stamp,t1.page_stay as page_stay
FROM
(select partition_date,cl_id,business_id,action,page_name,page_stay,time_stamp,page_stay
from online.bl_hdfs_maidian_updates
where action = 'page_view'
AND partition_date BETWEEN '{}' AND '{}'
AND page_name='welfare_detail'
AND page_stay>=1
AND cl_id is not null
AND cl_id != ''
AND business_id is not null
AND business_id != ''
AND business_id rlike '{}'
) AS t1
JOIN
(select partition_date,active_type,first_channel_source_type,device_id
from online.ml_device_day_active_status
where partition_date BETWEEN '{}' AND '{}'
AND active_type IN ('1', '2', '4')
AND first_channel_source_type not IN ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei')
AND first_channel_source_type not LIKE 'promotion\\_jf\\_%') as t2
ON t1.cl_id = t2.device_id
AND t1.partition_date = t2.partition_date
LEFT JOIN
(
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY = regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)dev
on t1.cl_id=dev.device_id
WHERE dev.device_id is null
""".format(start, end, ACTION_REG, start, end)
print(sql)
return sql
def getExposureSql2(start, end):
sql = """
SELECT DISTINCT t1.partition_date,t1.cl_id device_id,t1.card_id,t1.time_stamp, 0 as page_stay
FROM
(SELECT partition_date,cl_id,card_id,time_stamp
FROM online.ml_community_precise_exposure_detail
WHERE cl_id IS NOT NULL
AND card_id IS NOT NULL
AND card_id rlike '{}'
AND action='page_precise_exposure'
AND card_content_type = '{}'
AND is_exposure = 1 ) AS t1
LEFT JOIN online.ml_device_day_active_status AS t2 ON t1.cl_id = t2.device_id
AND t1.partition_date = t2.partition_date
LEFT JOIN
( SELECT DISTINCT device_id
FROM ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
WHERE PARTITION_DAY = regexp_replace(DATE_SUB(CURRENT_DATE,1),'-','')
AND is_abnormal_device = 'true' )dev
ON t1.cl_id=dev.device_id
WHERE dev.device_id IS NULL
AND t2.partition_date BETWEEN '{}' AND '{}'
AND t2.active_type IN ('1',
'2',
'4')
AND t2.first_channel_source_type NOT IN ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei')
AND t2.first_channel_source_type NOT LIKE 'promotion\\_jf\\_%'
""".format(ACTION_REG, CONTENT_TYPE, start, end)
print(sql)
return sql
def connectDoris(spark, table):
return spark.read \
.format("jdbc") \
.option("driver", "com.mysql.jdbc.Driver") \
.option("url", "jdbc:mysql://172.16.30.136:3306/doris_prod") \
.option("dbtable", table) \
.option("user", "doris") \
.option("password", "o5gbA27hXHHm") \
.load()
def get_spark(appName):
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
spark = (SparkSession
.builder
.config(conf=sparkConf)
.appName(appName)
.enableHiveSupport()
.getOrCreate())
return spark
def init_es_query():
q = {
"_source": {
"includes":[]
},
"query": {
"bool": {
"must": [{"term": {"is_online": True}}],
"must_not": [],
"should": []
}
}
}
return q
def parseSource(_source):
id = str(_source.setdefault("id",-1))
discount = _source.setdefault("discount",0)
case_count = _source.setdefault("case_count",0)
sales_count = _source.setdefault("sales_count",0)
service_type = str(_source.setdefault("service_type",-1))
second_demands = ','.join(_source.setdefault("second_demands",["-1"]))
second_solutions = ','.join(_source.setdefault("second_solutions",["-1"]))
second_positions = ','.join(_source.setdefault("second_positions",["-1"]))
tags_v3 = ','.join(_source.setdefault("tags_v3", ["-1"]))
# sku
sku_list = _source.setdefault("sku_list",[])
sku_tags_list = []
sku_show_tags_list = []
sku_price_list = []
for sku in sku_list:
sku_tags_list += sku.setdefault("sku_tags",[])
# sku_tags_list += sku.setdefault("sku_tags_id",[])
sku_show_tags_list.append(sku.setdefault("show_project_type_name",""))
price = sku.setdefault("price", 0.0)
if price > 0:
sku_price_list.append(price)
# sku_tags = ",".join([str(i) for i in sku_tags_list]) if len(sku_tags_list) > 0 else "-1"
# sku_show_tags = ",".join(sku_show_tags_list) if len(sku_show_tags_list) > 0 else "-1"
sku_price = min(sku_price_list) if len(sku_price_list) > 0 else 0.0
#merchant_id
merchant_id = str(_source.setdefault("merchant_id","-1"))
# doctor_type id famous_doctor
doctor = _source.setdefault("doctor",{})
doctor_type = str(doctor.setdefault("doctor_type","-1"))
doctor_id = str(doctor.setdefault("id","-1"))
doctor_famous = str(int(doctor.setdefault("famous_doctor",False)))
# hospital id city_tag_id hospital_type is_high_quality
hospital = doctor.setdefault("hospital", {})
hospital_id = str(hospital.setdefault("id", "-1"))
hospital_city_tag_id = str(hospital.setdefault("city_tag_id", -1))
hospital_type = str(hospital.setdefault("hospital_type", "-1"))
hospital_is_high_quality = str(int(hospital.setdefault("is_high_quality", False)))
data = [id,
discount,
case_count,
sales_count,
service_type,
merchant_id,
doctor_type,
doctor_id,
doctor_famous,
hospital_id,
hospital_city_tag_id,
hospital_type,
hospital_is_high_quality,
second_demands,
second_solutions,
second_positions,
tags_v3,
# sku_show_tags,
sku_price
]
return data
# fetch item features from ES
def get_service_feature_df():
es_columns = ["id","discount", "sales_count", "doctor", "case_count", "service_type","merchant_id","second_demands", "second_solutions", "second_positions", "sku_list","tags_v3"]
query = init_es_query()
query["_source"]["includes"] = es_columns
print(json.dumps(query), flush=True)
es_cli = getEsConn()
scan_re = scan(client=es_cli, index=ES_INDEX, query=query, scroll='3m')
datas = []
for res in scan_re:
_source = res['_source']
data = parseSource(_source)
datas.append(data)
print("item size:",len(datas))
itemColumns = ['id','discount', 'case_count', 'sales_count', 'service_type','merchant_id',
'doctor_type', 'doctor_id', 'doctor_famous', 'hospital_id', 'hospital_city_tag_id', 'hospital_type',
'hospital_is_high_quality', 'second_demands','second_solutions', 'second_positions',
'tags_v3','sku_price']
# 'sku_tags','sku_show_tags','sku_price']
df = pd.DataFrame(datas,columns=itemColumns)
return df
def addDays(n, format="%Y%m%d"):
return (date.today() + timedelta(days=n)).strftime(format)
# show all columns
pd.set_option('display.max_columns', None)
# show all rows
pd.set_option('display.max_rows', None)
# set value display width to 100 (default 50)
pd.set_option('max_colwidth',100)
if __name__ == '__main__':
start = time.time()
# input arguments
trainDays = int(sys.argv[1])
print('trainDays:{}'.format(trainDays),flush=True)
endDay = addDays(0)
startDay = addDays(-int(trainDays))
print("train_data start:{} end:{}".format(startDay,endDay))
spark = get_spark("service_feature_csv_export")
spark.sparkContext.setLogLevel("ERROR")
# itemDF = get_service_feature_df()
# print(itemDF.columns)
# print(itemDF.head(100))
# behavior data (clicks and exposures)
clickSql = getClickSql(startDay,endDay)
expSql = getExposureSql(startDay,endDay)
clickDF = spark.sql(clickSql)
clickDF.createOrReplaceTempView("clickDF")
clickDF.cache()
clickDF.show(100, False)
expDF = spark.sql(expSql)
expDF.createOrReplaceTempView("expDF")
print("expDF before count: ", expDF.count())
# ratingDF = samplesNegAndUnion(clickDF,expDF)
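# Left anti join written in plain SQL: keep only exposures with no matching click
# on (partition_date, device_id, card_id, os, user_city_id), so an impression that
# led to a click is not also counted as a negative sample.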
expDF = spark.sql("""
SELECT t1.partition_date, t1.device_id, t1.card_id, t1.time_stamp, t1.os, t1.user_city_id
FROM expDF t1
LEFT JOIN clickDF t2
ON t1.partition_date = t2.partition_date
AND t1.device_id = t2.device_id
AND t1.card_id = t2.card_id
AND t1.os = t2.os
AND t1.user_city_id = t2.user_city_id
WHERE t2.device_id is NULL
""")
print("expDF after count: ", expDF.count())
print("click count: ", clickDF.count())
print("添加label...")
clickDF = clickDF.withColumn("label", F.lit(1))
expDF = expDF.withColumn("label", F.lit(0))
ratingDF = clickDF.union(expDF)
ratingDF = ratingDF.withColumnRenamed("time_stamp", "timestamp")\
.withColumnRenamed("device_id", "userid")\
.withColumnRenamed("card_id", "item_id")\
.withColumnRenamed("page_stay", "rating")\
.withColumnRenamed("os", "user_os")\
.withColumn("user_city_id", F.when(F.col("user_city_id").isNull(), "-1").otherwise(F.col("user_city_id")))\
.withColumn("timestamp",F.col("timestamp").cast("long"))
print(ratingDF.columns)
print(ratingDF.show(100, truncate=False))
sys.exit(1)
# TODO: negative samples should be exposures with clicked records excluded
# ratingSamplesWithLabel = addSampleLabel(ratingDF)
df = ratingDF.toPandas() # toPandas() already returns a pandas DataFrame
posCount = df.loc[df["label"]==1]["label"].count()
negCount = df.loc[df["label"]==0]["label"].count()
print("pos size:"+str(posCount),"neg size:"+str(negCount))
itemDF = get_service_feature_df()
print(itemDF.columns)
print(itemDF.head(10))
# itemDF.to_csv("/tmp/service_{}.csv".format(endDay))
# df.to_csv("/tmp/service_train_{}.csv".format(endDay))
# data vocabulary
dataVocab = {}
multiVocab = {}
print("处理item特征...")
timestmp1 = int(round(time.time()))
itemDF = addItemFeatures(itemDF, dataVocab,multiVocab)
timestmp2 = int(round(time.time()))
print("处理item特征, 耗时s:{}".format(timestmp2 - timestmp1))
print("multiVocab:")
for k,v in multiVocab.items():
print(k,len(v))
print("dataVocab:")
for k, v in dataVocab.items():
print(k, len(v), v)
itemDF_spark = spark.createDataFrame(itemDF)
itemDF_spark.printSchema()
itemDF_spark.show(10, truncate=False)
sys.exit(1)
# process item statistics features
itemStaticDF = addItemStaticFeatures(ratingSamplesWithLabel,itemDF_spark,dataVocab)
# statistics processing
# ratingSamplesWithLabel = addStaticsFeatures(ratingSamplesWithLabel,dataVocab)
samples = ratingSamplesWithLabel.join(itemStaticDF, on=['item_id'], how='inner')
print("处理user特征...")
samplesWithUserFeatures = addUserFeatures(samples,dataVocab,multiVocab)
timestmp3 = int(round(time.time()))
print("处理user特征, 耗时s:{}".format(timestmp3 - timestmp2))
#
# user columns
user_columns = [c for c in samplesWithUserFeatures.columns if c.startswith("user")]
print("collect feature for user:{}".format(str(user_columns)))
# item columns
item_columns = [c for c in itemStaticDF.columns if c.startswith("item")]
print("collect feature for item:{}".format(str(item_columns)))
# model columns
print("model columns to redis...")
model_columns = user_columns + item_columns
featureColumnsToRedis(model_columns)
print("数据字典save...")
print("dataVocab:", str(dataVocab.keys()))
vocab_path = "../vocab/{}_vocab.json".format(VERSION)
dataVocabStr = json.dumps(dataVocab, ensure_ascii=False)
open(configUtils.VOCAB_PATH, mode='w', encoding='utf-8').write(dataVocabStr)
# write item features to redis
itemFeaturesToRedis(itemStaticDF, FEATURE_ITEM_KEY)
timestmp6 = int(round(time.time()))
print("item feature to redis 耗时s:{}".format(timestmp6 - timestmp3))
"""特征数据存入redis======================================"""
# write user features to redis
userFeaturesToRedis(samplesWithUserFeatures, user_columns, "user", FEATURE_USER_KEY)
timestmp5 = int(round(time.time()))
print("user feature to redis 耗时s:{}".format(timestmp5 - timestmp6))
"""训练数据保存 ======================================"""
timestmp3 = int(round(time.time()))
train_columns = model_columns + ["label", "timestamp", "rating"]
trainSamples = samplesWithUserFeatures.select(*train_columns)
train_df = trainSamples.toPandas() # toPandas() already returns a pandas DataFrame
train_df.to_csv(DATA_PATH_TRAIN,sep="|")
timestmp4 = int(round(time.time()))
print("训练数据写入success 耗时s:{}".format(timestmp4 - timestmp3))
print("总耗时m:{}".format((timestmp4 - start)/60))
spark.stop()
\ No newline at end of file