Commit 338ffc4e authored by 郭羽

Beauty-service (美购) fine-ranking model

parent 0499a77c
import tensorflow as tf
import json
import pandas as pd
import time
import utils.connUtils as connUtils
ITEM_NUMBER_COLUMNS = ["smart_rank2"]
embedding_columns = ["itemid","userid","doctor_id","hospital_id"]
multi_columns = ["tags_v3","first_demands","second_demands","first_solutions","second_solutions","first_positions","second_positions"]
one_hot_columns = ["service_type","doctor_type","doctor_famous","hospital_city_tag_id","hospital_type","hospital_is_high_quality"]
# history_columns = ["userRatedHistory"]
# Data loading
data_path_train = "/Users/zhigangzheng/Desktop/work/guoyu/service_sort/train/part-00000-a61205d1-ad4e-4fa7-895d-ad8db41189e6-c000.csv"
data_path_test = "/Users/zhigangzheng/Desktop/work/guoyu/service_sort/train/part-00000-a61205d1-ad4e-4fa7-895d-ad8db41189e6-c000.csv"
# data_path_train = "/data/files/service_feature_train.csv"
# data_path_test = "/data/files/service_feature_test.csv"
version = "v1"
model_file = "service_mlp_"+version
# Data vocabulary (feature vocab) loaded from Redis
def getDataVocabFromRedis(version):
conn = connUtils.getRedisConn()
key = "Strategy:rec:vocab:service:"+version
dataVocabStr = conn.get(key)
if dataVocabStr:
dataVocab = json.loads(dataVocabStr)
else:
dataVocab = None
print("-----data_vocab-----")
for k, v in (dataVocab or {}).items():
print(k, len(v))
return dataVocab
# Data type conversion
def csvTypeConvert(df,data_vocab):
# Fill NA for categorical features
for k, v in data_vocab.items():
df[k] = df[k].fillna("-1")
df[k] = df[k].astype("string")
for k in ITEM_NUMBER_COLUMNS:
df[k] = df[k].fillna(0.0)
df[k] = df[k].astype("float")
df["label"] = df["label"].astype("int")
return df
def loadData(data_path):
print("读取数据...")
timestmp1 = int(round(time.time() * 1000))
df = pd.read_csv(data_path, sep="|")
timestmp2 = int(round(time.time() * 1000))
print("读取数据耗时ms:{}".format(timestmp2 - timestmp1))
return df
def getDataSet(df,shuffleSize = 10000,batchSize=128):
# print(df.dtypes)
labels = df.pop('label')
dataSet = tf.data.Dataset.from_tensor_slices((dict(df), labels)).shuffle(shuffleSize).batch(batchSize)
return dataSet
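# Illustrative sketch (not used by the pipeline): each element yielded by the dataset above
# is a (features, labels) pair, where features is a dict of column name -> batched tensor:
#   for features, labels in dataSet.take(1):
#       print(features["itemid"].shape, labels.shape)   # e.g. (128,), (128,)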
def getTrainColumns(train_columns,data_vocab):
columns = []
# Categorical features
for feature in train_columns:
if data_vocab.get(feature):
if feature.startswith("userRatedHistory") or feature.count("__") > 0 or feature in embedding_columns:
cat_col = tf.feature_column.categorical_column_with_vocabulary_list(key=feature, vocabulary_list=data_vocab[feature])
col = tf.feature_column.embedding_column(cat_col, 10)
columns.append(col)
elif feature in one_hot_columns or feature.count("Bucket") > 0:
cat_col = tf.feature_column.categorical_column_with_vocabulary_list(key=feature, vocabulary_list=data_vocab[feature])
col = tf.feature_column.indicator_column(cat_col)
columns.append(col)
elif feature in ITEM_NUMBER_COLUMNS or feature.endswith("RatingAvg") or feature.endswith("RatingStddev"):
col = tf.feature_column.numeric_column(feature)
columns.append(col)
return columns
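# Illustrative sketch (toy vocabulary, not part of the pipeline): what the three branches above build.
#   cat = tf.feature_column.categorical_column_with_vocabulary_list("service_type", ["-1", "1", "2"])
#   tf.feature_column.indicator_column(cat)          # one-hot vector of length 3
#   tf.feature_column.embedding_column(cat, 10)      # trainable 10-dimensional dense embedding
#   tf.feature_column.numeric_column("smart_rank2")  # raw float passed through unchanged
# DenseFeatures(columns) in train() concatenates all of these into a single dense input vector.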
def train(columns,train_dataset):
model = tf.keras.Sequential([
tf.keras.layers.DenseFeatures(columns),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid'),
])
# compile the model, set loss function, optimizer and evaluation metrics
model.compile(
loss='mse',
optimizer='adam',
metrics=['accuracy', tf.keras.metrics.AUC(curve='ROC'), tf.keras.metrics.AUC(curve='PR')])
# train the model
print("train start...")
model.fit(train_dataset, epochs=5)
print("train end...")
print("train save...")
model.save(model_file, include_optimizer=False, save_format='tf')
return model
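# Sketch (assumption, not part of this script): reloading the exported SavedModel for offline
# scoring; `one_sample` is a hypothetical dict mapping every trained feature name to a value.
#   reloaded = tf.keras.models.load_model(model_file)
#   prob = reloaded.predict({k: tf.constant([v]) for k, v in one_sample.items()})  # sigmoid score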
def evaluate(model,test_dataset):
# evaluate the model
timestmp1 = int(round(time.time()))
print("evaluate:")
test_loss, test_accuracy, test_roc_auc, test_pr_auc = model.evaluate(test_dataset)
print('\n\nTest Loss {}, Test Accuracy {}, Test ROC AUC {}, Test PR AUC {}'.format(test_loss, test_accuracy,
test_roc_auc, test_pr_auc))
print("验证耗时s:", int(round(time.time())) - timestmp1)
if __name__ == '__main__':
# Load the data vocabulary from Redis
print("Loading data vocabulary from Redis...")
data_vocab = getDataVocabFromRedis(version)
assert data_vocab
print("读取数据...")
timestmp1 = int(round(time.time() * 1000))
df_train = loadData(data_path_train)
df_test = loadData(data_path_test)
timestmp2 = int(round(time.time() * 1000))
print("读取数据耗时ms:{}".format(timestmp2 - timestmp1))
df_train = df_train[list(data_vocab.keys()) + ITEM_NUMBER_COLUMNS + ["label"]]
df_test = df_test[list(data_vocab.keys()) + ITEM_NUMBER_COLUMNS + ["label"]]
trainSize = df_train["label"].count()
testSize = df_test["label"].count()
print("trainSize:{},testSize{}".format(trainSize,testSize))
# Data type conversion
df_train = csvTypeConvert(df_train, data_vocab)
df_test = csvTypeConvert(df_test, data_vocab)
columns = df_train.columns.tolist()
# Build the training / test datasets
train_data = getDataSet(df_train, shuffleSize=trainSize)
test_data = getDataSet(df_test, shuffleSize=testSize)
# Build the feature columns
columns = getTrainColumns(columns,data_vocab)
model = train(columns,train_data)
# evaluate(model,test_data)
pass
dataPath=/data/files
content_type="service"
cd $dataPath
function mergeGetFile(){
if [ ! -n "$1" ];then
echo "dir Doesn't exist,don't run this shell"
exit 1
fi
rm -f $dataPath/$1.csv
logging "rm -f $dataPath/$1.csv"
/opt/hadoop/bin/hdfs dfs -getmerge /$1 $dataPath/$1.csv
logging "/opt/hadoop/bin/hdfs dfs -getmerge /$1 $dataPath/$1.csv success"
# getmerge concatenates every part file, so the CSV header line is repeated; keep a single
# header, append only the non-header rows, then swap the files.
head -1 $1.csv > $1.csv.head
cat $1.csv|grep -v `cat $1.csv.head` >> $1.csv.head
mv $1.csv.head $1.csv
/opt/hadoop/bin/hdfs dfs -rmr /$1
logging "/opt/hadoop/bin/hdfs dfs -rmr /$1 success"
}
mergeGetFile ${content_type}_feature_train
mergeGetFile ${content_type}_feature_test
path=/srv/apps/serviceRec
day_count=$1
content_type="service"
pythonFile=${path}/shell/service_feature_csv_export.py
#log_file=~/${content_type}_feature_csv_export.log
/opt/hadoop/bin/hdfs dfs -rmr /${content_type}_feature_train
/opt/hadoop/bin/hdfs dfs -rmr /${content_type}_feature_test
/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 16g --executor-memory 1g --executor-cores 1 --num-executors 70 --conf spark.default.parallelism=100 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/tispark-core-2.1-SNAPSHOT-jar-with-dependencies.jar,/srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar ${pythonFile} $day_count
import sys
from datetime import date, timedelta,datetime
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
import time
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import pyspark.sql as sql
from pyspark.sql.functions import when,col
from pyspark.sql.types import *
from pyspark.sql import functions as F
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, QuantileDiscretizer, MinMaxScaler
from collections import defaultdict
import json
import os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
os.environ["PYSPARK_PYTHON"]="/usr/bin/python3"
import utils.connUtils as connUtils
"""
Feature engineering
"""
ITEM_MULTI_COLUMN_EXTRA_MAP = {"first_demands": 10,
"second_demands": 30,
"first_solutions": 2,
"second_solutions": 14,
"first_positions": 1,
"second_positions": 20,
"tags_v3": 30,
}
ITEM_NUMBER_COLUMNS = ["lowest_price","smart_rank2","case_count","ordered_user_ids_count"]
ITEM_CATE_COLUMNS = ["service_type","doctor_type","doctor_id","doctor_famous","hospital_id","hospital_city_tag_id","hospital_type","hospital_is_high_quality"]
NUMBER_PRECISION = 2
def addItemFeatures(samples,itemDF):
itemDF = itemDF.withColumnRenamed("id", "itemid")
samples = samples.join(itemDF, on=['itemid'], how='left')
# Data filter: drop samples with no doctor
samples = samples.filter(col("doctor_id")!="-1")
# Null handling
for c in ITEM_NUMBER_COLUMNS:
print("null count:",c,samples.filter(col(c).isNull()).count())
samples = samples.withColumn(c,when(col(c).isNull(),0).otherwise(col(c)).cast("float"))
for c in ITEM_CATE_COLUMNS:
print("null count:", c, samples.filter(col(c).isNull()).count())
samples = samples.withColumn(c, F.when(F.col(c).isNull(), "-1").otherwise(F.col(c)))
# Multi-value categorical features: split each into fixed positional sub-columns
for c, v in ITEM_MULTI_COLUMN_EXTRA_MAP.items():
for i in range(1, v + 1):
new_c = c + "__" + str(i)
samples = samples.withColumn(new_c, F.split(F.col(c), ",")[i - 1])
samples = samples.withColumn(new_c, F.when(F.col(new_c).isNull(), "-1").otherwise(F.col(new_c)))
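# Example of the split above: with tags_v3 = "eyes,nose" and 30 reserved slots, the result is
# tags_v3__1 = "eyes", tags_v3__2 = "nose", and tags_v3__3 .. tags_v3__30 = "-1".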
# Aggregated item rating statistics
staticFeatures = samples.groupBy('itemid').agg(F.count(F.lit(1)).alias('itemRatingCount'),
F.format_number(F.avg(F.col('rating')),NUMBER_PRECISION).alias('itemRatingAvg'),
F.stddev(F.col('rating')).alias('itemRatingStddev')).fillna(0)\
.withColumn('itemRatingStddev', F.format_number(F.col('itemRatingStddev'), NUMBER_PRECISION))
# join item rating features
samples = samples.join(staticFeatures, on=['itemid'], how='left')
# Continuous feature processing
pipelineStage = []
# Normalization
# for c in ["itemRatingAvg","itemRatingStddev"]:
# pipelineStage.append(MinMaxScaler(inputCol=c, outputCol=c+"Scale"))
# bucketing
for c in ["case_count", "ordered_user_ids_count","itemRatingCount","lowest_price"]:
pipelineStage.append(QuantileDiscretizer(numBuckets=10, inputCol=c, outputCol=c + "Bucket"))
featurePipeline = Pipeline(stages=pipelineStage)
samples = featurePipeline.fit(samples).transform(samples)
samples.printSchema()
samples.show(5, truncate=False)
return samples
def extractTags(genres_list):
genres_dict = defaultdict(int)
for genres in genres_list:
for genre in genres.split(','):
genres_dict[genre] += 1
sortedGenres = sorted(genres_dict.items(), key=lambda x: x[1], reverse=True)
return [x[0] for x in sortedGenres]
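# Example: extractTags(["a,b", "b,c"]) -> ["b", "a", "c"]
# (tags sorted by frequency, descending; ties keep first-seen order).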
def addUserFeatures(samples):
extractTagsUdf = F.udf(extractTags, ArrayType(StringType()))
samples = samples.withColumnRenamed("cl_id","userid")
# User click history (currently disabled)
# samples = samples\
# .withColumn('userPositiveHistory',F.collect_list(when(F.col('label') == 1, F.col('itemid')).otherwise(F.lit(None))).over(sql.Window.partitionBy("userid").orderBy(F.col("timestamp")).rowsBetween(-100, -1))) \
# .withColumn("userPositiveHistory", F.reverse(F.col("userPositiveHistory")))
# for i in range(1,11):
# samples = samples.withColumn("userRatedHistory"+str(i), F.when(F.col("userPositiveHistory")[i-1].isNotNull(),F.col("userPositiveHistory")[i-1]).otherwise("-1"))
# samples = samples.drop("userPositiveHistory")
# Sliding-window statistics over the user's historical ratings
samples = samples\
.withColumn('userRatingCount',F.count(F.lit(1)).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1))) \
.withColumn("userRatingAvg", F.format_number(F.avg(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),NUMBER_PRECISION)) \
.withColumn("userRatingStddev", F.format_number(F.stddev(F.col("rating")).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)),NUMBER_PRECISION)) \
.filter(F.col("userRatingCount") > 1)
# User preference tags
for c in ["first_demands","second_demands","first_solutions","second_solutions","first_positions","second_positions"]:
new_col = "user" + "__"+c
samples = samples\
.withColumn(new_col, extractTagsUdf(F.collect_list(when(F.col('label') == 1, F.col(c)).otherwise(F.lit(None))).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1)))) \
.withColumn(new_col+"__1", F.when(F.col(new_col)[0].isNotNull(),F.col(new_col)[0]).otherwise("-1")) \
.withColumn(new_col+"__2", F.when(F.col(new_col)[1].isNotNull(),F.col(new_col)[1]).otherwise("-1")) \
.withColumn(new_col+"__3", F.when(F.col(new_col)[2].isNotNull(),F.col(new_col)[2]).otherwise("-1")) \
.drop(new_col)
# .drop(c).drop(new_col)
# tags
c = "tags_v3"
new_col = "user" + "__" + c
samples = samples.withColumn(new_col, extractTagsUdf(F.collect_list(when(F.col('label') == 1, F.col(c)).otherwise(F.lit(None))).over(sql.Window.partitionBy('userid').orderBy('timestamp').rowsBetween(-100, -1))))
for i in range(1,10):
samples = samples.withColumn(new_col+"__"+str(i), F.when(F.col(new_col)[i-1].isNotNull(),F.col(new_col)[i-1]).otherwise("-1"))
samples = samples.drop(new_col)
pipelineStage = []
# Normalization
# for c in ["userRatingAvg", "userRatingStddev"]:
# pipelineStage.append(MinMaxScaler(inputCol=c, outputCol=c + "Scale"))
# bucketing
for c in ["userRatingCount"]:
pipelineStage.append(QuantileDiscretizer(numBuckets=10, inputCol=c, outputCol=c + "Bucket"))
featurePipeline = Pipeline(stages=pipelineStage)
samples = featurePipeline.fit(samples).transform(samples)
samples.printSchema()
samples.show(10)
return samples
def addSampleLabel(ratingSamples):
ratingSamples = ratingSamples.withColumn('label', when(F.col('rating') >= 1, 1).otherwise(0))
ratingSamples.show(5, truncate=False)
ratingSamples.printSchema()
return ratingSamples
def samplesNegAndUnion(samplesPos,samplesNeg):
# Downsample negatives to roughly a 1:4 positive:negative ratio
pos_count = samplesPos.count()
neg_count = samplesNeg.count()
print("before filter posSize:{},negSize:{}".format(str(pos_count), str(neg_count)))
samplesNeg = samplesNeg.sample(pos_count * 4 / neg_count)
samples = samplesNeg.union(samplesPos)
dataSize = samples.count()
print("dataSize:{}".format(str(dataSize)))
return samples
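# Worked example: with pos_count = 1000 and neg_count = 10000 the sampling fraction is
# 1000 * 4 / 10000 = 0.4, so roughly 4000 negatives are kept, i.e. about a 1:4 pos:neg ratio.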
def splitAndSaveTrainingTestSamplesByTimeStamp(samples,splitTimestamp, file_path):
samples = samples.withColumn("timestampLong", F.col("timestamp").cast(LongType()))
# quantile = smallSamples.stat.approxQuantile("timestampLong", [0.8], 0.05)
# splitTimestamp = quantile[0]
train = samples.where(F.col("timestampLong") <= splitTimestamp).drop("timestampLong")
test = samples.where(F.col("timestampLong") > splitTimestamp).drop("timestampLong")
print("split train size:{},test size:".format(str(train.count()),str(test.count())))
trainingSavePath = file_path + '_train'
testSavePath = file_path + '_test'
train.write.option("header", "true").option("delimiter", "|").mode('overwrite').csv(trainingSavePath)
test.write.option("header", "true").option("delimiter", "|").mode('overwrite').csv(testSavePath)
def getDataVocab(samples):
dataVocab = {}
multiVocab = {}
for c in ITEM_MULTI_COLUMN_EXTRA_MAP.keys():
datas = samples.select(c).distinct().collect()
tagSet = set()
for d in datas:
if d[c]:
for tag in d[c].split(","):
tagSet.add(tag)
tagSet.add("-1") #空值默认
multiVocab[c] = list(tagSet)
samples = samples.drop(c)
for c in samples.columns:
# Bucketized columns and categorical item columns
if c.endswith("Bucket") or c in ITEM_CATE_COLUMNS:
datas = samples.select(c).distinct().collect()
vocabSet = set()
for d in datas:
if d[c]:
vocabSet.add(str(d[c]))
vocabSet.add("-1")# 空值的默认
dataVocab[c] = list(vocabSet)
else:
# 判断是否多值离散列
for cc, v in multiVocab.items():
if c.count(cc) > 0:
dataVocab[c] = v
return dataVocab
def dataVocabToRedis(dataVocab,version="v1"):
conn = connUtils.getRedisConn()
key = "Strategy:rec:vocab:service:"+version
conn.set(key,dataVocab)
conn.expire(key,60 * 60 * 24 * 7)
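# Note: the caller passes a JSON string (json.dumps(dataVocab)); the training script reads the
# same "Strategy:rec:vocab:service:<version>" key back via getDataVocabFromRedis and json.loads it.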
def featureToRedis():
pass
"""
Data loading
"""
CONTENT_TYPE = "service"
SERVICE_HOSTS = [
{'host': "172.16.52.25", 'port': 9200},
{'host': "172.16.52.26", 'port': 9200},
{'host': "172.16.52.36", 'port': 9200}
]
ES_INDEX = "gm-dbmw-service-read"
ES_INDEX_TEST = "gm_test-service-read"
ACTION_REG = r"""^\\d+$"""
def getEsConn_test():
host_config = [{'host': '172.18.52.14', 'port': 9200}, {'host': '172.18.52.133', 'port': 9200},
{'host': '172.18.52.7', 'port': 9200}]
return Elasticsearch(host_config, http_auth=('elastic', 'gm_test'), timeout=3600)
def getEsConn():
return Elasticsearch(SERVICE_HOSTS, http_auth=('elastic', 'gengmei!@#'), timeout=3600)
def getClickData(spark, start, end):
positiveSql = """
SELECT DISTINCT t1.partition_date, t1.cl_id device_id, t1.business_id card_id,t1.time_stamp time_stamp,t1.page_stay as page_stay
FROM
(select partition_date,cl_id,business_id,action,page_name,page_stay,time_stamp
from online.bl_hdfs_maidian_updates
where action = 'page_view'
AND partition_date BETWEEN '{}' AND '{}'
AND page_name='welfare_detail'
AND page_stay>=1
AND cl_id is not null
AND cl_id != ''
AND business_id is not null
AND business_id != ''
AND business_id rlike '{}'
) AS t1
JOIN
(select partition_date,active_type,first_channel_source_type,device_id
from online.ml_device_day_active_status
where partition_date BETWEEN '{}' AND '{}'
AND active_type IN ('1', '2', '4')
AND first_channel_source_type not IN ('yqxiu1','yqxiu2','yqxiu3','yqxiu4','yqxiu5','mxyc1','mxyc2','mxyc3'
,'wanpu','jinshan','jx','maimai','zhuoyi','huatian','suopingjingling','mocha','mizhe','meika','lamabang'
,'js-az1','js-az2','js-az3','js-az4','js-az5','jfq-az1','jfq-az2','jfq-az3','jfq-az4','jfq-az5','toufang1'
,'toufang2','toufang3','toufang4','toufang5','toufang6','TF-toufang1','TF-toufang2','TF-toufang3','TF-toufang4'
,'TF-toufang5','tf-toufang1','tf-toufang2','tf-toufang3','tf-toufang4','tf-toufang5','benzhan','promotion_aso100'
,'promotion_qianka','promotion_xiaoyu','promotion_dianru','promotion_malioaso','promotion_malioaso-shequ'
,'promotion_shike','promotion_julang_jl03','promotion_zuimei')
AND first_channel_source_type not LIKE 'promotion\\_jf\\_%') as t2
ON t1.cl_id = t2.device_id
AND t1.partition_date = t2.partition_date
LEFT JOIN
(
select distinct device_id
from ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
where PARTITION_DAY = regexp_replace(DATE_SUB(current_date,1) ,'-','')
AND is_abnormal_device = 'true'
)dev
on t1.cl_id=dev.device_id
WHERE dev.device_id is null
""".format(start, end, ACTION_REG, start, end)
print(positiveSql)
return spark.sql(positiveSql)
def getExposureData(spark, start, end):
negSql = """
SELECT DISTINCT t1.partition_date,t1.cl_id device_id,t1.card_id,t1.time_stamp, 0 as page_stay
FROM
(SELECT *
FROM online.ml_community_precise_exposure_detail
WHERE cl_id IS NOT NULL
AND card_id IS NOT NULL
AND card_id rlike '{}'
AND action='page_precise_exposure'
AND card_content_type = '{}'
AND is_exposure = 1 ) AS t1
LEFT JOIN online.ml_device_day_active_status AS t2 ON t1.cl_id = t2.device_id
AND t1.partition_date = t2.partition_date
LEFT JOIN
( SELECT DISTINCT device_id
FROM ML.ML_D_CT_DV_DEVICECLEAN_DIMEN_D
WHERE PARTITION_DAY = regexp_replace(DATE_SUB(CURRENT_DATE,1),'-','')
AND is_abnormal_device = 'true' )dev
ON t1.cl_id=dev.device_id
WHERE dev.device_id IS NULL
AND t2.partition_date BETWEEN '{}' AND '{}'
AND t2.active_type IN ('1',
'2',
'4')
AND t2.first_channel_source_type NOT IN ('yqxiu1',
'yqxiu2',
'yqxiu3',
'yqxiu4',
'yqxiu5',
'mxyc1',
'mxyc2',
'mxyc3' ,
'wanpu',
'jinshan',
'jx',
'maimai',
'zhuoyi',
'huatian',
'suopingjingling',
'mocha',
'mizhe',
'meika',
'lamabang' ,
'js-az1',
'js-az2',
'js-az3',
'js-az4',
'js-az5',
'jfq-az1',
'jfq-az2',
'jfq-az3',
'jfq-az4',
'jfq-az5',
'toufang1' ,
'toufang2',
'toufang3',
'toufang4',
'toufang5',
'toufang6',
'TF-toufang1',
'TF-toufang2',
'TF-toufang3',
'TF-toufang4' ,
'TF-toufang5',
'tf-toufang1',
'tf-toufang2',
'tf-toufang3',
'tf-toufang4',
'tf-toufang5',
'benzhan',
'promotion_aso100' ,
'promotion_qianka',
'promotion_xiaoyu',
'promotion_dianru',
'promotion_malioaso',
'promotion_malioaso-shequ' ,
'promotion_shike',
'promotion_julang_jl03',
'promotion_zuimei')
AND t2.first_channel_source_type NOT LIKE 'promotion\\_jf\\_%'
""".format(ACTION_REG, CONTENT_TYPE, start, end)
print(negSql)
return spark.sql(negSql)
def connectDoris(spark, table):
return spark.read \
.format("jdbc") \
.option("driver", "com.mysql.jdbc.Driver") \
.option("url", "jdbc:mysql://172.16.30.136:3306/doris_prod") \
.option("dbtable", table) \
.option("user", "doris") \
.option("password", "o5gbA27hXHHm") \
.load()
def get_spark(appName):
sparkConf = SparkConf()
sparkConf.set("spark.sql.crossJoin.enabled", True)
sparkConf.set("spark.debug.maxToStringFields", "100")
sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
sparkConf.set("spark.tispark.plan.allow_index_read", True)
sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
spark = (SparkSession
.builder
.config(conf=sparkConf)
.appName(appName)
.enableHiveSupport()
.getOrCreate())
return spark
def init_es_query():
q = {
"_source": {
"includes":[]
},
"query": {
"bool": {
"must": [],
"must_not": [],
"should": []
}
}
}
return q
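# Sketch (assumption; "is_online" is a hypothetical field): callers can narrow the scan by
# appending clauses to the bool query before searching, e.g.
#   q = init_es_query()
#   q["query"]["bool"]["must"].append({"term": {"is_online": True}})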
def parseSource(_source):
id = str(_source.setdefault("id",-1))
smart_rank2 = _source.setdefault("smart_rank2",0.0)
case_count = _source.setdefault("case_count",0)
service_type = str(_source.setdefault("service_type",-1))
first_demands = ','.join(_source.setdefault("first_demands",[]))
second_demands = ','.join(_source.setdefault("second_demands",[]))
first_solutions = ','.join(_source.setdefault("first_solutions",[]))
second_solutions = ','.join(_source.setdefault("second_solutions",[]))
first_positions = ','.join(_source.setdefault("first_positions",[]))
second_positions = ','.join(_source.setdefault("second_positions",[]))
tags_v3 = ','.join(_source.setdefault("tags_v3",[]))
ordered_user_ids_count = len(_source.setdefault("ordered_user_ids",[]))
lowest_price_arr = _source.setdefault("lowest_price",[])
lowest_price = lowest_price_arr[0].setdefault("price",0.0) if len(lowest_price_arr) > 0 else 0.0
# doctor_type id famous_doctor
doctor = _source.setdefault("doctor",{})
doctor_type = doctor.setdefault("doctor_type","-1")
doctor_id = doctor.setdefault("id","-1")
doctor_famous = str(int(doctor.setdefault("famous_doctor",False)))
# hospital id city_tag_id hospital_type is_high_quality
hospital = doctor.setdefault("hospital", {})
hospital_id = hospital.setdefault("id", "-1")
hospital_city_tag_id = str(hospital.setdefault("city_tag_id", -1))
hospital_type = hospital.setdefault("hospital_type", "-1")
hospital_is_high_quality = str(int(hospital.setdefault("is_high_quality", False)))
data = [id,
lowest_price,
smart_rank2,
case_count,
service_type,
ordered_user_ids_count,
doctor_type,
doctor_id,
doctor_famous,
hospital_id,
hospital_city_tag_id,
hospital_type,
hospital_is_high_quality,
first_demands,
second_demands,
first_solutions,
second_solutions,
first_positions,
second_positions,
tags_v3
]
return data
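# Note: the order of the list returned above must match itemColumns in get_service_feature_df,
# which is used as the schema when the rows are turned into a DataFrame.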
# Fetch item features from ES
def get_service_feature_df(spark):
es_columns = ["id", "lowest_price", "smart_rank2", "doctor", "case_count", "service_type", "first_demands", "second_demands", "first_solutions", "second_solutions", "first_positions", "second_positions", "tags_v3","ordered_user_ids"]
query = init_es_query()
query["_source"]["includes"] = es_columns
print(json.dumps(query), flush=True)
es_cli = getEsConn()
scan_re = scan(client=es_cli, index=ES_INDEX, query=query, scroll='3m')
datas = []
for res in scan_re:
_source = res['_source']
data = parseSource(_source)
datas.append(data)
print(len(datas))
dataRDD = spark.sparkContext.parallelize(datas)
itemColumns = ['id', 'lowest_price', 'smart_rank2', 'case_count', 'service_type', 'ordered_user_ids_count',
'doctor_type', 'doctor_id', 'doctor_famous', 'hospital_id', 'hospital_city_tag_id', 'hospital_type',
'hospital_is_high_quality', 'first_demands', 'second_demands', 'first_solutions',
'second_solutions', 'first_positions', 'second_positions', 'tags_v3']
df = dataRDD.toDF(schema=itemColumns)
return df
# Fetch user portraits from MySQL
def get_user_portrait(spark):
return spark.read \
.format("jdbc") \
.option("driver", "com.mysql.jdbc.Driver") \
.option("url", "jdbc:mysql://172.16.50.175:3306/doris_olap") \
.option("dbtable", "user_tag3_portrait") \
.option("user", "doris") \
.option("password", "o5gbA27hXHHm") \
.load()
def addDays(n, format="%Y%m%d"):
return (date.today() + timedelta(days=n)).strftime(format)
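# Example: addDays(-1) returns yesterday as "YYYYMMDD"; addDays(-8, format="%Y-%m-%d")
# returns the date eight days ago as "YYYY-MM-DD".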
if __name__ == '__main__':
# start = time.time()
# # input arguments
# trainDays = int(sys.argv[1])
# print('trainDays:{}'.format(trainDays),flush=True)
# spark = get_spark("service_feature_csv_export")
# spark.sparkContext.setLogLevel("ERROR")
#
# endDay = addDays(-1)
# startDay = addDays(-(1 + int(trainDays)))
#
# print(startDay,endDay)
#
# itemDF = get_service_feature_df(spark)
# # behavior data
# clickDF = getClickData(spark,startDay,endDay)
# exposureDF = getExposureData(spark,startDay,endDay)
# ratingDF = samplesNegAndUnion(clickDF,exposureDF)
conf = SparkConf().setAppName('featureEngineering').setMaster('local')
spark = SparkSession.builder.config(conf=conf).getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
itemDF = spark.read.format('csv').option('header', 'true').option('sep', '|').load("service_item.csv")
ratingDF = spark.read.format('csv').option('header', 'true').option('sep', '|').load("service_rating.csv")
ratingDF = ratingDF.withColumn("rating",F.when(col("label")>=1,1).otherwise(0))
ratingDF = ratingDF.withColumnRenamed("time_stamp", "timestamp")\
.withColumnRenamed("device_id", "userid")\
.withColumnRenamed("card_id", "itemid")\
.withColumnRenamed("page_stay", "rating")
print(itemDF.columns)
print(itemDF.show(10))
# print(userDF.columns)
# print(userDF.show(10))
print(ratingDF.columns)
print(ratingDF.show(10))
print("添加label...")
ratingSamplesWithLabel = addSampleLabel(ratingDF)
print("处理item特征...")
samplesWithItemFeatures = addItemFeatures(ratingSamplesWithLabel, itemDF)
print("处理user特征...")
samplesWithUserFeatures = addUserFeatures(samplesWithItemFeatures)
# 离散数据字典生成
print("数据字典生成...")
dataVocab = getDataVocab(samplesWithUserFeatures)
# 字典转为json 存入redis
print("数据字典存入redis...")
dataVocabStr = json.dumps(dataVocab,ensure_ascii=False)
dataVocabToRedis(dataVocabStr)
file_path = "/service_feature"
print("write to hdfs start...")
# splitTimestamp = int(time.mktime(time.strptime(endDay, "%Y%m%d")))
# splitAndSaveTrainingTestSamplesByTimeStamp(samplesWithUserFeatures, splitTimestamp, file_path)
print("write to hdfs success...")
# # user portrait data
# # userDF = get_user_portrait(spark)
#
# # data processing
#
# # data output
# item_csv_dir = "/service_item"
# user_csv_dir = "/service_user"
# action_csv_dir = "/service_action"
# os.system("hdfs dfs -rmr {}".format(item_csv_dir))
# os.system("hdfs dfs -rmr {}".format(user_csv_dir))
# os.system("hdfs dfs -rmr {}".format(action_csv_dir))
#
# itemDF.write.option("header", "true").option("delimiter", "|").csv(item_csv_dir)
# print("service_item write successful", flush=True)
#
# user_endDay = addDays(-1,format="%Y-%m-%d")
# user_startDay = addDays(-(1 + int(trainDays)),format="%Y-%m-%d")
# userTmpTable = "user_tag3_portrait"
# userDF.createOrReplaceTempView(userTmpTable)
# user_sql = "select * from {} where date between '{}' and '{}' ".format(userTmpTable,user_startDay,user_endDay)
# userDF = spark.sql(user_sql)
# userDF.write.option("header", "true").option("delimiter", "|").csv(user_csv_dir)
# print("service_user write successful", flush=True)
#
# actionTmpTable = "action"
# actionDF.createOrReplaceTempView(actionTmpTable)
# action_sql = "select * from {}".format(actionTmpTable)
# actionDF = spark.sql(action_sql)
# actionDF.write.option("header", "true").option("delimiter", "|").csv(action_csv_dir)
# print("service_action write successful", flush=True)
spark.stop()
import redis
def getRedisConn():
# pool = redis.ConnectionPool(host="172.16.50.145",password="XfkMCCdWDIU%ls$h",port=6379,db=0)
# conn = redis.Redis(connection_pool=pool)
# conn = redis.Redis(host="172.16.50.145", port=6379, password="XfkMCCdWDIU%ls$h",db=0)
conn = redis.Redis(host="172.18.51.10", port=6379,db=0) #test
return conn