ML / ffm-baseline · Commits · 11fee6af

Commit 11fee6af, authored Apr 16, 2019 by 张彦钊

    Merge branch 'zhao' into 'master'

    Zhao

    See merge request !16

Parents: c5834874, 10546ff7

Showing 4 changed files with 97 additions and 49 deletions:

    eda/esmm/Model_pipline/feature.py       +82  -41
    eda/esmm/Model_pipline/submit.sh         +3   -3
    eda/esmm/Model_pipline/to_tfrecord.py    +7   -3
    eda/esmm/Model_pipline/train.py          +5   -2

eda/esmm/Model_pipline/feature.py
@@ -40,44 +40,64 @@ def get_data():
     start = (temp - datetime.timedelta(days=300)).strftime("%Y-%m-%d")
     print(start)
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
-    sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name," \
-          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,cut.time,dl.app_list " \
+    sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
+          "u.channel,c.top,e.device_id,cut.time,dl.app_list,e.diary_service_id,feat.level3_ids,feat.level2 " \
           "from {} e left join user_feature u on e.device_id = u.device_id " \
           "left join cid_type_top c on e.device_id = c.device_id " \
           "left join cid_time_cut cut on e.cid_id = cut.cid " \
           "left join device_app_list dl on e.device_id = dl.device_id " \
           "left join diary_feat feat on e.cid_id = feat.diary_id " \
           "where e.stat_date >= '{}'".format(train_data_set, start)
     df = con_sql(db, sql)
     # print(df.shape)
-    df = df.rename(columns={0: "y", 1: "z", 2: "stat_date", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
-                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
-                            11: "time", 12: "app_list"})
-    print("esmm data ok")
-    # print(df.head(2)
+    df = df.rename(columns={0: "y", 1: "z", 2: "stat_date", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
+                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
+                            11: "time", 12: "app_list", 13: "service_id", 14: "level3_ids", 15: "level2"})
+    db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
+    sql = "select level2_id,treatment_method,price_min,price_max,treatment_time,maintain_time,recover_time " \
+          "from train_Knowledge_network_data"
+    knowledge = con_sql(db, sql)
+    knowledge = knowledge.rename(columns={0: "level2", 1: "method", 2: "min", 3: "max",
+                                          4: "treatment_time", 5: "maintain_time", 6: "recover_time"})
+    knowledge["level2"] = knowledge["level2"].astype("str")
+    df = pd.merge(df, knowledge, on='level2', how='left')
+    df = df.drop("level2", axis=1)
+    service_id = tuple(df["service_id"].unique())
+    db = pymysql.connect(host='rdsfewzdmf0jfjp9un8xj.mysql.rds.aliyuncs.com', port=3306, user='work',
+                         passwd='BJQaT9VzDcuPBqkd', db='zhengxing')
+    sql = "select s.id,d.hospital_id from api_service s left join api_doctor d on s.doctor_id = d.id " \
+          "where s.id in {}".format(service_id)
+    hospital = con_sql(db, sql)
+    hospital = hospital.rename(columns={0: "service_id", 1: "hospital_id"})
+    # print(hospital.head())
+    # print("hospital")
+    # print(hospital.count())
+    hospital["service_id"] = hospital["service_id"].astype("str")
+    df = pd.merge(df, hospital, on='service_id', how='left')
+    df = df.drop("service_id", axis=1)
+    print(df.count())
     print("before")
     print(df.shape)
     df = df.drop_duplicates()
-    df = df.drop_duplicates(["ucity_id", "clevel2_id", "ccity_name", "device_type", "manufacturer",
-                             "channel", "top", "time", "stat_date", "app_list"])
+    df = df.drop_duplicates(["ucity_id", "clevel2_id", "ccity_name", "device_type", "manufacturer",
+                             "channel", "top", "time", "stat_date", "app_list", "hospital_id", "level3_ids"])
     print("after")
     print(df.shape)
-    app_list_number, app_list_map = multi_hot(df, "app_list", 1)
-    level2_number, level2_map = multi_hot(df, "clevel2_id", 1 + app_list_number)
-    # df["app_list"] = df["app_list"].fillna("lost_na")
-    # app_list_value = [i.split(",") for i in df["app_list"].unique()]
-    # app_list_unique = []
-    # for i in app_list_value:
-    #     app_list_unique.extend(i)
-    # app_list_unique = list(set(app_list_unique))
-    # app_list_map = dict(zip(app_list_unique, list(range(1, len(app_list_unique) + 1))))
-    # df["app_list"] = df["app_list"].apply(app_list_func,args=(app_list_map,))
+    app_list_number, app_list_map = multi_hot(df, "app_list", 2)
+    level2_number, level2_map = multi_hot(df, "clevel2_id", 2 + app_list_number)
+    level3_number, level3_map = multi_hot(df, "level3_ids", 2 + app_list_number + level2_number)
     unique_values = []
-    features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
-                "channel", "top", "time", "stat_date"]
+    features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
+                "channel", "top", "time", "stat_date", "hospital_id", "method", "min", "max",
+                "treatment_time", "maintain_time", "recover_time"]
     for i in features:
         df[i] = df[i].astype("str")
         df[i] = df[i].fillna("lost")
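Editor's note: the multi_hot helper itself is not part of this diff, but the commented-out block it replaced in the old version hints at what it does: collect the distinct comma-separated tokens of a column, assign them consecutive integer ids starting at a given offset, and remap the column in place. A minimal sketch under that assumption (the names and exact behavior are inferred, not the repository's actual code):

import pandas as pd

def multi_hot(df, column, begin_index):
    # Fill missing cells the same way the old inline code did.
    df[column] = df[column].fillna("lost_na")
    # Collect every distinct token across the comma-separated values.
    tokens = set()
    for value in df[column].unique():
        tokens.update(str(value).split(","))
    # Assign consecutive integer ids starting at begin_index.
    token_map = {t: i for i, t in enumerate(sorted(tokens), start=begin_index)}
    # Remap each cell to a comma-separated string of ids.
    df[column] = df[column].apply(
        lambda x: ",".join(str(token_map[t]) for t in str(x).split(",")))
    return len(tokens), token_map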
@@ -85,14 +105,16 @@ def get_data():
         df[i] = df[i] + i
         unique_values.extend(list(df[i].unique()))
-    temp = list(range(1 + app_list_number + level2_number,
-                      1 + app_list_number + level2_number + len(unique_values)))
+    temp = list(range(2 + app_list_number + level2_number + level3_number,
+                      2 + app_list_number + level2_number + level3_number + len(unique_values)))
     value_map = dict(zip(unique_values, temp))
     df = df.drop("device_id", axis=1)
     train = df[df["stat_date"] != validate_date + "stat_date"]
     test = df[df["stat_date"] == validate_date + "stat_date"]
-    for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "time", "stat_date"]:
+    for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
+              "channel", "top", "time", "stat_date", "hospital_id", "method", "min", "max",
+              "treatment_time", "maintain_time", "recover_time"]:
         train[i] = train[i].map(value_map)
         test[i] = test[i].map(value_map)
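Editor's note: taken together, the three multi_hot calls and the range(...) above partition a single embedding id space; starting at 2 rather than 1 leaves the low ids free (0 is later used as the fill value for unseen categories). A worked illustration with made-up vocabulary sizes:

# Hypothetical sizes, purely for illustration:
app_list_number, level2_number, level3_number = 1000, 50, 200

# app_list tokens occupy   [2, 2 + 1000)      = [2, 1002)
# clevel2_id tokens occupy [1002, 1002 + 50)  = [1002, 1052)
# level3_ids tokens occupy [1052, 1052 + 200) = [1052, 1252)
first_categorical_id = 2 + app_list_number + level2_number + level3_number  # 1252
# value_map then assigns ids 1252, 1253, ... to the one-hot categorical values.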
@@ -101,10 +123,10 @@ def get_data():
     print("test shape")
     print(test.shape)
     write_csv(train, "tr", 100000)
     write_csv(test, "va", 80000)
-    return validate_date, value_map, app_list_map, level2_map
+    return validate_date, value_map, app_list_map, level2_map, level3_map


 def app_list_func(x, l):
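Editor's note: the body of app_list_func is collapsed in this view; from its call sites (applied to comma-separated strings together with a token-to-id dict) it plausibly looks like the sketch below, with unseen tokens falling back to 0. This is an assumption, not the file's actual code:

def app_list_func(x, l):
    # Split the comma-separated cell and map each token through the dict l;
    # tokens missing from the map fall back to id 0 (assumed behavior).
    return ",".join(str(l.get(token, 0)) for token in str(x).split(","))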
@@ -129,10 +151,11 @@ def write_csv(df,name,n):
         temp.to_csv(path + name + "/{}_{}.csv".format(name, i), index=False)


-def get_predict(date, value_map, app_list_map, level2_map):
+def get_predict(date, value_map, app_list_map, level2_map, level3_map):
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
     sql = "select e.y,e.z,e.label,e.ucity_id,feat.level2_ids,e.ccity_name," \
-          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,e.cid_id,cut.time,dl.app_list " \
+          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,e.cid_id,cut.time," \
+          "dl.app_list,e.hospital_id,feat.level3_ids,feat.level2 " \
           "from esmm_pre_data e left join user_feature u on e.device_id = u.device_id " \
           "left join cid_type_top c on e.device_id = c.device_id " \
           "left join cid_time_cut cut on e.cid_id = cut.cid " \
@@ -140,22 +163,39 @@ def get_predict(date,value_map,app_list_map,level2_map):
           "left join diary_feat feat on e.cid_id = feat.diary_id"
     df = con_sql(db, sql)
     df = df.rename(columns={0: "y", 1: "z", 2: "label", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
-                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
-                            11: "cid_id", 12: "time", 13: "app_list"})
+                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
+                            11: "cid_id", 12: "time", 13: "app_list", 14: "hospital_id", 15: "level3_ids",
+                            16: "level2"})
+    db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
+    sql = "select level2_id,treatment_method,price_min,price_max,treatment_time,maintain_time,recover_time " \
+          "from train_Knowledge_network_data"
+    knowledge = con_sql(db, sql)
+    knowledge = knowledge.rename(columns={0: "level2", 1: "method", 2: "min", 3: "max",
+                                          4: "treatment_time", 5: "maintain_time", 6: "recover_time"})
+    knowledge["level2"] = knowledge["level2"].astype("str")
+    df = pd.merge(df, knowledge, on='level2', how='left')
+    df = df.drop("level2", axis=1)
+    df = df.drop_duplicates(["ucity_id", "clevel2_id", "ccity_name", "device_type", "manufacturer",
+                             "channel", "top", "time", "app_list", "hospital_id", "level3_ids"])
     df["stat_date"] = date
     print(df.head(6))
     df["app_list"] = df["app_list"].fillna("lost_na")
     df["app_list"] = df["app_list"].apply(app_list_func, args=(app_list_map,))
     df["clevel2_id"] = df["clevel2_id"].fillna("lost_na")
     df["clevel2_id"] = df["clevel2_id"].apply(app_list_func, args=(level2_map,))
+    df["level3_ids"] = df["level3_ids"].fillna("lost_na")
+    df["level3_ids"] = df["level3_ids"].apply(app_list_func, args=(level3_map,))
     # print("predict shape")
     # print(df.shape)
     df["uid"] = df["device_id"]
     df["city"] = df["ucity_id"]
-    features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
-                "channel", "top", "time", "stat_date"]
+    features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
+                "channel", "top", "time", "stat_date", "hospital_id", "method", "min", "max",
+                "treatment_time", "maintain_time", "recover_time"]
     for i in features:
         df[i] = df[i].astype("str")
         df[i] = df[i].fillna("lost")
@@ -167,7 +207,8 @@ def get_predict(date,value_map,app_list_map,level2_map):
     nearby_pre = nearby_pre.drop("label", axis=1)
-    for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "time", "stat_date"]:
+    for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
+              "channel", "top", "time", "stat_date", "hospital_id", "method", "min", "max",
+              "treatment_time", "maintain_time", "recover_time"]:
         native_pre[i] = native_pre[i].map(value_map)
         # TODO: categories not covered by value_map become NaN; fill with 0 for now, improve later
         native_pre[i] = native_pre[i].fillna(0)
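Editor's note: the TODO exists because pandas' Series.map returns NaN for any key absent from the mapping, so prediction-time categories never seen during training would otherwise stay NaN:

import pandas as pd

s = pd.Series(["seen", "unseen"]).map({"seen": 7})
print(s.tolist())            # [7.0, nan] - "unseen" has no entry in the map
print(s.fillna(0).tolist())  # [7.0, 0.0], matching the workaround above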
@@ -176,23 +217,23 @@ def get_predict(date,value_map,app_list_map,level2_map):
         # TODO: categories not covered by value_map become NaN; fill with 0 for now, improve later
         nearby_pre[i] = nearby_pre[i].fillna(0)

     print("native")
     print(native_pre.shape)
     native_pre[["uid", "city", "cid_id"]].to_csv(path + "native.csv", index=False)
     write_csv(native_pre, "native", 200000)
     print("nearby")
     print(nearby_pre.shape)
     nearby_pre[["uid", "city", "cid_id"]].to_csv(path + "nearby.csv", index=False)
     write_csv(nearby_pre, "nearby", 160000)


 if __name__ == '__main__':
     train_data_set = "esmm_train_data"
     path = "/data/esmm/"
-    date, value, app_list, level2 = get_data()
-    get_predict(date, value, app_list, level2)
+    date, value, app_list, level2, level3 = get_data()
+    get_predict(date, value, app_list, level2, level3)
eda/esmm/Model_pipline/submit.sh
@@ -32,15 +32,15 @@ rm ${DATA_PATH}/nearby/nearby_*

 echo "train..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=2000 --field_size=15 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train

 echo "infer native..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/native_infer.log
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=2000 --field_size=15 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/native_infer.log

 echo "infer nearby..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/nearby_infer.log
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=2000 --field_size=15 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/nearby_infer.log

 echo "sort and 2sql"
 ${PYTHON_PATH} ${MODEL_PATH}/to_database.py > ${DATA_PATH}/insert_database.log
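Editor's note: the --field_size bump from 8 to 15 tracks the feats list in to_tfrecord.py below: the seven knowledge-network columns (hospital_id, method, min, max, treatment_time, maintain_time, recover_time) joined the original eight fixed-length fields. A quick sanity check, illustrative only:

feats = ["ucity_id", "ccity_name", "device_type", "manufacturer", "channel",
         "top", "time", "stat_date", "hospital_id", "method", "min", "max",
         "treatment_time", "maintain_time", "recover_time"]
assert len(feats) == 15  # must equal --field_size, since "ids" is parsed
                         # as tf.FixedLenFeature([FLAGS.field_size], tf.int64)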
eda/esmm/Model_pipline/to_tfrecord.py
@@ -21,6 +21,7 @@ tf.app.flags.DEFINE_string("input_dir", "./", "input dir")
 tf.app.flags.DEFINE_string("output_dir", "./", "output dir")
 tf.app.flags.DEFINE_integer("threads", 16, "threads num")

 def gen_tfrecords(in_file):
     basename = os.path.basename(in_file) + ".tfrecord"
     out_file = os.path.join(FLAGS.output_dir, basename)
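Editor's note: for reference, gen_tfrecords derives the output name from the input CSV, so with the /data/esmm/ layout written by feature.py the mapping would look like this (paths are illustrative):

import os

# e.g. in_file = "/data/esmm/tr/tr_0.csv" and FLAGS.output_dir = "/data/esmm/tr/"
basename = os.path.basename("/data/esmm/tr/tr_0.csv") + ".tfrecord"  # "tr_0.csv.tfrecord"
out_file = os.path.join("/data/esmm/tr/", basename)  # "/data/esmm/tr/tr_0.csv.tfrecord"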
@@ -29,18 +30,21 @@ def gen_tfrecords(in_file):
     for i in range(df.shape[0]):
-        feats = ["ucity_id", "ccity_name", "device_type", "manufacturer", "channel",
-                 "top", "time", "stat_date"]
+        feats = ["ucity_id", "ccity_name", "device_type", "manufacturer", "channel",
+                 "top", "time", "stat_date", "hospital_id", "method", "min", "max",
+                 "treatment_time", "maintain_time", "recover_time"]
         id = np.array([])
         for j in feats:
             id = np.append(id, df[j][i])
         app_list = np.array(str(df["app_list"][i]).split(","))
         level2_list = np.array(str(df["clevel2_id"][i]).split(","))
+        level3_list = np.array(str(df["level3_ids"][i]).split(","))
         features = tf.train.Features(feature={
             "y": tf.train.Feature(float_list=tf.train.FloatList(value=[df["y"][i]])),
             "z": tf.train.Feature(float_list=tf.train.FloatList(value=[df["z"][i]])),
             "ids": tf.train.Feature(int64_list=tf.train.Int64List(value=id.astype(np.int))),
-            "app_list": tf.train.Feature(int64_list=tf.train.Int64List(value=app_list.astype(np.int))),
-            "level2_list": tf.train.Feature(int64_list=tf.train.Int64List(value=level2_list.astype(np.int)))
+            "app_list": tf.train.Feature(int64_list=tf.train.Int64List(value=app_list.astype(np.int))),
+            "level2_list": tf.train.Feature(int64_list=tf.train.Int64List(value=level2_list.astype(np.int))),
+            "level3_list": tf.train.Feature(int64_list=tf.train.Int64List(value=level3_list.astype(np.int)))
         })
         example = tf.train.Example(features=features)
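Editor's note: one portability caveat with the casts above: np.int is just an alias for Python's built-in int; NumPy deprecated the alias in 1.20 and removed it in 1.24, so on current NumPy the same cast would be written with an explicit width:

import numpy as np

app_list = np.array("12,34,56".split(","))
ids = app_list.astype(np.int64)  # np.int64 instead of the removed np.int alias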
eda/esmm/Model_pipline/train.py
@@ -55,7 +55,8 @@ def input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):
         "z": tf.FixedLenFeature([], tf.float32),
         "ids": tf.FixedLenFeature([FLAGS.field_size], tf.int64),
         "app_list": tf.VarLenFeature(tf.int64),
-        "level2_list": tf.VarLenFeature(tf.int64)
+        "level2_list": tf.VarLenFeature(tf.int64),
+        "level3_list": tf.VarLenFeature(tf.int64)
     }
     parsed = tf.parse_single_example(record, features)
@@ -103,6 +104,7 @@ def model_fn(features, labels, mode, params):
     feat_ids = features['ids']
     app_list = features['app_list']
     level2_list = features['level2_list']
+    level3_list = features['level3_list']
     if FLAGS.task_type != "infer":
         y = labels['y']
@@ -113,10 +115,11 @@ def model_fn(features, labels, mode, params):
         embedding_id = tf.nn.embedding_lookup(Feat_Emb, feat_ids)
         app_id = tf.nn.embedding_lookup_sparse(Feat_Emb, sp_ids=app_list, sp_weights=None, combiner="sum")
         level2 = tf.nn.embedding_lookup_sparse(Feat_Emb, sp_ids=level2_list, sp_weights=None, combiner="sum")
+        level3 = tf.nn.embedding_lookup_sparse(Feat_Emb, sp_ids=level3_list, sp_weights=None, combiner="sum")
         # x_concat = tf.reshape(embedding_id,shape=[-1, common_dims])  # None * (F * K)
-        x_concat = tf.concat([tf.reshape(embedding_id, shape=[-1, common_dims]), app_id, level2], axis=1)
+        x_concat = tf.concat([tf.reshape(embedding_id, shape=[-1, common_dims]), app_id, level2, level3], axis=1)

     with tf.name_scope("CVR_Task"):
         if mode == tf.estimator.ModeKeys.TRAIN:
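Editor's note: with the flags set in submit.sh (embedding_size=16, field_size=15), the width of x_concat after this change works out as below; common_dims is presumably field_size * embedding_size (that product is an assumption based on the reshape, since its definition falls outside the hunk):

embedding_size = 16
field_size = 15
common_dims = field_size * embedding_size          # 240 flattened fixed-field dims
x_concat_width = common_dims + 3 * embedding_size  # + app_id, level2, level3 pooled sums
print(x_concat_width)                              # 288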