ML / ffm-baseline · Commits · f9f054dc

Commit f9f054dc, authored Apr 03, 2019 by 王志伟

    Merge branch 'master' of http://git.wanmeizhensuo.com/ML/ffm-baseline

Parents: ace011da, 6dcdbc2c

Showing 9 changed files with 125 additions and 121 deletions (+125 -121)
Changed files:

    eda/esmm/Model_pipline/feature.py      +80  -52
    eda/esmm/Model_pipline/send_mail.py     +0  -34
    eda/esmm/Model_pipline/submit.sh        +6   -8
    eda/esmm/Model_pipline/to_database.py   +6   -6
    eda/esmm/Model_pipline/to_tfrecord.py   +7   -3
    eda/esmm/Model_pipline/train.py        +10   -2
    tensnsorflow/es/feature.py              +1   -1
    tensnsorflow/es/submit.sh               +4   -4
    tensnsorflow/test.py                   +11  -11
eda/esmm/Model_pipline/feature.py
View file @ f9f054dc

@@ -18,39 +18,64 @@ def con_sql(db,sql):
     return df
 
+def multi_hot(df, column, n):
+    df[column] = df[column].fillna("lost_na")
+    app_list_value = [i.split(",") for i in df[column].unique()]
+    app_list_unique = []
+    for i in app_list_value:
+        app_list_unique.extend(i)
+    app_list_unique = list(set(app_list_unique))
+    number = len(app_list_unique)
+    app_list_map = dict(zip(app_list_unique, list(range(n, number + n))))
+    df[column] = df[column].apply(app_list_func, args=(app_list_map,))
+    return number, app_list_map
+
 def get_data():
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
-    sql = "select max(stat_date) from esmm_train_data"
+    sql = "select max(stat_date) from {}".format(train_data_set)
     validate_date = con_sql(db, sql)[0].values.tolist()[0]
     print("validate_date:" + validate_date)
     temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
-    start = (temp - datetime.timedelta(days=60)).strftime("%Y-%m-%d")
+    start = (temp - datetime.timedelta(days=300)).strftime("%Y-%m-%d")
     print(start)
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
-    sql = "select e.y,e.z,e.stat_date,e.ucity_id,e.clevel1_id,e.ccity_name," \
-          "u.device_type,u.manufacturer,u.channel,c.top,cl.l1,cl.l2,e.device_id,cut.time " \
-          "from esmm_train_data e left join user_feature u on e.device_id = u.device_id " \
+    sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name," \
+          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,cut.time,dl.app_list " \
+          "from {} e left join user_feature u on e.device_id = u.device_id " \
           "left join cid_type_top c on e.device_id = c.device_id " \
-          "left join cid_level2 cl on e.cid_id = cl.cid " \
           "left join cid_time_cut cut on e.cid_id = cut.cid " \
-          "where e.stat_date >= '{}'".format(start)
+          "left join device_app_list dl on e.device_id = dl.device_id " \
+          "left join diary_feat feat on e.cid_id = feat.diary_id " \
+          "where e.stat_date >= '{}'".format(train_data_set, start)
     df = con_sql(db, sql)
     # print(df.shape)
-    df = df.rename(columns={0: "y", 1: "z", 2: "stat_date", 3: "ucity_id", 4: "clevel1_id", 5: "ccity_name",
-                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "l1", 11: "l2",
-                            12: "device_id", 13: "time"})
+    df = df.rename(columns={0: "y", 1: "z", 2: "stat_date", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
+                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
+                            11: "time", 12: "app_list"})
     print("esmm data ok")
     # print(df.head(2)
     print("before")
     print(df.shape)
     print("after")
     df = df.drop_duplicates()
-    df = df.drop_duplicates(["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
-                             "channel", "top", "l1", "l2", "time", "stat_date"])
+    df = df.drop_duplicates(["ucity_id", "clevel2_id", "ccity_name", "device_type", "manufacturer",
+                             "channel", "top", "time", "stat_date", "app_list"])
     print(df.shape)
 
+    app_list_number, app_list_map = multi_hot(df, "app_list", 1)
+    level2_number, level2_map = multi_hot(df, "clevel2_id", 1 + app_list_number)
+    # df["app_list"] = df["app_list"].fillna("lost_na")
+    # app_list_value = [i.split(",") for i in df["app_list"].unique()]
+    # app_list_unique = []
+    # for i in app_list_value:
+    #     app_list_unique.extend(i)
+    # app_list_unique = list(set(app_list_unique))
+    # app_list_map = dict(zip(app_list_unique, list(range(1, len(app_list_unique) + 1))))
+    # df["app_list"] = df["app_list"].apply(app_list_func,args=(app_list_map,))
+
     unique_values = []
-    features = ["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
+    features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
                 "channel", "top", "time", "stat_date"]
     for i in features:
         df[i] = df[i].astype("str")

@@ -58,25 +83,15 @@ def get_data():
         # the next line distinguishes identical values that occur in different columns
         df[i] = df[i] + i
         unique_values.extend(list(df[i].unique()))
-    for i in ["l1", "l2"]:
-        df[i] = df[i].astype("str")
-        df[i] = df[i].fillna("lost")
-        # values in l1 and l2 share one category space with top
-        df[i] = df[i] + "top"
-        unique_values.extend(list(df[i].unique()))
     print("features:")
     print(len(unique_values))
     print(df.head(2))
-    temp = list(range(1, len(unique_values) + 1))
+    temp = list(range(1 + app_list_number + level2_number,
+                      1 + app_list_number + level2_number + len(unique_values)))
     value_map = dict(zip(unique_values, temp))
 
     df = df.drop("device_id", axis=1)
-    train = df
+    train = df[df["stat_date"] != validate_date + "stat_date"]
     test = df[df["stat_date"] == validate_date + "stat_date"]
-    for i in ["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "l1", "time", "stat_date", "l2"]:
+    for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
+              "channel", "top", "time", "stat_date"]:
         train[i] = train[i].map(value_map)
         test[i] = test[i].map(value_map)

@@ -88,7 +103,18 @@ def get_data():
     write_csv(train, "tr", 100000)
     write_csv(test, "va", 80000)
-    return validate_date, value_map
+    return validate_date, value_map, app_list_map, level2_map
+
+def app_list_func(x, l):
+    b = x.split(",")
+    e = []
+    for i in b:
+        if i in l.keys():
+            e.append(l[i])
+        else:
+            e.append(0)
+    return ",".join([str(j) for j in e])
 
 def write_csv(df, name, n):

@@ -102,44 +128,45 @@ def write_csv(df,name,n):
         temp.to_csv(path + name + "/{}_{}.csv".format(name, i), index=False)
 
-def get_predict(date, value_map):
+def get_predict(date, value_map, app_list_map, level2_map):
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
-    sql = "select e.y,e.z,e.label,e.ucity_id,e.clevel1_id,e.ccity_name," \
-          "u.device_type,u.manufacturer,u.channel,c.top,cl.l1,cl.l2,e.device_id,e.cid_id,cut.time " \
+    sql = "select e.y,e.z,e.label,e.ucity_id,feat.level2_ids,e.ccity_name," \
+          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,e.cid_id,cut.time,dl.app_list " \
           "from esmm_pre_data e left join user_feature u on e.device_id = u.device_id " \
           "left join cid_type_top c on e.device_id = c.device_id " \
-          "left join cid_level2 cl on e.cid_id = cl.cid " \
-          "left join cid_time_cut cut on e.cid_id = cut.cid"
+          "left join cid_time_cut cut on e.cid_id = cut.cid " \
+          "left join device_app_list dl on e.device_id = dl.device_id " \
+          "left join diary_feat feat on e.cid_id = feat.diary_id"
     df = con_sql(db, sql)
-    df = df.rename(columns={0: "y", 1: "z", 2: "label", 3: "ucity_id", 4: "clevel1_id", 5: "ccity_name",
-                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "l1", 11: "l2",
-                            12: "device_id", 13: "cid_id", 14: "time"})
+    df = df.rename(columns={0: "y", 1: "z", 2: "label", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
+                            11: "cid_id", 12: "time", 13: "app_list"})
     df["stat_date"] = date
     print(df.head(6))
+    df["app_list"] = df["app_list"].fillna("lost_na")
+    df["app_list"] = df["app_list"].apply(app_list_func, args=(app_list_map,))
+    df["clevel2_id"] = df["clevel2_id"].fillna("lost_na")
+    df["clevel2_id"] = df["clevel2_id"].apply(app_list_func, args=(level2_map,))
-    print("predict shape")
-    print(df.shape)
+    # print("predict shape")
+    # print(df.shape)
     df["uid"] = df["device_id"]
     df["city"] = df["ucity_id"]
-    features = ["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
+    features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
                 "channel", "top", "time", "stat_date"]
     for i in features:
         df[i] = df[i].astype("str")
         df[i] = df[i].fillna("lost")
         df[i] = df[i] + i
-    for i in ["l1", "l2"]:
-        df[i] = df[i].astype("str")
-        df[i] = df[i].fillna("lost")
-        # values in l1 and l2 share one category space with top
-        df[i] = df[i] + "top"
     native_pre = df[df["label"] == 0]
     native_pre = native_pre.drop("label", axis=1)
     nearby_pre = df[df["label"] == 1]
     nearby_pre = nearby_pre.drop("label", axis=1)
-    for i in ["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "l1", "time", "stat_date", "l2"]:
+    for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
+              "channel", "top", "time", "stat_date"]:
         native_pre[i] = native_pre[i].map(value_map)
         # TODO: categories not covered by value_map become NaN; fill with 0 for now, improve later
         native_pre[i] = native_pre[i].fillna(0)

@@ -151,19 +178,20 @@ def get_predict(date,value_map):
     print("native")
     print(native_pre.shape)
     print(native_pre.head())
     native_pre[["uid", "city", "cid_id"]].to_csv(path + "native.csv", index=False)
     write_csv(native_pre, "native", 200000)
     print("nearby")
     print(nearby_pre.shape)
     print(nearby_pre.head())
     nearby_pre[["uid", "city", "cid_id"]].to_csv(path + "nearby.csv", index=False)
     write_csv(nearby_pre, "nearby", 160000)
 
 if __name__ == '__main__':
-    path = "/home/gmuser/esmm_data/"
-    date, value = get_data()
-    get_predict(date, value)
+    train_data_set = "esmm_train_data"
+    path = "/data/esmm/"
+    date, value, app_list, level2 = get_data()
+    get_predict(date, value, app_list, level2)
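The core of this commit is the switch from the single-valued l1/l2 category fields to variable-length multi-hot lists (app_list, clevel2_id). As a reading aid, here is a condensed, self-contained sketch of the encoding that multi_hot() and app_list_func() perform; the function names mirror feature.py, but the data is illustrative only and the sketch sorts the vocabulary for a deterministic output (the repo uses an unordered set):

    # Minimal sketch of the multi-hot encoding above: comma-separated tag
    # strings are mapped to integer ids starting at offset n, and unseen
    # tags fall back to id 0. Toy data only.
    import pandas as pd

    def app_list_func(x, m):
        # map each comma-separated token to its integer id; 0 for unknowns
        return ",".join(str(m.get(i, 0)) for i in x.split(","))

    def multi_hot(df, column, n):
        df[column] = df[column].fillna("lost_na")
        # sorted for a deterministic toy output (the repo uses set())
        uniques = sorted({t for row in df[column].unique() for t in row.split(",")})
        id_map = dict(zip(uniques, range(n, n + len(uniques))))
        df[column] = df[column].apply(app_list_func, args=(id_map,))
        return len(uniques), id_map

    df = pd.DataFrame({"app_list": ["a,b", "b,c", None]})
    n, m = multi_hot(df, "app_list", 1)   # ids start at 1, as in the commit
    print(n, df["app_list"].tolist())     # 4 ['1,2', '2,3', '4']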
eda/esmm/Model_pipline/send_mail.py
deleted 100644 → 0
View file @ ace011da

-#coding=utf-8
-import smtplib
-from email.mime.text import MIMEText
-from email.utils import formataddr
-import datetime
-
-my_sender = 'gaoyazhe@igengmei.com'
-my_pass = 'VCrKTui99a7ALhiK'
-my_user1 = 'gaoyazhe@igengmei.com'
-my_user2 = 'zhangyanzhao@igengmei.com'
-
-def mail():
-    ret = True
-    try:
-        with open('/home/gmuser/esmm_data/submit.log') as f:
-            stat_data = f.read()
-        msg = MIMEText(stat_data, 'plain', 'utf-8')
-        msg['From'] = formataddr(["高雅喆", my_sender])
-        msg['To'] = my_user1 + ',' + my_user2
-        msg['Subject'] = str(datetime.date.today()) + "-esmm多目标模型训练指标统计"
-        server = smtplib.SMTP_SSL("smtp.exmail.qq.com", 465)
-        server.login(my_sender, my_pass)
-        server.sendmail(my_sender, [my_user1, my_user2], msg.as_string())
-        server.quit()
-    except Exception:
-        ret = False
-    return ret
-
-ret = mail()
-if ret:
-    print("邮件发送成功")
-else:
-    print("邮件发送失败")
\ No newline at end of file
eda/esmm/Model_pipline/submit.sh
View file @ f9f054dc

 #! /bin/bash
 
 git checkout master
 PYTHON_PATH=/home/gaoyazhe/miniconda3/bin/python
-MODEL_PATH=/srv/apps/ffm-baseline/eda/esmm/Model_pipline
-DATA_PATH=/home/gmuser/esmm_data
+MODEL_PATH=/srv/apps/ffm-baseline/tensnsorflow/es
+DATA_PATH=/data/esmm
 
 echo "rm leave tfrecord"
 rm ${DATA_PATH}/tr/*
 rm ${DATA_PATH}/va/*
 rm ${DATA_PATH}/native/*
 rm ${DATA_PATH}/nearby/*
-rm -r ${DATA_PATH}/model_ckpt/DeepCvrMTL/201*
+rm -r ${DATA_PATH}/model_ckpt/DeepCvrMTL/20*
 
 echo "data"
 ${PYTHON_PATH} ${MODEL_PATH}/feature.py > ${DATA_PATH}/feature.log

@@ -33,16 +32,15 @@ rm ${DATA_PATH}/nearby/nearby_*
 echo "train..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=2 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=2000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train
 
 echo "infer native..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=2000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/native_infer.log
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/native_infer.log
 
 echo "infer nearby..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=2000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/nearby_infer.log
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/nearby_infer.log
 
 echo "sort and 2sql"
 ${PYTHON_PATH} ${MODEL_PATH}/to_database.py
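The flag changes above (--field_size 11 → 8, --feature_size 2000 → 300000) track the feature rework: clevel1_id, l1, and l2 left the fixed id vector, and the shared embedding table must now also cover every app_list and level2 id that feature.py allocates ahead of value_map. A hypothetical sanity check, not part of the repo, assuming feature.py's consecutive id layout:

    # Hypothetical helper: the --feature_size passed to train.py must exceed
    # every id produced by feature.py, i.e. the 0 "unknown" bucket plus the
    # app_list ids, the level2 ids, and the single-valued field ids.
    def min_feature_size(app_list_map, level2_map, value_map):
        return 1 + len(app_list_map) + len(level2_map) + len(value_map)

    # usage (assuming feature.py's get_data() return values):
    # date, value_map, app_list_map, level2_map = get_data()
    # assert min_feature_size(app_list_map, level2_map, value_map) <= 300000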
eda/esmm/Model_pipline/to_database.py
View file @ f9f054dc

@@ -3,6 +3,7 @@
 from sqlalchemy import create_engine
 import pandas as pd
 import pymysql
+import MySQLdb
 import time
 
 def con_sql(sql):

@@ -36,10 +37,10 @@ def native_set_join(lst):
 def main():
     # native queue
-    df2 = pd.read_csv('/home/gmuser/esmm_data/native.csv')
+    df2 = pd.read_csv('/data/esmm/native.csv')
     df2['cid_id'] = df2['cid_id'].astype(str)
 
-    df1 = pd.read_csv("/home/gmuser/esmm_data/native/pred.txt", sep='\t', header=None, names=["ctr", "cvr", "ctcvr"])
+    df1 = pd.read_csv("/data/esmm/native/pred.txt", sep='\t', header=None, names=["ctr", "cvr", "ctcvr"])
     df2["ctr"], df2["cvr"], df2["ctcvr"] = df1["ctr"], df1["cvr"], df1["ctcvr"]
     df3 = df2.groupby(by=["uid", "city"]).apply(lambda x: x.sort_values(by="ctcvr", ascending=False)) \
         .reset_index(drop=True).groupby(by=["uid", "city"]).agg({'cid_id': native_set_join}).reset_index(drop=False)
     df3.columns = ["device_id", "city_id", "native_queue"]

@@ -47,10 +48,10 @@ def main():
     # nearby queue
-    df2 = pd.read_csv('/home/gmuser/esmm_data/nearby.csv')
+    df2 = pd.read_csv('/data/esmm/nearby.csv')
     df2['cid_id'] = df2['cid_id'].astype(str)
 
-    df1 = pd.read_csv("/home/gmuser/esmm_data/nearby/pred.txt", sep='\t', header=None, names=["ctr", "cvr", "ctcvr"])
+    df1 = pd.read_csv("/data/esmm/nearby/pred.txt", sep='\t', header=None, names=["ctr", "cvr", "ctcvr"])
     df2["ctr"], df2["cvr"], df2["ctcvr"] = df1["ctr"], df1["cvr"], df1["ctcvr"]
     df4 = df2.groupby(by=["uid", "city"]).apply(lambda x: x.sort_values(by="ctcvr", ascending=False)) \
         .reset_index(drop=True).groupby(by=["uid", "city"]).agg({'cid_id': nearby_set_join}).reset_index(drop=False)
     df4.columns = ["device_id", "city_id", "nearby_queue"]

@@ -65,7 +66,6 @@ def main():
     print("union_device_count", df_all.shape)
     host = '10.66.157.22'
     port = 4000
     user = 'root'

@@ -84,7 +84,7 @@ def main():
         cur = con.cursor()
         cur.execute(delete_str)
         con.commit()
-        df_all.to_sql('esmm_device_diary_queue', con=engine, if_exists='append', index=False, chunksize=8000)
+        df_all.to_sql('esmm_device_diary_queue', con=engine, if_exists='append', index=False)
     except Exception as e:
         print(e)
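For readers unfamiliar with the pandas pattern in main(): each (uid, city) group is sorted by predicted ctcvr, then the ranked cid_ids are collapsed into a single comma-separated queue string. A toy sketch of the same chain; set_join here is a simplified stand-in for the repo's native_set_join/nearby_set_join, which may apply extra de-duplication:

    import pandas as pd

    def set_join(lst):
        # simplified stand-in: join ranked ids into one queue string
        return ",".join(str(i) for i in lst)

    df2 = pd.DataFrame({"uid": [1, 1, 2], "city": ["bj", "bj", "sh"],
                        "cid_id": ["10", "11", "12"],
                        "ctcvr": [0.2, 0.9, 0.5]})
    df3 = (df2.groupby(by=["uid", "city"])
              .apply(lambda x: x.sort_values(by="ctcvr", ascending=False))
              .reset_index(drop=True)
              .groupby(by=["uid", "city"])
              .agg({"cid_id": set_join})
              .reset_index(drop=False))
    df3.columns = ["device_id", "city_id", "native_queue"]
    print(df3)  # uid 1 gets "11,10": the higher-ctcvr diary comes first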
eda/esmm/Model_pipline/to_tfrecord.py
View file @ f9f054dc

@@ -28,15 +28,19 @@ def gen_tfrecords(in_file):
     df = pd.read_csv(in_file)
     for i in range(df.shape[0]):
-        feats = ["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
-                 "channel", "top", "l1", "time", "stat_date", "l2"]
+        feats = ["ucity_id", "ccity_name", "device_type", "manufacturer",
+                 "channel", "top", "time", "stat_date"]
         id = np.array([])
         for j in feats:
             id = np.append(id, df[j][i])
+        app_list = np.array(str(df["app_list"][i]).split(","))
+        level2_list = np.array(str(df["clevel2_id"][i]).split(","))
         features = tf.train.Features(feature={
             "y": tf.train.Feature(float_list=tf.train.FloatList(value=[df["y"][i]])),
             "z": tf.train.Feature(float_list=tf.train.FloatList(value=[df["z"][i]])),
-            "ids": tf.train.Feature(int64_list=tf.train.Int64List(value=id.astype(np.int)))
+            "ids": tf.train.Feature(int64_list=tf.train.Int64List(value=id.astype(np.int))),
+            "app_list": tf.train.Feature(int64_list=tf.train.Int64List(value=app_list.astype(np.int))),
+            "level2_list": tf.train.Feature(int64_list=tf.train.Int64List(value=level2_list.astype(np.int)))
         })
         example = tf.train.Example(features=features)
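The new app_list/level2_list entries are ordinary Int64List features, but unlike ids their length varies per example, which is why train.py parses them with VarLenFeature rather than FixedLenFeature. A minimal round-trip sketch under the TF 1.x API this repo uses, with toy values:

    import tensorflow as tf

    # write one example: "ids" has a fixed length, "app_list" does not
    example = tf.train.Example(features=tf.train.Features(feature={
        "y": tf.train.Feature(float_list=tf.train.FloatList(value=[1.0])),
        "z": tf.train.Feature(float_list=tf.train.FloatList(value=[0.0])),
        "ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[3, 7, 9])),
        "app_list": tf.train.Feature(int64_list=tf.train.Int64List(value=[5, 2, 8, 1])),
    }))
    serialized = example.SerializeToString()

    parsed = tf.parse_single_example(serialized, {
        "y": tf.FixedLenFeature([], tf.float32),
        "z": tf.FixedLenFeature([], tf.float32),
        "ids": tf.FixedLenFeature([3], tf.int64),   # length must be known up front
        "app_list": tf.VarLenFeature(tf.int64),     # comes back as a SparseTensor
    })
    with tf.Session() as sess:
        out = sess.run(parsed)
        print(out["ids"], out["app_list"].values)   # [3 7 9] [5 2 8 1]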
eda/esmm/Model_pipline/train.py
View file @ f9f054dc

@@ -53,7 +53,9 @@ def input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):
     features = {
         "y": tf.FixedLenFeature([], tf.float32),
         "z": tf.FixedLenFeature([], tf.float32),
-        "ids": tf.FixedLenFeature([11], tf.int64)
+        "ids": tf.FixedLenFeature([FLAGS.field_size], tf.int64),
+        "app_list": tf.VarLenFeature(tf.int64),
+        "level2_list": tf.VarLenFeature(tf.int64)
     }
     parsed = tf.parse_single_example(record, features)

@@ -99,6 +101,8 @@ def model_fn(features, labels, mode, params):
     Feat_Emb = tf.get_variable(name='embeddings', shape=[feature_size, embedding_size], initializer=tf.glorot_normal_initializer())
 
     feat_ids = features['ids']
+    app_list = features['app_list']
+    level2_list = features['level2_list']
 
     if FLAGS.task_type != "infer":
         y = labels['y']

@@ -107,8 +111,12 @@ def model_fn(features, labels, mode, params):
     #------build f(x)------
     with tf.variable_scope("Shared-Embedding-layer"):
         embedding_id = tf.nn.embedding_lookup(Feat_Emb, feat_ids)
+        app_id = tf.nn.embedding_lookup_sparse(Feat_Emb, sp_ids=app_list, sp_weights=None, combiner="sum")
+        level2 = tf.nn.embedding_lookup_sparse(Feat_Emb, sp_ids=level2_list, sp_weights=None, combiner="sum")
 
-        x_concat = tf.reshape(embedding_id, shape=[-1, common_dims])  # None * (F * K)
+        # x_concat = tf.reshape(embedding_id,shape=[-1, common_dims])  # None * (F * K)
+        x_concat = tf.concat([tf.reshape(embedding_id, shape=[-1, common_dims]), app_id, level2], axis=1)
 
     with tf.name_scope("CVR_Task"):
         if mode == tf.estimator.ModeKeys.TRAIN:
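Because app_list/level2_list parse to SparseTensors, the shared embedding layer combines them with tf.nn.embedding_lookup_sparse and combiner="sum": each variable-length id list collapses to one embedding_size vector per example, which is then concatenated onto the flattened fixed-field embeddings. A standalone sketch with toy shapes; variable names mirror train.py but the values are made up:

    import tensorflow as tf

    feature_size, embedding_size, field_size = 100, 4, 8
    Feat_Emb = tf.get_variable("embeddings", shape=[feature_size, embedding_size],
                               initializer=tf.glorot_normal_initializer())

    feat_ids = tf.constant([[1, 2, 3, 4, 5, 6, 7, 8]], dtype=tf.int64)  # batch of 1
    app_list = tf.SparseTensor(indices=[[0, 0], [0, 1], [0, 2]],        # 3 ids for row 0
                               values=tf.constant([10, 11, 12], dtype=tf.int64),
                               dense_shape=[1, 3])

    embedding_id = tf.nn.embedding_lookup(Feat_Emb, feat_ids)           # 1 x F x K
    app_id = tf.nn.embedding_lookup_sparse(Feat_Emb, sp_ids=app_list,
                                           sp_weights=None, combiner="sum")  # 1 x K
    x_concat = tf.concat([tf.reshape(embedding_id, [-1, field_size * embedding_size]),
                          app_id], axis=1)                               # 1 x (F*K + K)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(x_concat).shape)  # (1, 36)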
tensnsorflow/es/feature.py
View file @ f9f054dc

@@ -136,7 +136,7 @@ def get_predict(date,value_map,app_list_map,level2_map):
           "left join cid_type_top c on e.device_id = c.device_id " \
           "left join cid_time_cut cut on e.cid_id = cut.cid " \
           "left join device_app_list dl on e.device_id = dl.device_id " \
-          "left join diary_feat feat on e.cid_id = feat.diary_id limit 600"
+          "left join diary_feat feat on e.cid_id = feat.diary_id"
     df = con_sql(db, sql)
     df = df.rename(columns={0: "y", 1: "z", 2: "label", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
                             6: "device_type", 7: "manufacturer", 8: "channel", 9: "top",
tensnsorflow/es/pipeline.sh → tensnsorflow/es/submit.sh
View file @ f9f054dc

@@ -9,7 +9,7 @@ rm ${DATA_PATH}/tr/*
 rm ${DATA_PATH}/va/*
 rm ${DATA_PATH}/native/*
 rm ${DATA_PATH}/nearby/*
-rm -r ${DATA_PATH}/model_ckpt/DeepCvrMTL/201*
+rm -r ${DATA_PATH}/model_ckpt/DeepCvrMTL/20*
 
 echo "data"
 ${PYTHON_PATH} ${MODEL_PATH}/feature.py > ${DATA_PATH}/feature.log

@@ -32,15 +32,15 @@ rm ${DATA_PATH}/nearby/nearby_*
 echo "train..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.5,0.5,0.5,0.5,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train
 
 echo "infer native..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.5,0.5,0.5,0.5,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/native_infer.log
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/native_infer.log
 
 echo "infer nearby..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.5,0.5,0.5,0.5,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/nearby_infer.log
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.5 --learning_rate=0.0001 --deep_layers=512,256,128,64,32 --dropout=0.3,0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=8 --feature_size=300000 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/nearby_infer.log
 
 echo "sort and 2sql"
 ${PYTHON_PATH} ${MODEL_PATH}/to_database.py
tensnsorflow/test.py
View file @ f9f054dc

 import datetime
-# from pyspark.sql import HiveContext
-# from pyspark.context import SparkContext
-# from pyspark.conf import SparkConf
-# from pyspark.sql import SQLContext
-# from pyspark.sql import SparkSession
+from pyspark.sql import HiveContext
+from pyspark.context import SparkContext
+from pyspark.conf import SparkConf
+from pyspark.sql import SQLContext
+from pyspark.sql import SparkSession
 # from py4j.java_gateway import java_import
 # import pytispark.pytispark as pti
 import pandas as pd

@@ -26,12 +26,12 @@ def con_sql(db,sql):
 # def test():
-#     conf = SparkConf().setAppName("My App").set("spark.io.compression.codec", "lzf")
-#     sc = SparkContext(conf = conf)
-#     hive_context = HiveContext(sc)
-#     hive_context.sql(''' select device["device_type"] from online.tl_hdfs_maidian_view
-#     where partition_date = '20181012' and action = "page_view"
-#     and params["page_name"] = "diary_detail" and params["referrer"] = "home" limit 10 ''').show(6)
+conf = SparkConf().setAppName("My App").set("spark.io.compression.codec", "lzf")
+sc = SparkContext(conf=conf)
+hive_context = HiveContext(sc)
+hive_context.sql(''' select device["device_type"] from online.tl_hdfs_maidian_view
+    where partition_date = '20181012' and action = "page_view"
+    and params["page_name"] = "diary_detail" and params["referrer"] = "home" limit 10 ''').show(6)
 
 # def esmm_pre():