ML / ffm-baseline · Commits

Commit 246e3a1c, authored Dec 18, 2018 by 王志伟
Merge branch 'master' of http://git.wanmeizhensuo.com/ML/ffm-baseline
Parents: 9b0dc205, 574c9a4f
Showing 9 changed files with 94 additions and 112 deletions (+94, -112):
data2ffm.py       eda/esmm/Feature_pipline/data2ffm.py                 +0   -0
get_tfrecord.py   eda/esmm/Feature_pipline/get_tfrecord.py             +4   -5
DeepCvrMTL.py     eda/esmm/Model_pipline/DeepCvrMTL.py                 +1   -2
send_mail.py      eda/esmm/Model_pipline/send_mail.py                  +1   -1
sort_and_2sql.py  eda/esmm/Model_pipline/sort_and_2sql.py              +17  -24
submit.sh         eda/esmm/Model_pipline/submit.sh                     +16  -19
EsmmData.scala    eda/feededa/src/main/scala/com/gmei/EsmmData.scala   +2   -2
merge_sort.py     eda/merge_sort.py                                    +14  -0
ffm.py            tensnsorflow/ffm.py                                  +39  -59
eda/esmm/Feature_pipline/data2ffm.py (new file, mode 0 → 100644; large diff collapsed, not shown)
eda/esmm/Feature_pipline/get_tfrecord.py

 #!/usr/bin/env python
 #coding=utf-8
 from __future__ import absolute_import
...
@@ -26,10 +25,10 @@ tf.app.flags.DEFINE_integer("threads", 16, "threads num")
 #User_Fileds = set(['101','109_14','110_14','127_14','150_14','121','122','124','125','126','127','128','129'])
 #Ad_Fileds = set(['205','206','207','210','216'])
 #Context_Fileds = set(['508','509','702','853','301'])
 #Common_Fileds = {'1':'1','2':'2','3':'3','4':'4','5':'5','6':'6','7':'7','8':'8','9':'9','10':'10','11':'11','12':'12','13':'13','14':'14','15':'15','16':'16','17':'17','18':'18','19':'19','20':'20','21':'21','22':'22','23':'23','24':'24','25':'25','26':'26','27':'27','28':'28','29':'29','30':'30'}
-Common_Fileds = {'1':'1','2':'2','3':'3','4':'4','5':'5','6':'6','7':'7','8':'8','9':'9','10':'10','11':'11'}
-UMH_Fileds = {'109_14':('u_cat','12'),'110_14':('u_shop','13'),'127_14':('u_brand','14'),'150_14':('u_int','15')}  #user multi-hot feature
-Ad_Fileds = {'206':('a_cat','16'),'207':('a_shop','17'),'210':('a_int','18'),'216':('a_brand','19')}  #ad feature for DIN
+Common_Fileds = {'1':'1','2':'2','3':'3','4':'4','5':'5','6':'6','7':'7','8':'8','9':'9','10':'10','11':'11','12':'12','13':'13','14':'14','15':'15','16':'16','17':'17','18':'18','19':'19','20':'20','21':'21','22':'22','23':'23'}
+#Common_Fileds = {'1':'1','2':'2','3':'3','4':'4','5':'5','6':'6','7':'7','8':'8','9':'9','10':'10','11':'11'}
+UMH_Fileds = {'109_14':('u_cat','12'),'110_14':('u_shop','13'),'127_14':('u_brand','14'),'150_14':('u_int','15')}  #user multi-hot feature
+Ad_Fileds = {'206':('a_cat','16'),'207':('a_shop','17'),'210':('a_int','18'),'216':('a_brand','19')}  #ad feature for DIN
 #40362692,0,0,216:9342395:1.0 301:9351665:1.0 205:7702673:1.0 206:8317829:1.0 207:8967741:1.0 508:9356012:2.30259 210:9059239:1.0 210:9042796:1.0 210:9076972:1.0 210:9103884:1.0 210:9063064:1.0 127_14:3529789:2.3979 127_14:3806412:2.70805
 def gen_tfrecords(in_file):
...
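These field tables drive gen_tfrecords, whose body is collapsed in this diff. As a rough illustration only (a stand-alone sketch, not the repo's implementation; parse_line and its output layout are assumptions), this is how such tables can bucket one "id,y,z,field:feature:value ..." record like the sample in the trailing comment:

    # Illustrative sketch only; gen_tfrecords in the repo may differ.
    Common_Fileds = {str(i): str(i) for i in range(1, 24)}
    UMH_Fileds = {'109_14': ('u_cat', '12'), '110_14': ('u_shop', '13'),
                  '127_14': ('u_brand', '14'), '150_14': ('u_int', '15')}
    Ad_Fileds = {'206': ('a_cat', '16'), '207': ('a_shop', '17'),
                 '210': ('a_int', '18'), '216': ('a_brand', '19')}

    def parse_line(line):
        """Bucket one 'id,y,z,field:feature:value ...' record by field table."""
        head = line.split(",", 3)
        common, multi = {}, {}
        for tok in head[3].split():
            field, feature, value = tok.split(":")
            if field in Common_Fileds:            # single-valued fields 1..23
                common[field] = (int(feature), float(value))
            elif field in UMH_Fileds or field in Ad_Fileds:
                name = (UMH_Fileds.get(field) or Ad_Fileds[field])[0]
                multi.setdefault(name, []).append((int(feature), float(value)))
        return head[:3], common, multi            # id/y/z, one-hot, multi-hot

    sample = "40362692,0,0,216:9342395:1.0 301:9351665:1.0 210:9059239:1.0"
    print(parse_line(sample))

Fields absent from all three tables (such as '301' above) are simply skipped, which mirrors how the dictionaries act as whitelists.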
eda/esmm/Model_pipline/DeepCvrMTL.py

 #!/usr/bin/env python
 #coding=utf-8
 #from __future__ import absolute_import
...
@@ -346,7 +345,7 @@ def main(_):
         print("-"*100)
         with open(FLAGS.data_dir+"/pred.txt", "w") as fo:
             for prob in preds:
-                fo.write("%f\t%f\n" % (prob['pctr'], prob['pcvr']))
+                fo.write("%f\t%f\t%f\n" % (prob['pctr'], prob['pcvr'], prob['pctcvr']))
     elif FLAGS.task_type == 'export':
         print("Not Implemented, Do It Yourself!")
         #feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
...
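The extra third column exists because ESMM models post-click conversion jointly with click-through, so the served score is the product pCTCVR = pCTR * pCVR. A one-line sanity check with made-up numbers:

    pctr, pcvr = 0.12, 0.05            # made-up probabilities, not model output
    pctcvr = pctr * pcvr               # ESMM identity: P(click and convert)
    print("%f\t%f\t%f" % (pctr, pcvr, pctcvr))   # same row layout as pred.txt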
eda/esmm/Model_pipline/send_mail.py

-# -*- coding: utf-8 -*-
+# coding=utf-8
 import smtplib
 from email.mime.text import MIMEText
...
eda/esmm/Model_pipline/sort_and_2sql.py

 #coding=utf-8
 from sqlalchemy import create_engine
 import pandas as pd
 import pymysql
...
@@ -17,39 +19,30 @@ def con_sql(sql):
     return result

 def set_join(lst):
-    return ','.join(set(lst))
+    return ','.join([str(i) for i in set(lst)])

 def main():
-    sql = "select device_id,city_id,cid from esmm_data2ffm_infer_native"
-    result = con_sql(sql)
-    dct = {"uid":[], "city":[], "cid_id":[]}
-    for i in result:
-        dct["uid"].append(i[0])
-        dct["city"].append(i[1])
-        dct["cid_id"].append(i[2])
-    df1 = pd.read_csv("/home/gaoyazhe/data/native/pred.txt", sep='\t', header=None, names=["ctr","cvr"])
-    df2 = pd.DataFrame(dct)
-    df2["ctr"], df2["cvr"] = df1["ctr"], df1["cvr"]
-    df3 = df2.groupby(by=["uid","city"]).apply(lambda x: x.sort_values(by="cvr",ascending=False)).reset_index(drop=True).groupby(by=["uid","city"]).agg({'cid_id':set_join}).reset_index(drop=False)
+    # native queue
+    df2 = pd.read_csv('/home/gaoyazhe/data/native.csv', usecols=[0,1,2], header=0, names=['uid','city','cid_id'], sep='\t')
+    df2['cid_id'] = df2['cid_id'].astype('object')
+    df1 = pd.read_csv("/home/gaoyazhe/data/native/pred.txt", sep='\t', header=None, names=["ctr","cvr","ctcvr"])
+    df2["ctr"], df2["cvr"], df2["ctcvr"] = df1["ctr"], df1["cvr"], df1["ctcvr"]
+    df3 = df2.groupby(by=["uid","city"]).apply(lambda x: x.sort_values(by="ctcvr",ascending=False)).reset_index(drop=True).groupby(by=["uid","city"]).agg({'cid_id':set_join}).reset_index(drop=False)
     ctime = int(time.time())
     df3["time"] = ctime
     df3.columns = ["device_id","city_id","native_queue","time"]
     print("native_device_count", df3.shape)

-    sql_nearby = "select device_id,city_id,cid from esmm_data2ffm_infer_nearby"
-    result = con_sql(sql_nearby)
-    dct = {"uid":[], "city":[], "cid_id":[]}
-    for i in result:
-        dct["uid"].append(i[0])
-        dct["city"].append(i[1])
-        dct["cid_id"].append(i[2])
-    df1 = pd.read_csv("/home/gaoyazhe/data/nearby/pred.txt", sep='\t', header=None, names=["ctr","cvr"])
-    df2 = pd.DataFrame(dct)
-    df2["ctr"], df2["cvr"] = df1["ctr"], df1["cvr"]
-    df4 = df2.groupby(by=["uid","city"]).apply(lambda x: x.sort_values(by="cvr",ascending=False)).reset_index(drop=True).groupby(by=["uid","city"]).agg({'cid_id':set_join}).reset_index(drop=False)
+    # nearby queue
+    df2 = pd.read_csv('/home/gaoyazhe/data/nearby.csv', usecols=[0,1,2], header=0, names=['uid','city','cid_id'], sep='\t')
+    df2['cid_id'] = df2['cid_id'].astype('object')
+    df1 = pd.read_csv("/home/gaoyazhe/data/nearby/pred.txt", sep='\t', header=None, names=["ctr","cvr","ctcvr"])
+    df2["ctr"], df2["cvr"], df2["ctcvr"] = df1["ctr"], df1["cvr"], df1["ctcvr"]
+    df4 = df2.groupby(by=["uid","city"]).apply(lambda x: x.sort_values(by="ctcvr",ascending=False)).reset_index(drop=True).groupby(by=["uid","city"]).agg({'cid_id':set_join}).reset_index(drop=False)
     df4.columns = ["device_id","city_id","nearby_queue"]
     print("nearby_device_count", df4.shape)
...
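The ranking pattern used twice above (sort each (uid, city) group by score, then re-group and join the ordered cid_ids into one comma string) can be seen on a toy frame; the data below is made up:

    import pandas as pd

    def set_join(lst):
        return ','.join([str(i) for i in set(lst)])

    toy = pd.DataFrame({"uid": [1, 1, 2], "city": ["a", "a", "b"],
                        "cid_id": [10, 11, 12], "ctcvr": [0.2, 0.9, 0.5]})
    queue = (toy.groupby(by=["uid", "city"])
                .apply(lambda x: x.sort_values(by="ctcvr", ascending=False))
                .reset_index(drop=True)
                .groupby(by=["uid", "city"])
                .agg({'cid_id': set_join})
                .reset_index(drop=False))
    # Caveat: set() inside set_join does not guarantee that the sorted
    # order survives the join; it mainly deduplicates the ids.
    print(queue)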
eda/esmm/Model_pipline/submit.sh

...
@@ -3,11 +3,11 @@ PYTHON_PATH=/home/gaoyazhe/miniconda3/bin/python
 MODEL_PATH=/srv/apps/ffm-baseline/eda/esmm
 DATA_PATH=/home/gaoyazhe/data

-echo "start timestamp"
+echo "start time"
 current=$(date "+%Y-%m-%d %H:%M:%S")
 timeStamp=$(date -d "$current" +%s)
 currentTimeStamp=$((timeStamp*1000+`date "+%N"`/1000000))
-echo $currentTimeStamp
+echo $current

 echo "rm leave tfrecord"
 rm ${DATA_PATH}/tr/*
...
@@ -15,11 +15,8 @@ rm ${DATA_PATH}/va/*
 rm ${DATA_PATH}/native/*
 rm ${DATA_PATH}/nearby/*

 echo "mysql to csv"
 mysql -u root -p3SYz54LS9#^9sBvC -h 10.66.157.22 -P 4000 -D jerry_test -e "select number,data from esmm_data2ffm_train" > ${DATA_PATH}/tr.csv
 mysql -u root -p3SYz54LS9#^9sBvC -h 10.66.157.22 -P 4000 -D jerry_test -e "select number,data from esmm_data2ffm_cv" > ${DATA_PATH}/va.csv
 mysql -u root -p3SYz54LS9#^9sBvC -h 10.66.157.22 -P 4000 -D jerry_test -e "select number,data from esmm_data2ffm_infer_native" > ${DATA_PATH}/native.csv
 mysql -u root -p3SYz54LS9#^9sBvC -h 10.66.157.22 -P 4000 -D jerry_test -e "select number,data from esmm_data2ffm_infer_nearby" > ${DATA_PATH}/nearby.csv

 echo "data2ffm"
 ${PYTHON_PATH} ${MODEL_PATH}/Feature_pipline/data2ffm.py > ${DATA_PATH}/infer.log

 echo "split data"
 split -l $((`wc -l < ${DATA_PATH}/tr.csv`/15)) ${DATA_PATH}/tr.csv -d -a 4 ${DATA_PATH}/tr/tr_ --additional-suffix=.csv
...
@@ -43,35 +40,35 @@ rm ${DATA_PATH}/va/va_*
 rm ${DATA_PATH}/native/native_*
 rm ${DATA_PATH}/nearby/nearby_*

-echo "data transform timestamp"
+echo "data transform time"
 current=$(date "+%Y-%m-%d %H:%M:%S")
 timeStamp=$(date -d "$current" +%s)
 currentTimeStamp=$((timeStamp*1000+`date "+%N"`/1000000))
-echo $currentTimeStamp
+echo $current

 echo "train..."
-${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/DeepCvrMTL.py --ctr_task_wgt=0.3 --learning_rate=0.0001 --deep_layers=256,128 --dropout=0.8,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=354332 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir="${DATA_PATH}" --task_type="train"
+${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/DeepCvrMTL.py --ctr_task_wgt=0.3 --learning_rate=0.0001 --deep_layers=256,128 --dropout=0.8,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=23 --feature_size=354332 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train

-echo "train timestamp"
+echo "train time"
 current=$(date "+%Y-%m-%d %H:%M:%S")
 timeStamp=$(date -d "$current" +%s)
 currentTimeStamp=$((timeStamp*1000+`date "+%N"`/1000000))
-echo $currentTimeStamp
-${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/send_mail.py
+echo $current

 echo "infer native..."
-${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/DeepCvrMTL.py --ctr_task_wgt=0.3 --learning_rate=0.0001 --deep_layers=256,128 --dropout=0.8,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=354332 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir="${DATA_PATH}/native" --task_type="infer" > ${DATA_PATH}/infer.log
+${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/DeepCvrMTL.py --ctr_task_wgt=0.3 --learning_rate=0.0001 --deep_layers=256,128 --dropout=0.8,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=354332 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/infer.log

 echo "infer nearby..."
-${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/DeepCvrMTL.py --ctr_task_wgt=0.3 --learning_rate=0.0001 --deep_layers=256,128 --dropout=0.8,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=354332 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir="${DATA_PATH}/nearby" --task_type="infer" > ${DATA_PATH}/infer.log
+${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/DeepCvrMTL.py --ctr_task_wgt=0.3 --learning_rate=0.0001 --deep_layers=256,128 --dropout=0.8,0.5 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=354332 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/infer.log

 echo "sort and 2sql"
 ${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/sort_and_2sql.py

-echo "infer and sort and 2sql timestamp"
+echo "infer and sort and 2sql time"
 current=$(date "+%Y-%m-%d %H:%M:%S")
 timeStamp=$(date -d "$current" +%s)
 currentTimeStamp=$((timeStamp*1000+`date "+%N"`/1000000))
-echo $currentTimeStamp
\ No newline at end of file
+echo $current
+${PYTHON_PATH} ${MODEL_PATH}/Model_pipline/send_mail.py
\ No newline at end of file
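For reference, the millisecond timestamp the script still computes (epoch seconds times 1000, plus the nanosecond field divided by 1,000,000) is equivalent to this Python one-liner:

    import time

    # Same value as $((timeStamp*1000 + `date "+%N"`/1000000)) in submit.sh.
    current_time_stamp = int(time.time() * 1000)
    print(current_time_stamp)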
eda/feededa/src/main/scala/com/gmei/EsmmData.scala

...
@@ -283,7 +283,7 @@ object EsmmPredData {
       case (device_city, cid_data) =>
         val device_id = Try(device_city.split(",")(0)).getOrElse("")
         val city_id = Try(device_city.split(",")(1)).getOrElse("")
-        val cids = Try(cid_data.toSeq.map(_.getAs[String]("merge_queue").split(",")).flatMap(_.zipWithIndex).sortBy(_._2).map(_._1).distinct.take(300).mkString(",")).getOrElse("")
+        val cids = Try(cid_data.toSeq.map(_.getAs[String]("merge_queue").split(",")).flatMap(_.zipWithIndex).sortBy(_._2).map(_._1).distinct.take(500).mkString(",")).getOrElse("")
         (device_id, city_id, s"$cids")
     }.filter(_._3 != "").toDF("device_id", "city_id", "merge_queue")
     raw_data1.createOrReplaceTempView("raw_data1")
...
@@ -312,7 +312,7 @@ object EsmmPredData {
     val native_data1 = sc.sql(
       s"""
          |select device_id,city_id as ucity_id,
-         |explode(split(split(native_queue, concat(',',split(native_queue,',')[300]))[0],',')) as cid_id
+         |explode(split(split(native_queue, concat(',',split(native_queue,',')[500]))[0],',')) as cid_id
          |from native_data
       """.stripMargin).withColumn("label", lit(0))
...
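Both hunks raise the per-user queue cap from 300 to 500. The Scala chain interleaves the merge_queue strings by position, deduplicates keeping the first occurrence, and truncates; the same logic in Python, on made-up queues:

    def merge_queues(queues, cap=500):
        """Interleave queues by position, dedupe keeping first hit, cap length."""
        indexed = []
        for q in queues:
            indexed.extend((cid, i) for i, cid in enumerate(q.split(",")))
        indexed.sort(key=lambda t: t[1])          # like sortBy(_._2), stable
        seen, merged = set(), []
        for cid, _ in indexed:                    # like map(_._1).distinct
            if cid not in seen:
                seen.add(cid)
                merged.append(cid)
        return ",".join(merged[:cap])             # like take(cap).mkString(",")

    print(merge_queues(["a,b,c", "b,d"], cap=3))  # -> a,b,d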
eda/merge_sort.py (new file, mode 0 → 100644)

def merge_sort(lst):
    if len(lst) <= 1:
        return lst
    middle = int(len(lst) / 2)
    left = merge_sort(lst[:middle])
    right = merge_sort(lst[middle:])
    merged = []
    while left and right:
        merged.append(left.pop(0) if left[0] <= right[0] else right.pop(0))
    merged.extend(right if right else left)
    return merged

data_lst = [6, 202, 100, 301, 38, 8, 1]
print(merge_sort(data_lst))
\ No newline at end of file
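A note on the new file: list.pop(0) is linear in the list length, so the merge loop above is quadratic in the worst case. A cursor-based merge (a sketch, not part of this commit) stays linear:

    def merge(left, right):
        """Linear-time merge using index cursors instead of pop(0)."""
        i = j = 0
        merged = []
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        merged.extend(left[i:] or right[j:])   # one side is already empty
        return merged

    print(merge([6, 100], [1, 38]))  # [1, 6, 38, 100]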
tensnsorflow/ffm.py

...
@@ -140,16 +140,23 @@ def get_data():
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
     sql = "select max(stat_date) from esmm_train_data"
     validate_date = con_sql(db, sql)[0].values.tolist()[0]
     print("validate_date:" + validate_date)
     temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
-    start = (temp - datetime.timedelta(days=14)).strftime("%Y-%m-%d")
+    start = (temp - datetime.timedelta(days=15)).strftime("%Y-%m-%d")
+    print(start)
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
-    sql = "select device_id,y,z,stat_date,ucity_id,cid_id,clevel1_id,ccity_name from esmm_train_data " \
-          "where stat_date >= '{}'".format(start)
-    df = con_sql(db, sql)
-    df = df.rename(columns={0: "device_id", 1: "y", 2: "z", 3: "stat_date", 4: "ucity_id", 5: "cid_id", 6: "clevel1_id", 7: "ccity_name"})
+    sql = "select e.device_id,e.y,e.z,e.stat_date,e.ucity_id,e.cid_id,e.clevel1_id,e.ccity_name," \
+          "u.device_type,u.manufacturer,u.channel," \
+          "home.jingxuan,home.zhibo,home.nose,home.eyes,home.weizheng,home.teeth,home.lunkuo," \
+          "home.meifu,home.xizhi,home.zhifang,home.longxiong,home.simi,home.maofa,home.gongli,home.korea " \
+          "from esmm_train_data e left join user_feature u on e.device_id = u.device_id " \
+          "left join home_tab_click home on e.device_id = home.device_id " \
+          "where e.stat_date >= '{}'".format(start)
+    df = con_sql(db, sql)
+    df = df.rename(columns={0: "device_id", 1: "y", 2: "z", 3: "stat_date", 4: "ucity_id", 5: "cid_id", 6: "clevel1_id", 7: "ccity_name"})
+    print("esmm data ok")
     print(df.head(2))
     ucity_id = list(set(df["ucity_id"].values.tolist()))
     cid = list(set(df["cid_id"].values.tolist()))
     df["clevel1_id"] = df["clevel1_id"].astype("str")
...
@@ -158,16 +165,16 @@ def get_data():
     df["z"] = df["z"].astype("str")
     df["y"] = df["stat_date"].str.cat([df["device_id"].values.tolist(), df["ucity_id"].values.tolist(), df["cid_id"].values.tolist(), df["y"].values.tolist(), df["z"].values.tolist()], sep=",")
-    df = df.drop("z", axis=1)
-    df = pd.merge(df, get_statistics(), how='left', on="device_id").fillna(0)
-    df = df.drop("device_id", axis=1)
+    df = df.drop(["z", "device_id"], axis=1).fillna(0.0)
     print(df.head(2))
+    print("fields:{}".format(df.shape[1] - 1))
+    print("features:{}".format(len(cid)))
     return df, validate_date, ucity_id, cid

 def transform(a, validate_date):
     model = multiFFMFormatPandas()
-    df = model.fit_transform(a, y="y", n=160000, processes=22)
+    df = model.fit_transform(a, y="y", n=160000, processes=26)
     df = pd.DataFrame(df)
     df["stat_date"] = df[0].apply(lambda x: x.split(",")[0])
     df["device_id"] = df[0].apply(lambda x: x.split(",")[1])
...
@@ -187,51 +194,30 @@ def transform(a,validate_date):
     test = test.drop("stat_date", axis=1)
     # print("train shape")
     # print(train.shape)
-    train.to_csv(path + "train.csv", sep="\t", index=False)
-    test.to_csv(path + "test.csv", sep="\t", index=False)
+    # train.to_csv(path + "train.csv", sep="\t", index=False)
+    # test.to_csv(path + "test.csv", sep="\t", index=False)
     return model

-def get_user_feature():
-    db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
-    sql = "select max(stat_date) from esmm_train_data"
-    validate_date = con_sql(db, sql)[0].values.tolist()[0]
-    print("validate_date:" + validate_date)
-    temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
-    start = (temp - datetime.timedelta(days=2)).strftime("%Y-%m-%d")
-    db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
-    sql = "select e.device_id,e.y,e.z,e.stat_date,e.ucity_id,e.cid_id,e.clevel1_id,e.ccity_name," \
-          "u.device_type,u.manufacturer,u.channel,home.total" \
-          "from (esmm_train_data e left join user_feature u on e.device_id = u.device_id) " \
-          "left join home_tab_click home on e.device_id = home.device_id" \
-          "where e.stat_date >= '{}'".format(start)
-    df = con_sql(db, sql)
-    df = df.rename(columns={0: "device_id", 1: "y", 2: "z", 3: "stat_date", 4: "ucity_id", 5: "cid_id", 6: "clevel1_id", 7: "ccity_name"})
-    print(df.head(2))

 def get_statistics():
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
     sql = "select device_id,total,精选,直播,鼻部,眼部,微整,牙齿,轮廓,美肤抗衰," \
           "吸脂,脂肪填充,隆胸,私密,毛发管理,公立,韩国 from home_tab_click"
     df = con_sql(db, sql)
     df = df.rename(columns={0: "device_id", 1: "total"})
     for i in df.columns.difference(["device_id", "total"]):
         df[i] = df[i] / df["total"]
         df[i] = df[i].apply(lambda x: format(x, ".4f"))
         df[i] = df[i].astype("float")
     df = df.drop("total", axis=1)
     return df

 def get_predict_set(ucity_id, cid, model):
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
-    sql = "select device_id,y,z,stat_date,ucity_id,cid_id,clevel1_id,ccity_name,label from esmm_pre_data"
+    sql = "select e.device_id,e.y,e.z,e.stat_date,e.ucity_id,e.cid_id,e.clevel1_id,e.ccity_name," \
+          "u.device_type,u.manufacturer,u.channel," \
+          "home.jingxuan,home.zhibo,home.nose,home.eyes,home.weizheng,home.teeth,home.lunkuo," \
+          "home.meifu,home.xizhi,home.zhifang,home.longxiong,home.simi,home.maofa,home.gongli,home.korea,e.label " \
+          "from esmm_pre_data e left join user_feature u on e.device_id = u.device_id " \
+          "left join home_tab_click home on e.device_id = home.device_id"
     df = con_sql(db, sql)
-    df = df.rename(columns={0: "device_id", 1: "y", 2: "z", 3: "stat_date", 4: "ucity_id", 5: "cid_id", 6: "clevel1_id", 7: "ccity_name", 8: "label"})
+    df = df.rename(columns={0: "device_id", 1: "y", 2: "z", 3: "stat_date", 4: "ucity_id", 5: "cid_id", 6: "clevel1_id", 7: "ccity_name", 26: "label"})
     print("before filter:")
     print(df.shape)
     df = df[df["cid_id"].isin(cid)]
     print("after cid filter:")
     print(df.shape)
     df = df[df["ucity_id"].isin(ucity_id)]
     print("after ucity filter:")
     print(df.shape)
     df["clevel1_id"] = df["clevel1_id"].astype("str")
     df["cid_id"] = df["cid_id"].astype("str")
...
@@ -241,11 +227,7 @@ def get_predict_set(ucity_id, cid,model):
     df["y"] = df["label"].str.cat([df["device_id"].values.tolist(), df["ucity_id"].values.tolist(), df["cid_id"].values.tolist(), df["y"].values.tolist(), df["z"].values.tolist()], sep=",")
-    df = df.drop(["z", "label"], axis=1)
-    df = pd.merge(df, get_statistics(), how='left', on="device_id").fillna(0)
-    df = df.drop("device_id", axis=1)
-    print("df ok")
-    print(df.shape)
+    df = df.drop(["z", "label", "device_id"], axis=1).fillna(0.0)
     print(df.head(2))
     df = model.transform(df, n=160000, processes=22)
     df = pd.DataFrame(df)
...
@@ -276,15 +258,13 @@ def get_predict_set(ucity_id, cid,model):
 if __name__ == "__main__":
-    get_user_feature()
     path = "/home/gmuser/ffm/"
     a = time.time()
-    # df, validate_date, ucity_id, cid = get_data()
-    # model = transform(df, validate_date)
-    # get_predict_set(ucity_id, cid, model)
-    # b = time.time()
-    # print("cost(分钟)")
-    # print((b-a)/60)
+    df, validate_date, ucity_id, cid = get_data()
+    model = transform(df, validate_date)
+    get_predict_set(ucity_id, cid, model)
+    b = time.time()
+    print("cost(分钟)")
+    print((b-a)/60)
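For orientation, multiFFMFormatPandas is the repo's own converter and its internals are not shown in this diff, but conventional FFM input encodes each row as "label field:index:value ...". A generic sketch under that assumption, with hypothetical field numbering and feature names:

    # Hypothetical FFM encoder: every (field, feature) pair becomes "field:index:value".
    def to_ffm_row(label, features, feature_index):
        """features: list of (field_id, feature_name, value) triples."""
        toks = []
        for field_id, name, value in features:
            idx = feature_index.setdefault(name, len(feature_index))
            toks.append("%d:%d:%s" % (field_id, idx, value))
        return "%s %s" % (label, " ".join(toks))

    index = {}
    row = to_ffm_row(1, [(0, "ucity_id=beijing", 1), (1, "cid_id=6001", 1)], index)
    print(row)  # -> "1 0:0:1 1:1:1"

The shared feature_index dictionary plays the role of the fitted vocabulary, which is why the same model object is reused for both the training and the predict sets above.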