ML / ffm-baseline · Commits

Commit fa28e054, authored Apr 09, 2019 by 张彦钊
Parent: 98b5d9b2

    esmm test project: add hospital id and diary level-3 categories

Showing 1 changed file with 26 additions and 18 deletions

tensnsorflow/es/feature.py (+26 −18)
@@ -54,14 +54,20 @@ def get_data():
+                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
+                            11: "time", 12: "app_list", 13: "service_id", 14: "level3_ids"})
     print("esmm data ok")
     print(df.shape)
-    # print(df.head(2))
+    print(df.head(2))
+    service_id = tuple(df["service_id"].unique())
+    db = pymysql.connect(host='rdsfewzdmf0jfjp9un8xj.mysql.rds.aliyuncs.com', port=3306, user='work', passwd='BJQaT9VzDcuPBqkd', db='zhengxing')
+    sql = "select s.id,d.hospital_id from api_service s left join api_doctor d on s.doctor_id = d.id where s.id in {}".format(service_id)
+    db = pymysql.connect(host='rdsfewzdmf0jfjp9un8xj.mysql.rds.aliyuncs.com', port=3306, user='work', passwd='BJQaT9VzDcuPBqkd', db='zhengxing')
+    sql = "select s.id,d.hospital_id from api_service s left join api_doctor d on s.doctor_id = d.id " \
+          "where s.id in {}".format(service_id)
+    hospital = con_sql(db, sql)
+    hospital = hospital.rename(columns={0: "service_id", 1: "hospital_id"})
+    print(hospital.head())
+    df = pd.merge(df, hospital, on='service_id', how='left')
+    df = df.drop("service_id", axis=1)
+    print(df.head())
     print("before")
     print(df.shape)
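Note: `con_sql` is a helper defined elsewhere in this repository; the hunks here only call it. A minimal sketch of what it presumably does, assuming a plain pymysql connection (the real helper is not shown in this diff): it runs the query and returns the rows as a DataFrame with default integer column labels, which is why every call site immediately renames columns 0, 1, ... to real names.

import pandas as pd

def con_sql(db, sql):
    # Sketch only: execute the query on an open pymysql connection and
    # return the result set as a DataFrame with default integer column
    # labels (0, 1, ...), matching the rename(columns={0: ..., 1: ...})
    # calls seen throughout this file.
    cursor = db.cursor()
    cursor.execute(sql)
    rows = cursor.fetchall()
    db.close()
    return pd.DataFrame(list(rows))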
@@ -70,9 +76,9 @@ def get_data():
     df = df.drop_duplicates(["ucity_id", "clevel2_id", "ccity_name", "device_type", "manufacturer",
                              "channel", "top", "time", "stat_date", "app_list", "hospital_id", "level3_ids"])
-    app_list_number, app_list_map = multi_hot(df, "app_list", 1)
-    level2_number, level2_map = multi_hot(df, "clevel2_id", 1 + app_list_number)
-    level3_number, level3_ids = multi_hot(df, "hospital_id", 1 + app_list_number + level2_number)
+    app_list_number, app_list_map = multi_hot(df, "app_list", 2)
+    level2_number, level2_map = multi_hot(df, "clevel2_id", 2 + app_list_number)
+    level3_number, level3_map = multi_hot(df, "level3_ids", 2 + app_list_number + level2_number)
     unique_values = []
     features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
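`multi_hot` is also defined outside these hunks. Judging by the call sites, it builds the vocabulary of a multi-valued (comma-separated) column and assigns each distinct token a consecutive integer id starting at the given offset; this commit moves the starting offset from 1 to 2. A rough sketch under those assumptions:

def multi_hot(df, column, begin):
    # Sketch only: collect every distinct token of a comma-separated
    # multi-value column and assign consecutive integer ids starting
    # at `begin`. Returns the token count and the token -> id map,
    # matching how the call sites consume the two return values.
    tokens = set()
    for value in df[column].dropna():
        tokens.update(str(value).split(","))
    token_map = {t: i for i, t in enumerate(sorted(tokens), start=begin)}
    return len(tokens), token_map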
@@ -84,7 +90,7 @@ def get_data():
         df[i] = df[i] + i
         unique_values.extend(list(df[i].unique()))
-    temp = list(range(1 + app_list_number + level2_number + level3_number,
-                      1 + app_list_number + level2_number + level3_number + len(unique_values)))
+    temp = list(range(2 + app_list_number + level2_number + level3_number,
+                      2 + app_list_number + level2_number + level3_number + len(unique_values)))
     value_map = dict(zip(unique_values, temp))
     df = df.drop("device_id", axis=1)
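The base offset in `temp` changes from 1 to 2 in step with the `multi_hot` offsets above, so the global feature-id space stays contiguous. With made-up sizes, the layout after this commit would be:

# Hypothetical sizes, purely to illustrate the id layout:
app_list_number, level2_number, level3_number = 1000, 40, 60

# ids 2 .. 1001     -> app_list tokens        (begin = 2)
# ids 1002 .. 1041  -> clevel2_id tokens      (begin = 2 + 1000)
# ids 1042 .. 1101  -> level3_ids tokens      (begin = 2 + 1000 + 40)
# ids 1102 ..       -> value_map entries for the single-valued
#                      features (ucity_id, ccity_name, ...)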
@@ -103,7 +109,7 @@ def get_data():
     write_csv(train, "tr", 100000)
     write_csv(test, "va", 80000)
-    return validate_date, value_map, app_list_map, level2_map
+    return validate_date, value_map, app_list_map, level2_map, level3_map


 def app_list_func(x, l):
@@ -128,10 +134,11 @@ def write_csv(df,name,n):
             temp.to_csv(path + name + "/{}_{}.csv".format(name, i), index=False)


-def get_predict(date, value_map, app_list_map, level2_map):
+def get_predict(date, value_map, app_list_map, level2_map, level3_map):
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
     sql = "select e.y,e.z,e.label,e.ucity_id,feat.level2_ids,e.ccity_name," \
-          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,e.cid_id,cut.time,dl.app_list " \
+          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,e.cid_id,cut.time," \
+          "dl.app_list,e.hospital_id,feat.level3_ids " \
           "from esmm_pre_data e left join user_feature u on e.device_id = u.device_id " \
           "left join cid_type_top c on e.device_id = c.device_id " \
           "left join cid_time_cut cut on e.cid_id = cut.cid " \
@@ -139,8 +146,8 @@ def get_predict(date,value_map,app_list_map,level2_map):
           "left join diary_feat feat on e.cid_id = feat.diary_id"
     df = con_sql(db, sql)
     df = df.rename(columns={0: "y", 1: "z", 2: "label", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
-                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
-                            11: "cid_id", 12: "time", 13: "app_list"})
+                            6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
+                            11: "cid_id", 12: "time", 13: "app_list", 14: "hospital_id", 15: "level3_ids"})
     df["stat_date"] = date
     print(df.head(6))
@@ -148,13 +155,15 @@ def get_predict(date,value_map,app_list_map,level2_map):
     df["app_list"] = df["app_list"].apply(app_list_func, args=(app_list_map,))
     df["clevel2_id"] = df["clevel2_id"].fillna("lost_na")
     df["clevel2_id"] = df["clevel2_id"].apply(app_list_func, args=(level2_map,))
+    df["level3_ids"] = df["level3_ids"].fillna("lost_na")
+    df["level3_ids"] = df["level3_ids"].apply(app_list_func, args=(level3_map,))
     # print("predict shape")
     # print(df.shape)
     df["uid"] = df["device_id"]
     df["city"] = df["ucity_id"]
     features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
-                "channel", "top", "time", "stat_date"]
+                "channel", "top", "time", "stat_date", "hospital_id"]
     for i in features:
         df[i] = df[i].astype("str")
         df[i] = df[i].fillna("lost")
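`app_list_func` (declared near the `@@ -103` hunk above but not shown in full) is applied to `app_list`, `clevel2_id`, and now `level3_ids`, each with its matching token map. Presumably it translates a comma-separated value string into the integer ids from that map; the exact output format is not visible in this diff. A sketch under that assumption:

def app_list_func(x, l):
    # Sketch only: map each comma-separated token through the lookup
    # dict `l`, falling back to 0 for tokens the map has not seen,
    # and join the resulting ids back into a string.
    return ",".join(str(l.get(token, 0)) for token in str(x).split(","))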
@@ -166,7 +175,7 @@ def get_predict(date,value_map,app_list_map,level2_map):
     nearby_pre = nearby_pre.drop("label", axis=1)
     for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "time", "stat_date"]:
+              "channel", "top", "time", "stat_date", "hospital_id"]:
         native_pre[i] = native_pre[i].map(value_map)
         # TODO: categories not covered by value_map become NaN; fill with 0 for now, improve later
         native_pre[i] = native_pre[i].fillna(0)
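The TODO in this hunk flags a real pandas behavior: `Series.map` turns any category missing from `value_map` into NaN. A toy illustration of the temporary fix (the data here is invented):

import pandas as pd

s = pd.Series(["beijing", "shanghai", "unknown_city"])
value_map = {"beijing": 1102, "shanghai": 1103}
mapped = s.map(value_map)   # "unknown_city" has no entry -> NaN
mapped = mapped.fillna(0)   # the temporary fix from the TODO
print(mapped.tolist())      # [1102.0, 1103.0, 0.0]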
@@ -175,7 +184,6 @@ def get_predict(date,value_map,app_list_map,level2_map):
         # TODO: categories not covered by value_map become NaN; fill with 0 for now, improve later
         nearby_pre[i] = nearby_pre[i].fillna(0)
     print("native")
     print(native_pre.shape)
@@ -192,6 +200,6 @@ def get_predict(date,value_map,app_list_map,level2_map):
 if __name__ == '__main__':
     train_data_set = "esmm_train_data"
     path = "/data/esmm/"
-    date, value, app_list, level2 = get_data()
-    # get_predict(date, value,app_list,level2)
+    date, value, app_list, level2, level3 = get_data()
+    get_predict(date, value, app_list, level2, level3)