ML / ffm-baseline · Commits · c4d53c9d

Commit c4d53c9d authored Apr 12, 2019 by 王志伟

Test the knowledge graph data; test for 3 days.

parent 34ef13e2
Showing 1 changed file with 15 additions and 8 deletions

tensnsorflow/es/feature.py  +15  -8

@@ -32,7 +32,7 @@ def get_data():
     validate_date = con_sql(db, sql)[0].values.tolist()[0]
     print("validate_date:" + validate_date)
     temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
-    start = (temp - datetime.timedelta(days=300)).strftime("%Y-%m-%d")
+    start = (temp - datetime.timedelta(days=3)).strftime("%Y-%m-%d")
     print(start)
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
     sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
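
The only change in this hunk is the training window: days=300 becomes days=3, matching the "test for 3 days" in the commit message. A minimal sketch of the date arithmetic, assuming validate_date is a "%Y-%m-%d" string as in the surrounding code (window_start is a hypothetical helper name, not part of the repository):

    import datetime

    def window_start(validate_date, days=3):
        # Hypothetical helper mirroring the strptime/timedelta pattern above:
        # parse the validation date and step back `days` days.
        temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
        return (temp - datetime.timedelta(days=days)).strftime("%Y-%m-%d")

    print(window_start("2019-04-12"))   # 2019-04-09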

@@ -83,7 +83,8 @@ def get_data():
     unique_values = []
     features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
-                "channel", "top", "time", "stat_date", "hospital_id"]
+                "channel", "top", "time", "stat_date", "hospital_id", "method", "min", "max",
+                "treatment_time", "maintain_time", "recover_time"]
     for i in features:
         df[i] = df[i].astype("str")
         df[i] = df[i].fillna("lost")

@@ -98,7 +99,8 @@ def get_data():
     train = df[df["stat_date"] != validate_date + "stat_date"]
     test = df[df["stat_date"] == validate_date + "stat_date"]
     for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "time", "stat_date", "hospital_id"]:
+              "channel", "top", "time", "stat_date", "hospital_id", "method", "min", "max",
+              "treatment_time", "maintain_time", "recover_time"]:
         train[i] = train[i].map(value_map)
         test[i] = test[i].map(value_map)
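
The new knowledge-graph columns (method, min, max, treatment_time, maintain_time, recover_time) go through the same value_map lookup as the existing categorical features, so their string values need to be present in that map. A rough sketch of the assumed {category string -> integer id} shape, using hypothetical data; the actual construction of value_map lives outside this hunk:

    import pandas as pd

    frame = pd.DataFrame({"method": ["laser", "filler", "laser"]})

    # Assumed shape of value_map: every distinct category string gets an integer id.
    unique_values = sorted(frame["method"].astype("str").unique())
    value_map = {v: i + 1 for i, v in enumerate(unique_values)}

    frame["method"] = frame["method"].map(value_map)
    print(frame["method"].tolist())   # [2, 1, 2]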

@@ -139,16 +141,19 @@ def get_predict(date,value_map,app_list_map,level2_map,level3_map):
     db = pymysql.connect(host='10.66.157.22', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
     sql = "select e.y,e.z,e.label,e.ucity_id,feat.level2_ids,e.ccity_name," \
           "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,e.cid_id,cut.time," \
-          "dl.app_list,e.hospital_id,feat.level3_ids " \
+          "dl.app_list,e.hospital_id,feat.level3_ids,k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time " \
           "from esmm_pre_data e left join user_feature u on e.device_id = u.device_id " \
           "left join cid_type_top c on e.device_id = c.device_id " \
           "left join cid_time_cut cut on e.cid_id = cut.cid " \
           "left join device_app_list dl on e.device_id = dl.device_id " \
-          "left join diary_feat feat on e.cid_id = feat.diary_id limit 600"
+          "left join diary_feat feat on e.cid_id = feat.diary_id " \
+          "left join train_Knowledge_network_data k on feat.level2 = k.level2_id limit 600"
     df = con_sql(db, sql)
     df = df.rename(columns={0: "y", 1: "z", 2: "label", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
                             6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
-                            11: "cid_id", 12: "time", 13: "app_list", 14: "hospital_id", 15: "level3_ids"})
+                            11: "cid_id", 12: "time", 13: "app_list", 14: "hospital_id", 15: "level3_ids",
+                            16: "method", 17: "min", 18: "max", 19: "treatment_time", 20: "maintain_time",
+                            21: "recover_time"})
     df["stat_date"] = date
     print(df.head(6))
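
con_sql is defined elsewhere in feature.py and is not shown in this diff; judging from the rename call with integer keys, it appears to return the raw query rows as a DataFrame with positional column labels. A sketch under that assumption, not the repository's actual implementation:

    import pandas as pd

    def con_sql(db, sql):
        # Assumed behaviour: execute the query on the pymysql connection and
        # wrap the result rows in a DataFrame with integer columns 0, 1, 2, ...
        cursor = db.cursor()
        cursor.execute(sql)
        rows = cursor.fetchall()
        db.close()
        return pd.DataFrame(list(rows))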

@@ -164,7 +169,8 @@ def get_predict(date,value_map,app_list_map,level2_map,level3_map):
     df["uid"] = df["device_id"]
     df["city"] = df["ucity_id"]
     features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
-                "channel", "top", "time", "stat_date", "hospital_id"]
+                "channel", "top", "time", "stat_date", "hospital_id", "method", "min", "max",
+                "treatment_time", "maintain_time", "recover_time"]
     for i in features:
         df[i] = df[i].astype("str")
         df[i] = df[i].fillna("lost")

@@ -176,7 +182,8 @@ def get_predict(date,value_map,app_list_map,level2_map,level3_map):
     nearby_pre = nearby_pre.drop("label", axis=1)
     for i in ["ucity_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "time", "stat_date", "hospital_id"]:
+              "channel", "top", "time", "stat_date", "hospital_id", "method", "min", "max",
+              "treatment_time", "maintain_time", "recover_time"]:
         native_pre[i] = native_pre[i].map(value_map)
         # TODO: categories not covered by value_map come out as NaN; fill with 0 for now and improve later
         native_pre[i] = native_pre[i].fillna(0)
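
Per the TODO above, any category that value_map does not cover comes out of .map() as NaN and is filled with 0 for now. A minimal illustration of that behaviour with hypothetical values:

    import pandas as pd

    value_map = {"beijing": 1, "shanghai": 2}
    native_pre = pd.DataFrame({"ucity_id": ["beijing", "chengdu"]})

    # "chengdu" is missing from value_map, so map() yields NaN and fillna(0)
    # falls back to 0, which is the stop-gap the TODO describes.
    native_pre["ucity_id"] = native_pre["ucity_id"].map(value_map).fillna(0)
    print(native_pre["ucity_id"].tolist())   # [1.0, 0.0]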