ML / ffm-baseline · Commits

Commit 3bfe07d5
Authored Sep 05, 2019 by 张彦钊
Modify the number of records written to the rerank file on each write
Parent: ff7a6e10

Showing 1 changed file with 10 additions and 18 deletions:
eda/esmm/Model_pipline/feature_engineering.py (+10 -18)
@@ -159,9 +159,6 @@ def feature_engineer():
     sql = "select distinct recover_time from knowledge"
     unique_values.extend(get_unique(db, sql))
-    cid_star = ["star_0", "star_1", "star_2", "star_3", "star_4", "star_5", "star_3.5"]
-    unique_values.extend(cid_star)
     # unique_values.append("video")
     db = pymysql.connect(host='172.16.40.158', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
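get_unique is not part of this diff; presumably it is a small helper elsewhere in the module that runs the query on the open pymysql connection and returns the distinct values as a flat list. A minimal sketch under that assumption (the name and behaviour are inferred, not confirmed by this commit):

def get_unique(db, sql):
    # Hypothetical helper: execute the query and return the first column
    # of every row, so the values can be appended to unique_values.
    cursor = db.cursor()
    try:
        cursor.execute(sql)
        return [row[0] for row in cursor.fetchall()]
    finally:
        cursor.close()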
@@ -182,7 +179,7 @@ def feature_engineer():
                 "channel", "top", "time", "stat_date", "hospital_id", "treatment_method", "price_min",
                 "price_max", "treatment_time", "maintain_time", "recover_time", "app_list", "level3_ids",
                 "level2_ids", "tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7",
-                "search_tag2", "search_tag3", "content_level"]
+                "search_tag2", "search_tag3"]
     unique_values.extend(features)
     print("unique_values length")
     print(len(unique_values))
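The distinct values collected in unique_values are presumably the keys of the value_map dictionary used in the value_map.get(...) calls further down, i.e. every categorical value gets a small integer id. A hedged sketch of that encoding step, with toy data standing in for the real list:

# Hedged sketch (not from this diff): build an id-encoding dict from the
# collected unique values; unseen values later fall back to the per-column
# defaults passed to value_map.get(value, default).
unique_values = ["wifi", "ios", "android"]  # stand-in for the real list
value_map = dict(zip(unique_values, range(1, len(unique_values) + 1)))
print(value_map.get("wifi"))        # 1
print(value_map.get("unknown", 5))  # falls back to the default id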
@@ -198,7 +195,7 @@ def feature_engineer():
           "wiki.tag as tag1,question.tag as tag2,search.tag as tag3,budan.tag as tag4," \
           "ot.tag as tag5,sixin.tag as tag6,cart.tag as tag7,doris.search_tag2,doris.search_tag3," \
           "k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time," \
-          "e.device_id,e.cid_id,concat('star','_',star.content_level) as content_level " \
+          "e.device_id,e.cid_id " \
           "from jerry_test.esmm_train_data_dwell e left join jerry_test.user_feature u on e.device_id = u.device_id " \
           "left join jerry_test.cid_type_top c on e.device_id = c.device_id " \
           "left join jerry_test.cid_time_cut cut on e.cid_id = cut.cid " \
@@ -215,14 +212,13 @@ def feature_engineer():
           "left join eagle.src_zhengxing_api_service service on e.diary_service_id = service.id " \
           "left join eagle.src_zhengxing_api_doctor doctor on service.doctor_id = doctor.id " \
           "left join jerry_test.search_doris doris on e.device_id = doris.device_id and e.stat_date = doris.get_date " \
-          "left join eagle.src_mimas_prod_api_diary star on e.cid_id = star.id " \
           "where e.stat_date >= '{}'".format(start)
     df = spark.sql(sql)
     df = df.drop_duplicates(["ucity_id", "level2_ids", "ccity_name", "device_type", "manufacturer",
                              "channel", "top", "time", "stat_date", "app_list", "hospital_id", "level3_ids",
-                             "tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7", "content_level"])
+                             "tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7"])
     df = df.na.fill(dict(zip(features, features)))
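The df.na.fill(dict(zip(features, features))) call fills nulls in each listed column with that column's own name, so downstream dictionary lookups see a stable sentinel string instead of None. A self-contained toy illustration of the same pattern (the demo data and session names are made up here):

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("na_fill_demo").getOrCreate()
features = ["channel", "top"]
df = spark.createDataFrame([("wifi", None), (None, "3")], features)
# dict(zip(features, features)) == {"channel": "channel", "top": "top"}:
# every null in a column is replaced by the column's name as a sentinel.
df.na.fill(dict(zip(features, features))).show()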
@@ -230,8 +226,7 @@ def feature_engineer():
                     "tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7",
                     "ucity_id", "ccity_name", "device_type", "manufacturer", "channel", "top", "time",
                     "hospital_id", "treatment_method", "price_min", "price_max", "treatment_time",
-                    "maintain_time", "recover_time", "search_tag2", "search_tag3", "cid_id", "device_id", "content_level") \
+                    "maintain_time", "recover_time", "search_tag2", "search_tag3", "cid_id", "device_id") \
         .rdd.repartition(200).map(
         lambda x: (x[0], float(x[1]), float(x[2]),
                    app_list_func(x[3], app_list_map), app_list_func(x[4], leve2_map),
                    app_list_func(x[5], leve3_map), app_list_func(x[6], leve2_map),
                    app_list_func(x[7], leve2_map),
@@ -241,7 +236,7 @@ def feature_engineer():
                    value_map.get(x[16], 5), value_map.get(x[17], 6), value_map.get(x[18], 7),
                    value_map.get(x[19], 8), value_map.get(x[20], 9), value_map.get(x[21], 10),
                    value_map.get(x[22], 11), value_map.get(x[23], 12), value_map.get(x[24], 13),
-                   value_map.get(x[25], 14), value_map.get(x[26], 15), value_map.get(x[31], 16)],
+                   value_map.get(x[25], 14), value_map.get(x[26], 15)],
                    app_list_func(x[27], leve2_map), app_list_func(x[28], leve3_map),
                    x[13], x[29], x[30]))
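In the lambda above, multi-valued fields (app_list, the tag columns, level2/level3 ids) go through app_list_func, while scalar categorical fields go through value_map.get(value, default). app_list_func itself is not shown in this commit; a hedged reconstruction, where the comma delimiter and the 0 fallback id are assumptions:

def app_list_func(raw, id_map):
    # Hypothetical reconstruction (not part of this diff): split a delimited
    # string of tokens and map each token to its integer id, falling back to 0
    # for tokens missing from the map.
    if raw is None:
        return [0]
    return [id_map.get(token, 0) for token in str(raw).split(",")]

The scalar columns use the dictionary directly, e.g. value_map.get(x[16], 5), where 5 is that column's default id for unseen values.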
@@ -291,8 +286,7 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
           "dl.app_list,e.hospital_id,feat.level3_ids," \
           "wiki.tag as tag1,question.tag as tag2,search.tag as tag3,budan.tag as tag4," \
           "ot.tag as tag5,sixin.tag as tag6,cart.tag as tag7,doris.search_tag2,doris.search_tag3," \
-          "k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time," \
-          "concat('star','_',star.content_level) as content_level " \
+          "k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time " \
           "from jerry_test.esmm_pre_data e " \
           "left join jerry_test.user_feature u on e.device_id = u.device_id " \
           "left join jerry_test.cid_type_top c on e.device_id = c.device_id " \
@@ -307,15 +301,13 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
           "left join jerry_test.sixin_tag sixin on e.device_id = sixin.device_id " \
           "left join jerry_test.cart_tag cart on e.device_id = cart.device_id " \
           "left join jerry_test.knowledge k on feat.level2 = k.level2_id " \
-          "left join jerry_test.search_doris doris on e.device_id = doris.device_id and e.stat_date = doris.get_date " \
-          "left join eagle.src_mimas_prod_api_diary star on e.cid_id = star.id " \
-          "where e.device_id = '868771031984211'"
+          "left join jerry_test.search_doris doris on e.device_id = doris.device_id and e.stat_date = doris.get_date"
     features = ["ucity_id", "ccity_name", "device_type", "manufacturer", "channel", "top", "time",
                 "hospital_id", "treatment_method", "price_min", "price_max", "treatment_time",
                 "maintain_time", "recover_time", "app_list", "level3_ids", "level2_ids",
                 "tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7",
-                "search_tag2", "search_tag3", "content_level"]
+                "search_tag2", "search_tag3"]
     df = spark.sql(sql)
     df = df.drop_duplicates(["ucity_id", "device_id", "cid_id"])
@@ -326,7 +318,7 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
                     "tag1", "tag2", "tag3", "tag4", "tag5", "tag6", "tag7",
                     "ucity_id", "ccity_name", "device_type", "manufacturer", "channel", "top", "time",
                     "hospital_id", "treatment_method", "price_min", "price_max", "treatment_time",
-                    "maintain_time", "recover_time", "search_tag2", "search_tag3", "content_level") \
+                    "maintain_time", "recover_time", "search_tag2", "search_tag3") \
         .rdd.repartition(200).map(
         lambda x: (x[0], float(x[1]), float(x[2]), x[3], x[4], x[5],
                    app_list_func(x[6], app_list_map), app_list_func(x[7], leve2_map),
                    app_list_func(x[8], leve3_map), app_list_func(x[9], leve2_map),
@@ -340,7 +332,7 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
                    value_map.get(x[23], 9), value_map.get(x[24], 10), value_map.get(x[25], 11),
                    value_map.get(x[26], 12), value_map.get(x[27], 13), value_map.get(x[28], 14),
-                   value_map.get(x[29], 15), value_map.get(x[32], 16)],
+                   value_map.get(x[29], 15)],
                    app_list_func(x[30], leve2_map), app_list_func(x[31], leve3_map)))
     rdd.persist(storageLevel=StorageLevel.MEMORY_ONLY_SER)
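The persist call caches the mapped RDD in serialized memory, presumably so that the downstream actions reuse one computed result instead of re-running the SQL and the map. A hedged, standalone sketch of the same call; this file uses StorageLevel.MEMORY_ONLY_SER, which exists on older PySpark releases, while on current PySpark the MEMORY_ONLY level already stores pickled data and plays the same role:

from pyspark import StorageLevel
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("persist_demo").getOrCreate()
rdd = spark.sparkContext.parallelize(range(10)).map(lambda n: n * n)
# Serialized in-memory caching: cheaper on memory than keeping deserialized
# objects, at the cost of unpickling on every access.
rdd.persist(storageLevel=StorageLevel.MEMORY_ONLY)
print(rdd.count(), rdd.sum())  # both actions reuse the cached partitions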