ML / ffm-baseline / Commits / 352626c8

Commit 352626c8
authored Jun 12, 2019 by 张彦钊
parent 278c0203

    Standardize a variable name in the feature-engineering file
    (original message: 规范特征工程文件中一个变量名字)

Showing 1 changed file with 29 additions and 4 deletions:

tensnsorflow/feature_test.py (+29, -4)
```diff
@@ -309,7 +309,7 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
     native_pre = spark.createDataFrame(rdd.filter(lambda x: x[0] == 0).map(lambda x: (x[3], x[4], x[5]))) \
         .toDF("city", "uid", "cid_id")
     print("native csv")
-    native_pre.coalesce(1).write.format('com.databricks.spark.csv').save(path + "native/", header='true')
+    native_pre.repartion(1).write.format('com.databricks.spark.csv').save(path + "native/", header='true')
     spark.createDataFrame(rdd.filter(lambda x: x[0] == 0)
                           .map(lambda x: (x[1], x[2], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15], x[16]))) \
         .toDF("y", "z", "app_list", "level2_list", "level3_list", "tag1_list", "tag2_list", "tag3_list", "tag4_list",
```
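The changed line swaps `coalesce(1)` for `repartion(1)` (here and again in the next hunk). `repartion` is not a DataFrame method, so this line would raise `AttributeError` at runtime; presumably `repartition` was intended. A minimal, self-contained sketch of the presumed intent, with a toy DataFrame standing in for `native_pre`:

```python
# Sketch only: assumes `repartion` is a typo for `repartition`.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("repartition-sketch").getOrCreate()
# Toy stand-in for the native_pre DataFrame built above.
native_pre = spark.createDataFrame([("beijing", "uid1", "cid1")],
                                   ["city", "uid", "cid_id"])

# repartition(1) shuffles into a single partition, so the write produces one
# CSV part file while the upstream stages keep their parallelism; coalesce(1)
# avoids the shuffle but can collapse the whole preceding stage onto one task.
native_pre.repartition(1).write.format('com.databricks.spark.csv') \
    .save("hdfs:///strategy/esmm/native/", header='true')

spark.stop()
```

That trade-off is the plausible motivation for the change: both calls still yield a single output file, but `repartition(1)` pays for it with a shuffle instead of with lost upstream parallelism.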
```diff
@@ -322,7 +322,7 @@ def get_predict(date,value_map,app_list_map,leve2_map,leve3_map):
     nearby_pre = spark.createDataFrame(rdd.filter(lambda x: x[0] == 1).map(lambda x: (x[3], x[4], x[5]))) \
         .toDF("city", "uid", "cid_id")
     print("nearby csv")
-    nearby_pre.coalesce(1).write.format('com.databricks.spark.csv').save(path + "nearby/", header='true')
+    nearby_pre.repartion(1).write.format('com.databricks.spark.csv').save(path + "nearby/", header='true')
     spark.createDataFrame(rdd.filter(lambda x: x[0] == 1)
         .map(
```
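Both hunks edit the two branches of the same split: one labelled RDD is filtered on the flag in `x[0]` (0 for native candidates, 1 for nearby) and each branch is projected into a small DataFrame. A toy reconstruction of that pattern, with the field layout assumed from the indices in the diff (`x[3]` = city, `x[4]` = uid, `x[5]` = cid_id; the real rows carry many more fields):

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("split-sketch").getOrCreate()

rows = [
    (0, 0.1, 0.2, "beijing", "uid1", "cid1"),   # x[0] == 0 -> native
    (1, 0.3, 0.4, "shanghai", "uid2", "cid2"),  # x[0] == 1 -> nearby
]
rdd = spark.sparkContext.parallelize(rows)

# Same RDD, filtered twice on the leading flag, then projected and renamed.
native_pre = spark.createDataFrame(
    rdd.filter(lambda x: x[0] == 0).map(lambda x: (x[3], x[4], x[5]))
).toDF("city", "uid", "cid_id")
nearby_pre = spark.createDataFrame(
    rdd.filter(lambda x: x[0] == 1).map(lambda x: (x[3], x[4], x[5]))
).toDF("city", "uid", "cid_id")

spark.stop()
```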
```diff
@@ -350,8 +350,33 @@ if __name__ == '__main__':
     path = "hdfs:///strategy/esmm/"
     local_path = "/home/gmuser/esmm/"
-    validate_date, value_map, app_list_map, leve2_map, leve3_map = feature_engineer()
-    get_predict(validate_date, value_map, app_list_map, leve2_map, leve3_map)
+    # validate_date, value_map, app_list_map, leve2_map, leve3_map = feature_engineer()
+    # get_predict(validate_date, value_map, app_list_map, leve2_map, leve3_map)
+    sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
+          "u.channel,c.top,cut.time,dl.app_list,feat.level3_ids,doctor.hospital_id," \
+          "wiki.tag as tag1,question.tag as tag2,search.tag as tag3,budan.tag as tag4," \
+          "ot.tag as tag5,sixin.tag as tag6,cart.tag as tag7," \
+          "k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time " \
+          "from jerry_test.esmm_train_data_dwell e left join jerry_test.user_feature u on e.device_id = u.device_id " \
+          "left join jerry_test.cid_type_top c on e.device_id = c.device_id " \
+          "left join jerry_test.cid_time_cut cut on e.cid_id = cut.cid " \
+          "left join jerry_test.device_app_list dl on e.device_id = dl.device_id " \
+          "left join jerry_test.diary_feat feat on e.cid_id = feat.diary_id " \
+          "left join jerry_test.knowledge k on feat.level2 = k.level2_id " \
+          "left join jerry_test.wiki_tag wiki on e.device_id = wiki.device_id " \
+          "left join jerry_test.question_tag question on e.device_id = question.device_id " \
+          "left join jerry_test.search_tag search on e.device_id = search.device_id " \
+          "left join jerry_test.budan_tag budan on e.device_id = budan.device_id " \
+          "left join jerry_test.order_tag ot on e.device_id = ot.device_id " \
+          "left join jerry_test.sixin_tag sixin on e.device_id = sixin.device_id " \
+          "left join jerry_test.cart_tag cart on e.device_id = cart.device_id " \
+          "left join eagle.src_zhengxing_api_service service on e.diary_service_id = service.id " \
+          "left join eagle.src_zhengxing_api_doctor doctor on service.doctor_id = doctor.id " \
+          "where e.stat_date >= '2019-06-10'"
+    df = spark.sql(sql)
+    df.save("hdfs:///strategy/esmm/native/d.csv", "com.databricks.spark.csv")
     spark.stop()
```
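Two details in the added block are worth flagging. The SQL text is assembled by implicit string concatenation across the `\` continuation lines, so every fragment has to keep its trailing space or comma (note `"...recover_time "` before `"from ..."`); drop one and adjacent tokens fuse silently into invalid SQL. And `df.save(path, source)` is the old Spark 1.x writer entry point, which no longer exists on `DataFrame` in Spark 2.x; there the same write goes through `DataFrameWriter`, as the rest of this file already does. A minimal sketch of that equivalent, assuming a Spark 2.x session and a stand-in query:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("writer-sketch").getOrCreate()
df = spark.sql("select 1 as y, 2 as z")  # stand-in for the big join above

# Spark 2.x equivalent of df.save("...", "com.databricks.spark.csv"):
df.write.format("com.databricks.spark.csv") \
    .save("hdfs:///strategy/esmm/native/d.csv")

spark.stop()
```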