ML / ffm-baseline / Commits

Commit b4d5aaaf
authored May 22, 2019 by 张彦钊

change test file

parent 1963df08

Showing 1 changed file with 33 additions and 119 deletions

tensnsorflow/multi.py (+33, -119)
@@ -6,7 +6,6 @@ import pytispark.pytispark as pti
 from pyspark.sql import SparkSession
 import datetime
 import pandas as pd
 import numpy as np

 def app_list_func(x, l):
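
The body of app_list_func is collapsed in this diff. Judging only from its call sites below (a comma-separated multi-value column plus an id-to-index dict), a helper of this shape would fit; this body is a hypothetical sketch, not the committed code:

    def app_list_func(x, l):
        # Split a comma-separated id string and map each id through the
        # feature-index dict l, falling back to 0 for unseen or null ids.
        # Hypothetical reconstruction: the real body is outside this diff.
        if x is None:
            return [0]
        return [l.get(i, 0) for i in str(x).split(",")]
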
@@ -42,147 +41,76 @@ def feature_engineer():
     start = (temp - datetime.timedelta(days=2)).strftime("%Y-%m-%d")
     print(start)
-    sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
-          "u.channel,c.top,cut.time,dl.app_list,feat.level3_ids,doctor.hospital_id," \
-          "k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time " \
-          "from jerry_test.esmm_train_data e left join jerry_test.user_feature u on e.device_id = u.device_id " \
-          "left join jerry_test.cid_type_top c on e.device_id = c.device_id " \
-          "left join jerry_test.cid_time_cut cut on e.cid_id = cut.cid " \
-          "left join jerry_test.device_app_list dl on e.device_id = dl.device_id " \
+    sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids " \
+          "from jerry_test.esmm_train_data e " \
           "left join jerry_test.diary_feat feat on e.cid_id = feat.diary_id " \
-          "left join jerry_test.train_Knowledge_network_data k on feat.level2 = k.level2_id " \
-          "left join eagle.src_zhengxing_api_service service on e.diary_service_id = service.id " \
-          "left join eagle.src_zhengxing_api_doctor doctor on service.doctor_id = doctor.id " \
           "where e.stat_date >= '{}'".format(start)

     df = spark.sql(sql)
-    df = df.drop_duplicates(["ucity_id", "level2_ids", "ccity_name", "device_type", "manufacturer",
-                             "channel", "top", "time", "stat_date", "app_list", "hospital_id", "level3_ids"])
-    features = ["ucity_id", "ccity_name", "device_type", "manufacturer", "channel", "top", "time",
-                "stat_date", "hospital_id", "treatment_method", "price_min", "price_max",
-                "treatment_time", "maintain_time", "recover_time"]
+    features = ["ucity_id", "stat_date"]
     df = df.na.fill(dict(zip(features, features)))

-    apps_number, app_list_map = multi_hot(df, "app_list", 1)
-    level2_number, leve2_map = multi_hot(df, "level2_ids", 1 + apps_number)
-    level3_number, leve3_map = multi_hot(df, "level3_ids", 1 + apps_number + level2_number)
+    apps_number, app_list_map = multi_hot(df, "level2_ids", 1)

     unique_values = []
     for i in features:
         unique_values.extend(df.select(i).distinct().rdd.map(lambda x: x[0]).collect())

-    temp = list(range(2 + apps_number + level2_number + level3_number,
-                      2 + apps_number + level2_number + level3_number + len(unique_values)))
+    temp = list(range(2 + apps_number, 2 + apps_number + len(unique_values)))
     value_map = dict(zip(unique_values, temp))
-    rdd = df.select("app_list", "level2_ids", "level3_ids", "stat_date", "ucity_id", "ccity_name",
-                    "device_type", "manufacturer", "channel", "top", "time", "hospital_id",
-                    "treatment_method", "price_min", "price_max", "treatment_time", "maintain_time",
-                    "recover_time", "y", "z").rdd
+    rdd = df.select("level2_ids", "stat_date", "ucity_id", "y", "z").rdd
     rdd.persist()

     # TODO: remove the train filter below after going live, since the most recent day's data should also be used for training
-    train = rdd.filter(lambda x: x[3] != validate_date) \
-        .map(lambda x: (app_list_func(x[0], app_list_map), app_list_func(x[1], leve2_map),
-                        app_list_func(x[2], leve3_map), value_map[x[3]], value_map[x[4]],
-                        value_map[x[5]], value_map[x[6]], value_map[x[7]], value_map[x[8]],
-                        value_map[x[9]], value_map[x[10]], value_map[x[11]], value_map[x[12]],
-                        value_map[x[13]], value_map[x[14]], value_map[x[15]], value_map[x[16]],
-                        value_map[x[17]], x[18], x[19]))
-    test = rdd.filter(lambda x: x[3] == validate_date) \
-        .map(lambda x: (app_list_func(x[0], app_list_map), app_list_func(x[1], leve2_map),
-                        app_list_func(x[2], leve3_map), value_map[x[3]], value_map[x[4]],
-                        value_map[x[5]], value_map[x[6]], value_map[x[7]], value_map[x[8]],
-                        value_map[x[9]], value_map[x[10]], value_map[x[11]], value_map[x[12]],
-                        value_map[x[13]], value_map[x[14]], value_map[x[15]], value_map[x[16]],
-                        value_map[x[17]], x[18], x[19]))
-    spark.createDataFrame(test).toDF("app_list", "level2_ids", "level3_ids", "stat_date", "ucity_id",
-                                     "ccity_name", "device_type", "manufacturer", "channel", "top",
-                                     "time", "hospital_id", "treatment_method", "price_min", "price_max",
-                                     "treatment_time", "maintain_time", "recover_time", "y", "z") \
-        .repartition(1).write.format("tfrecords").option("recordType", "SequenceExample") \
-        .save(path=path + "va/", mode="overwrite")
+    train = rdd.filter(lambda x: x[1] != validate_date) \
+        .map(lambda x: (app_list_func(x[0], app_list_map), [value_map[x[2]], value_map[x[1]]], x[3], x[4]))
+    test = rdd.filter(lambda x: x[1] == validate_date) \
+        .map(lambda x: (app_list_func(x[0], app_list_map), [value_map[x[2]], value_map[x[1]]], x[3], x[4]))
+    spark.createDataFrame(test).toDF("level2_ids", "ids", "y", "z") \
+        .repartition(1).write.format("tfrecords").save(path=path + "va/", mode="overwrite")
     print("va write done")
-    spark.createDataFrame(train).toDF("app_list", "level2_ids", "level3_ids", "stat_date", "ucity_id",
-                                      "ccity_name", "device_type", "manufacturer", "channel", "top",
-                                      "time", "hospital_id", "treatment_method", "price_min", "price_max",
-                                      "treatment_time", "maintain_time", "recover_time", "y", "z") \
-        .repartition(1).write.format("tfrecords").option("recordType", "SequenceExample") \
-        .save(path=path + "tr/", mode="overwrite")
+    spark.createDataFrame(train).toDF("level2_ids", "ids", "y", "z") \
+        .repartition(1).write.format("tfrecords").save(path=path + "tr/", mode="overwrite")
     print("done")
     rdd.unpersist()
-    return validate_date, value_map, app_list_map, leve2_map, leve3_map
+    return validate_date, value_map, app_list_map
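
multi_hot is also defined outside this hunk. Its call sites above — multi_hot(df, "level2_ids", 1) returns a count plus an id-to-index map, and the next index block would start at 1 + apps_number — suggest it collects the distinct ids of a comma-separated column and assigns them consecutive global feature indices. A minimal sketch under that assumption (not the committed body):

    def multi_hot(df, column, start_index):
        # Collect the distinct ids appearing in a comma-separated
        # multi-value column, then assign each id a consecutive global
        # index starting at start_index. Behavior inferred from call sites.
        ids = df.select(column).rdd.flatMap(
            lambda row: str(row[0]).split(",") if row[0] is not None else []
        ).distinct().collect()
        index_map = dict(zip(ids, range(start_index, start_index + len(ids))))
        return len(ids), index_map
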
-def get_predict(date, value_map, app_list_map, level2_map, level3_map):
-    sql = "select e.y,e.z,e.label,e.ucity_id,feat.level2_ids,e.ccity_name," \
-          "u.device_type,u.manufacturer,u.channel,c.top,e.device_id,e.cid_id,cut.time," \
-          "dl.app_list,e.hospital_id,feat.level3_ids," \
-          "k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time " \
-          "from esmm_pre_data e left join user_feature u on e.device_id = u.device_id " \
-          "left join cid_type_top c on e.device_id = c.device_id " \
-          "left join cid_time_cut cut on e.cid_id = cut.cid " \
-          "left join device_app_list dl on e.device_id = dl.device_id " \
-          "left join diary_feat feat on e.cid_id = feat.diary_id " \
-          "left join train_Knowledge_network_data k on feat.level2 = k.level2_id"
+def get_predict(date, value_map, app_list_map):
+    sql = "select e.y,e.z,e.label,e.ucity_id,feat.level2_ids from esmm_pre_data e " \
+          "left join diary_feat feat on e.cid_id = feat.diary_id"
-    features = ["app_list", "level2_ids", "level3_ids", "ucity_id", "ccity_name", "device_type",
-                "manufacturer", "channel", "top", "time", "hospital_id", "treatment_method",
-                "price_min", "price_max", "treatment_time", "maintain_time", "recover_time"]
+    features = ["ucity_id"]
     df = spark.sql(sql)
     df = df.na.fill(dict(zip(features, features)))
-    df = df.drop_duplicates(["ucity_id", "level2_ids", "ccity_name", "device_type", "manufacturer",
-                             "device_id", "cid_id", "label", "channel", "top", "time", "app_list",
-                             "hospital_id", "level3_ids"])
-    rdd = df.select("app_list", "level2_ids", "level3_ids", "ucity_id", "device_id", "cid_id", "label",
-                    "y", "z", "ccity_name", "device_type", "manufacturer", "channel", "time",
-                    "hospital_id", "treatment_method", "price_min", "price_max", "treatment_time",
-                    "maintain_time", "recover_time", "top") \
-        .rdd.map(lambda x: (app_list_func(x[0], app_list_map), app_list_func(x[1], level2_map),
-                            app_list_func(x[2], level3_map), x[3], x[4], x[5], x[6], x[7], x[8],
-                            value_map.get(x[3], 300000), value_map.get(x[9], 299999),
-                            value_map.get(x[10], 299998), value_map.get(x[11], 299997),
-                            value_map.get(x[12], 299996), value_map.get(x[13], 299995),
-                            value_map.get(x[14], 299994), value_map.get(x[15], 299993),
-                            value_map.get(x[16], 299992), value_map.get(x[17], 299991),
-                            value_map.get(x[18], 299990), value_map.get(x[19], 299989),
-                            value_map.get(x[20], 299988), value_map.get(x[21], 299987),
-                            value_map[date]))
+    rdd = df.select("level2_ids", "ucity_id", "device_id", "cid_id", "label", "y", "z") \
+        .rdd.map(lambda x: (app_list_func(x[0], app_list_map), x[1], x[2], x[3], x[4], x[5], x[6],
+                            [value_map.get(x[1], 300000), value_map.get(date, 299999)]))

     rdd.persist()
-    native_pre = spark.createDataFrame(rdd.filter(lambda x: x[6] == 0).map(lambda x: (x[3], x[4], x[5]))) \
+    native_pre = spark.createDataFrame(rdd.filter(lambda x: x[4] == 0).map(lambda x: (x[1], x[2], x[3]))) \
         .toDF("city", "uid", "cid_id")
     print("native")
     native_pre.toPandas().to_csv(local_path + "native.csv", header=True)
-    spark.createDataFrame(rdd.filter(lambda x: x[6] == 0)
-                          .map(lambda x: (x[0], x[1], x[2], x[7], x[8], x[9], x[10], x[11], x[12], x[13],
-                                          x[14], x[15], x[16], x[17], x[18], x[19], x[20], x[21], x[22], x[23]))) \
-        .toDF("app_list", "level2_ids", "level3_ids", "y", "z", "ucity_id", "ccity_name", "device_type",
-              "manufacturer", "channel", "time", "hospital_id", "treatment_method", "price_min",
-              "price_max", "treatment_time", "maintain_time", "recover_time", "top", "stat_date") \
-        .repartition(1).write.format("tfrecords").option("recordType", "SequenceExample") \
+    spark.createDataFrame(rdd.filter(lambda x: x[4] == 0).map(lambda x: (x[0], x[5], x[6], x[7]))) \
+        .toDF("level2_ids", "y", "z", "ids").repartition(1).write.format("tfrecords") \
         .save(path=path + "native/", mode="overwrite")
-    nearby_pre = spark.createDataFrame(rdd.filter(lambda x: x[6] == 1).map(lambda x: (x[3], x[4], x[5]))) \
+    nearby_pre = spark.createDataFrame(rdd.filter(lambda x: x[4] == 1).map(lambda x: (x[1], x[2], x[3]))) \
         .toDF("city", "uid", "cid_id")
     print("nearby")
     nearby_pre.toPandas().to_csv(local_path + "nearby.csv", header=True)
-    spark.createDataFrame(rdd.filter(lambda x: x[6] == 1)
-                          .map(lambda x: (x[0], x[1], x[2], x[7], x[8], x[9], x[10], x[11], x[12], x[13],
-                                          x[14], x[15], x[16], x[17], x[18], x[19], x[20], x[21], x[22], x[23]))) \
-        .toDF("app_list", "level2_ids", "level3_ids", "y", "z", "ucity_id", "ccity_name", "device_type",
-              "manufacturer", "channel", "time", "hospital_id", "treatment_method", "price_min",
-              "price_max", "treatment_time", "maintain_time", "recover_time", "top", "stat_date") \
-        .repartition(1).write.format("tfrecords").option("recordType", "SequenceExample") \
+    spark.createDataFrame(rdd.filter(lambda x: x[4] == 1).map(lambda x: (x[0], x[5], x[6], x[7]))) \
+        .toDF("level2_ids", "y", "z", "ids").repartition(1).write.format("tfrecords") \
         .save(path=path + "nearby/", mode="overwrite")
     rdd.unpersist()
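
The .write.format("tfrecords") calls depend on an external Spark/TensorFlow connector (e.g. spark-tensorflow-connector) being on the job's classpath. As a rough illustration only — feature names come from the four columns the new code writes, but the dtypes and file name are assumptions — the emitted Example records could be parsed back in TensorFlow 1.x style like this:

    import tensorflow as tf

    def parse_fn(record):
        # Column layout assumed from toDF("level2_ids", "ids", "y", "z")
        # above; dtypes are guesses, not taken from the commit.
        features = {
            "level2_ids": tf.VarLenFeature(tf.int64),  # variable-length id list
            "ids": tf.FixedLenFeature([2], tf.int64),  # [ucity index, stat_date index]
            "y": tf.FixedLenFeature([], tf.float32),   # first label (assumed float)
            "z": tf.FixedLenFeature([], tf.float32),   # second label (assumed float)
        }
        return tf.parse_single_example(record, features)

    dataset = tf.data.TFRecordDataset(["tr/part-r-00000"]).map(parse_fn)
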
@@ -197,20 +125,6 @@ def con_sql(db,sql):
     return df

-def test():
-    sql = "select stat_date,cid_id,y,ccity_name from esmm_train_data limit 60"
-    rdd = spark.sql(sql).select("stat_date", "cid_id", "y", "ccity_name") \
-        .rdd.map(lambda x: (x[0], x[1], x[2], x[3]))
-    df = spark.createDataFrame(rdd)
-    df.show(6)
-    # spark.sql("use online")
-    # spark.sql("ADD JAR /srv/apps/brickhouse-0.7.1-SNAPSHOT.jar")
-    # spark.sql("ADD JAR /srv/apps/hive-udf-1.0-SNAPSHOT.jar")
-    # spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
-    # spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
-    #
-    # spark.sql("select cl_type from online.tl_hdfs_maidian_view where partition_date = '20190312' limit 6").show()
 if __name__ == '__main__':
     sparkConf = SparkConf().set("spark.hive.mapred.supports.subdirectories", "true") \
         .set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", "true") \
@@ -228,8 +142,8 @@ if __name__ == '__main__':
     path = "hdfs:///strategy/esmm/"
     local_path = "/home/gmuser/esmm/"

-    validate_date, value_map, app_list_map, leve2_map, leve3_map = feature_engineer()
-    get_predict(validate_date, value_map, app_list_map, leve2_map, leve3_map)
+    validate_date, value_map, app_list_map = feature_engineer()
+    get_predict(validate_date, value_map, app_list_map)
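
The remainder of the __main__ block is collapsed in this view. A minimal sketch of how a Hive-enabled session is typically built from a SparkConf like the one above — the builder chain here is an assumption, not the committed code:

    from pyspark import SparkConf
    from pyspark.sql import SparkSession

    sparkConf = SparkConf() \
        .set("spark.hive.mapred.supports.subdirectories", "true") \
        .set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", "true")

    # enableHiveSupport() lets spark.sql() reach the jerry_test / eagle
    # tables referenced in the queries above.
    spark = SparkSession.builder.config(conf=sparkConf) \
        .enableHiveSupport().getOrCreate()
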