ML / ffm-baseline / Commits / 3010cb4d

Commit 3010cb4d, authored Apr 25, 2019 by 张彦钊
Commit message: Modify test file (修改测试文件)
Parent: 7633f582

Showing 1 changed file with 5 additions and 32 deletions

tensnsorflow/multi.py  (+5, -32)
@@ -28,7 +28,7 @@ def feature_engineer():
    ti.tidbMapDatabase("jerry_test")
    spark.sparkContext.setLogLevel("WARN")
    sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
-         "u.channel,c.top,e.device_id,cut.time,dl.app_list,e.diary_service_id,feat.level3_ids," \
+         "u.channel,c.top,cut.time,dl.app_list,e.diary_service_id,feat.level3_ids," \
          "k.treatment_method,k.price_min,k.price_max,k.treatment_time,k.maintain_time,k.recover_time " \
          "from esmm_train_data e left join user_feature u on e.device_id = u.device_id " \
          "left join cid_type_top c on e.device_id = c.device_id " \
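For context: the query above is assembled through Python's implicit concatenation of adjacent string literals across backslash-continued lines, and the change simply drops e.device_id from the select list. The sketch below shows how that pattern is typically wired up and executed; only tidbMapDatabase("jerry_test") and setLogLevel("WARN") appear in the hunk itself, so the TiContext construction and the spark.sql call are assumptions, not code from this commit.

from pyspark.sql import SparkSession
import pytispark.pytispark as pti  # TiSpark Python bindings; assumed, not shown in this diff

spark = SparkSession.builder.appName("feature_engineer_demo").getOrCreate()
ti = pti.TiContext(spark)               # assumed setup: expose TiDB databases to the Spark catalog
ti.tidbMapDatabase("jerry_test")        # as in the hunk: map the jerry_test database
spark.sparkContext.setLogLevel("WARN")

# Adjacent string literals on backslash-continued lines concatenate into one SQL statement.
sql = "select e.y,e.z,e.stat_date,e.ucity_id " \
      "from esmm_train_data e " \
      "left join user_feature u on e.device_id = u.device_id"
df = spark.sql(sql)                     # assumed: the assembled query is run via Spark SQL
df.show(6)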
@@ -57,13 +57,13 @@ def feature_engineer():
    df = df.join(hospital, "diary_service_id", "left_outer").fillna("na")
    df.show(6)
    print(df.count())
    df = df.drop(["level2", "diary_service_id"])
    # db = pymysql.connect(host='172.16.30.143', port=3306, user='work',
    #                      passwd='BJQaT9VzDcuPBqkd', db='zhengxing')
    # df = df.drop_duplicates(["ucity_id", "level2_ids", "ccity_name", "device_type", "manufacturer",
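This hunk left-joins a lookup DataFrame, replaces the resulting nulls in string columns with the literal "na", and then drops columns that are no longer needed. A minimal, self-contained sketch of the same pattern follows; the toy data and column values are illustrative only, not the project's tables. Note that PySpark's DataFrame.drop has the signature drop(*cols), i.e. column names as separate arguments rather than a single list.

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("join-fillna-drop-demo").getOrCreate()

# Toy stand-ins for the real tables (illustrative only).
df = spark.createDataFrame(
    [(1, "101", "a"), (2, "102", "b"), (3, "999", "c")],
    ["y", "diary_service_id", "level2"])
hospital = spark.createDataFrame(
    [("101", "h1"), ("102", "h2")],
    ["diary_service_id", "hospital_id"])

# Left-outer join on the shared key; unmatched rows get a null hospital_id.
df = df.join(hospital, "diary_service_id", "left_outer")

# fillna("na") with a string value only fills string columns; numeric columns are untouched.
df = df.fillna("na")

df.show(6)
print(df.count())

# Drop the join key and the intermediate column once hospital_id is attached.
df = df.drop("level2", "diary_service_id")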
@@ -76,35 +76,8 @@ def feature_engineer():
    # 6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
    # 11: "time", 12: "app_list", 13: "service_id", 14: "level3_ids", 15: "level2"})
    #
    # db = pymysql.connect(host='172.16.40.158', port=4000, user='root', passwd='3SYz54LS9#^9sBvC', db='jerry_test')
    # sql = "select level2_id,treatment_method,price_min,price_max,treatment_time,maintain_time,recover_time " \
    #       "from train_Knowledge_network_data"
    # knowledge = con_sql(db, sql)
    # knowledge = knowledge.rename(columns={0: "level2", 1: "method", 2: "min", 3: "max",
    #                                       4: "treatment_time", 5: "maintain_time", 6: "recover_time"})
    # knowledge["level2"] = knowledge["level2"].astype("str")
    #
    # df = pd.merge(df, knowledge, on='level2', how='left')
    # df = df.drop("level2", axis=1)
    #
    # service_id = tuple(df["service_id"].unique())
    # db = pymysql.connect(host='172.16.30.143', port=3306, user='work',
    #                      passwd='BJQaT9VzDcuPBqkd', db='zhengxing')
    # sql = "select s.id,d.hospital_id from api_service s left join api_doctor d on s.doctor_id = d.id " \
    #       "where s.id in {}".format(service_id)
    # hospital = con_sql(db, sql)
    # hospital = hospital.rename(columns={0: "service_id", 1: "hospital_id"})
    # # print(hospital.head())
    # # print("hospital")
    # # print(hospital.count())
    # hospital["service_id"] = hospital["service_id"].astype("str")
    # df = pd.merge(df, hospital, on='service_id', how='left')
    # df = df.drop("service_id", axis=1)
    #
    # print(df.count())
    #
    # print("before")
    # print(df.shape)
    #
    # df = df.drop_duplicates(["ucity_id", "clevel2_id", "ccity_name", "device_type", "manufacturer",
    #                          "channel", "top", "time", "stat_date", "app_list", "hospital_id", "level3_ids"])
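The commented-out block in this hunk followed a pymysql-plus-pandas pattern: open a connection, pull a query into a DataFrame through a con_sql helper defined elsewhere in the repo, rename the positional columns, and merge the result into df. The body of con_sql is not shown in this diff; the version below is purely a hypothetical reconstruction of what such a helper usually looks like, offered only to make the removed calls readable, and it would also explain the rename(columns={0: ..., 1: ...}) calls that address columns by integer position.

import pymysql
import pandas as pd

def con_sql(db, sql):
    # Hypothetical reconstruction (not from the repo): run the query on an open
    # pymysql connection and return the rows as a DataFrame whose columns are
    # integer-labelled, which is why callers rename columns by position.
    cursor = db.cursor()
    cursor.execute(sql)
    rows = cursor.fetchall()
    db.close()
    return pd.DataFrame(list(rows))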