3d047df6
Commit
3d047df6
authored
Jan 24, 2019
by
张彦钊
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
删除日期特征
parent
f2142794
Hide whitespace changes
Inline
Side-by-side
Showing 3 changed files with 10 additions and 9 deletions:

  tensnsorflow/feature_engineering.py   +6 / -5
  tensnsorflow/pipe.sh                  +3 / -3
  tensnsorflow/test.py                  +1 / -1
tensnsorflow/feature_engineering.py

@@ -50,7 +50,7 @@ def get_data():
     unique_values = []
     features = ["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
-                "channel", "top", "time", "stat_date"]
+                "channel", "top", "time"]
     for i in features:
         df[i] = df[i].astype("str")
         df[i] = df[i].fillna("lost")
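A side note on the preprocessing in this hunk: after `astype("str")`, missing values have already become the literal string "nan", so the following `fillna("lost")` has nothing left to fill. A minimal pandas sketch (toy data invented for illustration) showing the difference when the fill happens before the cast:

```python
import numpy as np
import pandas as pd

# Toy stand-in for df; "channel" is one of the categorical columns above.
df = pd.DataFrame({"channel": ["app_store", np.nan, "wandoujia"]})

# Cast first: NaN becomes the string "nan" and fillna finds nothing to replace.
cast_then_fill = df["channel"].astype("str").fillna("lost")
print(cast_then_fill.tolist())   # ['app_store', 'nan', 'wandoujia']

# Fill first, then cast: the "lost" placeholder survives.
fill_then_cast = df["channel"].fillna("lost").astype("str")
print(fill_then_cast.tolist())   # ['app_store', 'lost', 'wandoujia']
```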
@@ -73,9 +73,11 @@ def get_data():
     df = df.drop("device_id", axis=1)
     train = df[df["stat_date"] != validate_date + "stat_date"]
+    train = train.drop("stat_date", axis=1)
     test = df[df["stat_date"] == validate_date + "stat_date"]
+    test = test.drop("stat_date", axis=1)
     for i in ["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "l1", "time", "stat_date", "l2"]:
+              "channel", "top", "l1", "time", "l2"]:
         train[i] = train[i].map(value_map)
         test[i] = test[i].map(value_map)
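For context on the pattern in this hunk: `value_map` appears to be a plain dict from category value to integer id, built from the unique values collected earlier in the file (an assumption based on this diff alone), the frames are split on `stat_date`, and the date column is then dropped before encoding. A simplified, self-contained sketch of that flow with invented data and a hypothetical `build_value_map` helper (the split condition is reduced to a plain comparison here):

```python
import pandas as pd

def build_value_map(frame, columns):
    # Hypothetical helper: one integer id per distinct category value,
    # starting at 1 so that 0 stays free as a fallback index.
    value_map = {}
    for col in columns:
        for v in frame[col].astype("str").unique():
            value_map.setdefault(v, len(value_map) + 1)
    return value_map

df = pd.DataFrame({
    "stat_date": ["2019-01-22", "2019-01-22", "2019-01-23"],
    "channel":   ["app_store", "wandoujia", "app_store"],
})
validate_date = "2019-01-23"

train = df[df["stat_date"] != validate_date].drop("stat_date", axis=1)
test = df[df["stat_date"] == validate_date].drop("stat_date", axis=1)

value_map = build_value_map(train, ["channel"])
train["channel"] = train["channel"].map(value_map)
test["channel"] = test["channel"].map(value_map)
print(train["channel"].tolist(), test["channel"].tolist())   # [1, 2] [1]
```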
@@ -114,14 +116,13 @@ def get_predict(date,value_map):
                          6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "l1",
                          11: "l2", 12: "device_id", 13: "cid_id", 14: "time"})
-    df["stat_date"] = date
     print("predict shape")
     print(df.shape)
     df["uid"] = df["device_id"]
     df["city"] = df["ucity_id"]
     features = ["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
-                "channel", "top", "time", "stat_date"]
+                "channel", "top", "time"]
     for i in features:
         df[i] = df[i].astype("str")
         df[i] = df[i].fillna("lost")
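The integer-keyed rename dict that opens this hunk suggests the prediction rows are loaded without headers and the columns are named by position. A minimal sketch of that pattern (the toy columns and names below are illustrative, not the repo's actual schema):

```python
import pandas as pd

# Rows loaded without a header: columns are just 0, 1, 2, ...
raw = pd.DataFrame([[123, "beijing", "app_store"],
                    [456, "shanghai", "wandoujia"]])

# Name the positional columns, mirroring the index-keyed rename above.
df = raw.rename(columns={0: "device_id", 1: "ucity_id", 2: "channel"})
print(df.columns.tolist())   # ['device_id', 'ucity_id', 'channel']
```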
@@ -138,7 +139,7 @@ def get_predict(date,value_map):
     nearby_pre = nearby_pre.drop("label", axis=1)
     for i in ["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
-              "channel", "top", "l1", "time", "stat_date", "l2"]:
+              "channel", "top", "l1", "time", "l2"]:
         native_pre[i] = native_pre[i].map(value_map)
         # TODO: categories not covered by value_map come out as NaN; fill with 0 for now, refine later
         native_pre[i] = native_pre[i].fillna(0)
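The TODO in this hunk notes that categories missing from `value_map` turn into NaN after `.map()` and are currently filled with 0. One possible refinement, sketched here only as an illustration (not the repo's implementation): reserve a dedicated out-of-vocabulary id when the map is built, so unseen values cannot collide with whatever index 0 means.

```python
import pandas as pd

value_map = {"app_store": 1, "wandoujia": 2}   # toy vocabulary
OOV_INDEX = len(value_map) + 1                 # reserved id for unseen categories

native_pre = pd.DataFrame({"channel": ["app_store", "toutiao", "wandoujia"]})

# The unseen category "toutiao" maps to NaN; route it to the reserved OOV id
# instead of 0, which may already be a meaningful feature index.
native_pre["channel"] = (native_pre["channel"]
                         .map(value_map)
                         .fillna(OOV_INDEX)
                         .astype(int))
print(native_pre["channel"].tolist())   # [1, 3, 2]
```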
tensnsorflow/pipe.sh

@@ -32,15 +32,15 @@ rm ${DATA_PATH}/nearby/nearby_*
 echo "train..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=256,128,64,32 --dropout=0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=2 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=1460 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=256,128,64,32 --dropout=0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=2 --embedding_size=16 --batch_size=1024 --field_size=10 --feature_size=1460 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH} --task_type=train
 echo "infer native..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=256,128,64,32 --dropout=0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=1460 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/infer.log
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=256,128,64,32 --dropout=0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=10 --feature_size=1460 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/native --task_type=infer > ${DATA_PATH}/infer.log
 echo "infer nearby..."
-${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=256,128,64,32 --dropout=0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=11 --feature_size=1460 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/infer.log
+${PYTHON_PATH} ${MODEL_PATH}/train.py --ctr_task_wgt=0.9 --learning_rate=0.0001 --deep_layers=256,128,64,32 --dropout=0.3,0.3,0.3,0.3 --optimizer=Adam --num_epochs=1 --embedding_size=16 --batch_size=1024 --field_size=10 --feature_size=1460 --l2_reg=0.005 --log_steps=100 --num_threads=36 --model_dir=${DATA_PATH}/model_ckpt/DeepCvrMTL/ --data_dir=${DATA_PATH}/nearby --task_type=infer > ${DATA_PATH}/infer.log
 echo "sort and 2sql"
 ${PYTHON_PATH} ${MODEL_PATH}/sort_to_sql.py
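The only change in pipe.sh is `--field_size=11` becoming `--field_size=10` in all three train.py invocations, which is consistent with dropping the `stat_date` field from the feature lists in feature_engineering.py: each sample now carries one fewer categorical field. A small sketch (hypothetical helper, not part of the repo) of deriving the flag from the feature column list so the shell script and the data pipeline stay in sync:

```python
# Hypothetical consistency helper: compute --field_size from the feature
# columns actually emitted by the pipeline instead of hard-coding it.
FEATURE_COLUMNS = ["ucity_id", "clevel1_id", "ccity_name", "device_type",
                   "manufacturer", "channel", "top", "l1", "time", "l2"]

def field_size_flag(columns=FEATURE_COLUMNS):
    return "--field_size={}".format(len(columns))

print(field_size_flag())   # --field_size=10
```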
tensnsorflow/test.py

@@ -29,7 +29,7 @@ def gen_tfrecords(in_file):
     for i in range(df.shape[0]):
         feats = ["ucity_id", "clevel1_id", "ccity_name", "device_type", "manufacturer",
-                 "channel", "top", "l1", "time", "stat_date", "l2"]
+                 "channel", "top", "l1", "time", "l2"]
         id = np.array([])
         for j in feats:
             id = np.append(id, df[j][i])
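gen_tfrecords collects each row's feature ids into a NumPy array; presumably the rest of the function (not shown in this diff) packs that array into a tf.train.Example and writes it with a TFRecord writer. A minimal, self-contained sketch of that step, assuming the ids are integer indices and using the standard TensorFlow proto/writer API (the "ids" key and output path are illustrative):

```python
import numpy as np
import tensorflow as tf

def write_example(id_array, path):
    # Pack one row's feature ids into a tf.train.Example with an int64 list
    # under the key "ids" (key name chosen for illustration, not from the repo).
    feature = {"ids": tf.train.Feature(
        int64_list=tf.train.Int64List(value=[int(x) for x in id_array]))}
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    with tf.io.TFRecordWriter(path) as writer:
        writer.write(example.SerializeToString())

write_example(np.array([3, 17, 42, 8]), "/tmp/sample.tfrecord")
```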