ML / ffm-baseline / Commits
Commit a207254f authored Jun 05, 2019 by Your Name
update

parent 998a9aa1
Showing 1 changed file with 27 additions and 6 deletions
eda/esmm/Model_pipline/dist_predict.py @ a207254f
from datetime import date, timedelta
import tensorflow as tf
import pymysql
from pyspark.conf import SparkConf
import pytispark.pytispark as pti
from pyspark.sql import SparkSession
import datetime
import pandas as pd
import time
from pyspark import StorageLevel


def model_fn(features, labels, mode, params):
    """Bulid Model function f(x) for Estimator."""
...
...
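The body of model_fn is collapsed behind the "..." markers above. Purely for orientation, here is a minimal sketch (an assumption, not the implementation in this repository; the feature key "x" and the hidden-layer sizes are invented) of how an ESMM-style model_fn for tf.estimator.Estimator typically exposes the three probabilities that are read back later through predict_keys:

def model_fn_sketch(features, labels, mode, params):
    """Hypothetical ESMM skeleton; the real network in dist_predict.py is elided above."""
    # Assumption: `features` carries a dense float tensor under the key "x".
    net = tf.layers.dense(features["x"], params.get("hidden_units", 64), activation=tf.nn.relu)
    ctr_logits = tf.layers.dense(net, 1)  # CTR tower head
    cvr_logits = tf.layers.dense(net, 1)  # CVR tower head
    pctr = tf.sigmoid(ctr_logits)         # predicted click-through rate
    pcvr = tf.sigmoid(cvr_logits)         # predicted conversion rate given a click
    pctcvr = pctr * pcvr                  # ESMM identity: pCTCVR = pCTR * pCVR
    predictions = {"pctr": pctr, "pcvr": pcvr, "pctcvr": pctcvr}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    # TRAIN / EVAL branches (losses over the click and click-and-convert labels) would follow here.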
@@ -135,7 +141,7 @@ def input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):
    #print(batch_features,batch_labels)
    return batch_features, batch_labels


-def main(_):
+def esmm_predict(dist_data):
    dt_dir = (date.today() + timedelta(-1)).strftime('%Y%m%d')
    model_dir = "hdfs://172.16.32.4:8020/strategy/esmm/model_ckpt/DeepCvrMTL/" + dt_dir
    te_files = ["hdfs://172.16.32.4:8020/strategy/esmm/nearby/part-r-00000"]
...
...
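The hunk header above shows only the signature input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False); its body is collapsed. As a hedged sketch of what such an input_fn usually looks like for TFRecord part files on HDFS under TF 1.x tf.data (the feature names and shapes in feature_spec are invented for illustration, not taken from this repository):

def input_fn_sketch(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):
    """Hypothetical input_fn; the parsing spec below is illustrative only."""
    feature_spec = {
        "y": tf.FixedLenFeature([], tf.float32),    # click label (assumed name)
        "z": tf.FixedLenFeature([], tf.float32),    # conversion label (assumed name)
        "ids": tf.FixedLenFeature([15], tf.int64),  # categorical id features (assumed)
    }

    def _parse(record):
        parsed = tf.parse_single_example(record, feature_spec)
        labels = {"y": parsed.pop("y"), "z": parsed.pop("z")}
        return parsed, labels

    dataset = tf.data.TFRecordDataset(filenames).map(_parse, num_parallel_calls=8)
    if perform_shuffle:
        dataset = dataset.shuffle(buffer_size=256)
    dataset = dataset.repeat(num_epochs).batch(batch_size).prefetch(1)
    batch_features, batch_labels = dataset.make_one_shot_iterator().get_next()
    return batch_features, batch_labels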
@@ -153,13 +159,28 @@ def main(_):
        log_step_count_steps=100, save_summary_steps=100)
    Estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir="hdfs://172.16.32.4:8020/strategy/esmm/model_ckpt/DeepCvrMTL/", params=model_params, config=config)

    preds = Estimator.predict(input_fn=lambda: input_fn(te_files, num_epochs=1, batch_size=10000), predict_keys=["pctcvr", "pctr", "pcvr"])

    with open("/home/gmuser/esmm/nearby" + "/pred.txt", "w") as fo:
        for prob in preds:
            fo.write("%f\t%f\t%f\n" % (prob['pctr'], prob['pcvr'], prob['pctcvr']))

    preds = Estimator.predict(input_fn=lambda: input_fn(dist_data, num_epochs=1, batch_size=10000), predict_keys=["pctcvr", "pctr", "pcvr"])
    indices = []
    for prob in preds:
        indices.append([prob['pctr'], prob['pcvr'], prob['pctcvr']])
    return indices


if __name__ == "__main__":
    sparkConf = SparkConf().set("spark.hive.mapred.supports.subdirectories", "true") \
        .set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", "true") \
        .set("spark.tispark.plan.allow_index_double_read", "false") \
        .set("spark.tispark.plan.allow_index_read", "true") \
        .set("spark.sql.extensions", "org.apache.spark.sql.TiExtensions") \
        .set("spark.tispark.pd.addresses", "172.16.40.158:2379").set("spark.io.compression.codec", "lzf") \
        .set("spark.driver.maxResultSize", "8g").set("spark.sql.avro.compression.codec", "snappy")
    spark = SparkSession.builder.config(conf=sparkConf).enableHiveSupport().getOrCreate()
    spark.sparkContext.setLogLevel("WARN")
    path = "hdfs://172.16.32.4:8020/strategy/esmm/"
    b = time.time()

    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()
...
...
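In TF 1.x, tf.app.run() with no arguments dispatches to a module-level main(argv), while this commit replaces def main(_): with def esmm_predict(dist_data):. The wrapper below is therefore only a hedged guess at how the driver block could tie the pieces together and keep the rows returned by esmm_predict using the pandas import above; the function, input path, and output file are hypothetical and not part of this commit:

def main(_):
    # Placeholder path; the real candidate part files live wherever the upstream job writes them.
    dist_files = ["hdfs://172.16.32.4:8020/strategy/esmm/PLACEHOLDER/part-r-00000"]
    indices = esmm_predict(dist_files)  # list of [pctr, pcvr, pctcvr] rows
    df = pd.DataFrame(indices, columns=["pctr", "pcvr", "pctcvr"])
    df.to_csv("/home/gmuser/esmm/dist_pred.txt", sep="\t", header=False, index=False)  # assumed output file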