ML / ffm-baseline / Commits

Commit 7757e6b9, authored 5 years ago by Your Name
Parent: ad91ae82 (branch gyz)

Commit message: test features
Showing 1 changed file with 18 additions and 27 deletions:

eda/esmm/Model_pipline/dist_predict.py (+18, -27)
@@ -176,9 +176,6 @@ def main(te_file):
     # indices.append([prob['pctr'], prob['pcvr'], prob['pctcvr']])
     # return indices
 
-def test_map(x):
-    return x * x
-
 if __name__ == "__main__":
     b = time.time()
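This hunk deletes the test_map helper, a throwaway square function that the commented-out smoke test removed in the next hunk (name.repartition(5).map(lambda x: test_map(x))) relied on. For reference, a minimal reconstruction of that smoke test, assuming an already-created SparkSession named spark as in the rest of dist_predict.py:

    # Hypothetical reconstruction of the removed smoke test; `spark` is an
    # existing SparkSession, as elsewhere in dist_predict.py.
    def test_map(x):
        return x * x

    name = spark.sparkContext.parallelize([1, 2, 3, 4, 5])
    test = name.repartition(5).map(lambda x: test_map(x))
    print(test.collect())  # e.g. [1, 4, 9, 16, 25]; order may vary after repartition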
@@ -195,31 +192,25 @@ if __name__ == "__main__":
     spark.sparkContext.setLogLevel("WARN")
     path = "hdfs://172.16.32.4:8020/strategy/esmm/"
-    # df = spark.read.format("tfrecords").load(path+"nearby/part-r-00000")
-    # df.show()
-    # name = spark.sparkContext.parallelize([1,2,3,4,5])
-    #
-    # test = name.repartition(5).map(lambda x: test_map(x))
-    # print(test)
-    # print(test.collect())
-    #
+    df = spark.read.format("tfrecords").load(path+"nearby/part-r-00000")
+    df.show()
     # tf.logging.set_verbosity(tf.logging.INFO)
-    #
-    # te_files = [path + "nearby/part-r-00000"]
-    # te_files = []
-    # for i in range(0,10):
-    #     te_files.append([path + "native/part-r-0000" + str(i)])
-    # for i in range(10,100):
-    #     te_files.append([path + "native/part-r-000" + str(i)])
-    # # main(te_files)
-    # # te_files = [[path+"nearby/part-r-00000"],[path+"native/part-r-00000"]]
-    # rdd_te_files = spark.sparkContext.parallelize(te_files)
-    # print("-"*100)
-    # print(rdd_te_files.collect())
-    # print("-" * 100)
-    # indices = rdd_te_files.repartition(100).map(lambda x: main(x))
-    # print(indices.take(1))
+    te_files = []
+    for i in range(0,10):
+        te_files.append([path + "native/part-r-0000" + str(i)])
+    for i in range(10,100):
+        te_files.append([path + "native/part-r-000" + str(i)])
+    # main(te_files)
+    # te_files = [[path+"nearby/part-r-00000"],[path+"native/part-r-00000"]]
+    rdd_te_files = spark.sparkContext.parallelize(te_files)
+    print("-"*100)
+    print(rdd_te_files.collect())
+    print("-" * 100)
+    indices = rdd_te_files.repartition(100).map(lambda x: main(x))
+    print(indices.take(1))
 
     print("耗时(秒):")
     print((time.time() - b))
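Net effect of this hunk: the previously commented-out distributed-prediction driver is live again. te_files becomes 100 single-element lists of HDFS TFRecord paths (native/part-r-00000 through native/part-r-00099), parallelize turns them into an RDD, and repartition(100).map(lambda x: main(x)) runs one main() call per file list on the executors; the final prints ("耗时(秒):" means "elapsed time (seconds):") report wall-clock time. Note that spark.read.format("tfrecords") relies on an external reader such as spark-tensorflow-connector being on the classpath. A self-contained sketch of the pattern, with main() stubbed out because the real per-file prediction logic lives earlier in dist_predict.py:

    # Sketch only: the SparkSession setup and stub main() are assumptions,
    # not part of this commit; paths and names are taken from the diff.
    import time

    from pyspark.sql import SparkSession

    def main(te_file):
        # Stand-in for the real prediction routine, which consumes a list
        # holding one TFRecord path and returns prediction indices.
        return te_file

    if __name__ == "__main__":
        b = time.time()
        spark = SparkSession.builder.appName("dist_predict_sketch").getOrCreate()
        spark.sparkContext.setLogLevel("WARN")

        path = "hdfs://172.16.32.4:8020/strategy/esmm/"
        te_files = []
        for i in range(0, 10):
            te_files.append([path + "native/part-r-0000" + str(i)])
        for i in range(10, 100):
            te_files.append([path + "native/part-r-000" + str(i)])

        # One single-file list per RDD element; repartition(100) spreads the
        # 100 lists across tasks so each main() call handles exactly one file.
        rdd_te_files = spark.sparkContext.parallelize(te_files)
        indices = rdd_te_files.repartition(100).map(lambda x: main(x))
        print(indices.take(1))
        print("Elapsed (s):", time.time() - b)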