Commit a06b9374 authored by Your Name

test dist

parent 873bb49e
@@ -162,14 +162,14 @@ def main(te_file):
     preds = Estimator.predict(input_fn=lambda: input_fn(te_file, num_epochs=1, batch_size=10000), predict_keys=["pctcvr","pctr","pcvr"])
-    with open("/home/gmuser/esmm/nearby/pred.txt", "w") as fo:
-        for prob in preds:
-            fo.write("%f\t%f\t%f\n" % (prob['pctr'], prob['pcvr'], prob['pctcvr']))
+    # with open("/home/gmuser/esmm/nearby/pred.txt", "w") as fo:
+    #     for prob in preds:
+    #         fo.write("%f\t%f\t%f\n" % (prob['pctr'], prob['pcvr'], prob['pctcvr']))
-    # indices = []
-    # for prob in preds:
-    #     indices.append([prob['pctr'], prob['pcvr'], prob['pctcvr']])
-    # return indices
+    indices = []
+    for prob in preds:
+        indices.append([prob['pctr'], prob['pcvr'], prob['pctcvr']])
+    return indices

 def test_map(x):
     return x * x
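The hunk above changes main() from writing predictions to a local file to returning them as a plain Python list, which is what allows the Spark job below to ship each task's results back to the driver. A minimal sketch of that predict-and-collect pattern, assuming a tf.estimator.Estimator and the script's input_fn; predict_to_list is a hypothetical stand-in name:

```python
def predict_to_list(estimator, input_fn, te_file):
    # Estimator.predict returns a lazy generator of dicts, one per example,
    # keyed by the requested predict_keys.
    preds = estimator.predict(
        input_fn=lambda: input_fn(te_file, num_epochs=1, batch_size=10000),
        predict_keys=["pctcvr", "pctr", "pcvr"])
    # Materialize the generator into a picklable list of rows so the result
    # can be returned from a Spark map task instead of written to local disk.
    return [[p['pctr'], p['pcvr'], p['pctcvr']] for p in preds]
```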
@@ -198,13 +198,13 @@ if __name__ == "__main__":
     tf.logging.set_verbosity(tf.logging.INFO)
-    te_files = [path + "nearby/part-r-00000"]
-    main(te_files)
-    # te_files = [[path+"nearby/part-r-00000"],[path+"native/part-r-00000"]]
-    # rdd_te_files = spark.sparkContext.parallelize(te_files)
-    # indices = rdd_te_files.repartition(2).map(lambda x: main(x))
-    # print(indices.collect())
-    # tf.app.run()
+    # te_files = [path + "nearby/part-r-00000"]
+    # main(te_files)
+    te_files = [[path+"nearby/part-r-00000"],[path+"native/part-r-00000"]]
+    rdd_te_files = spark.sparkContext.parallelize(te_files)
+    indices = rdd_te_files.repartition(2).map(lambda x: main(x))
+    print(indices.collect())
+    tf.app.run()
     b = time.time()
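With main() returning a list, the __main__ block now fans prediction out over Spark: one single-file list per RDD element, main() run on the executors, and the rows collected on the driver. A hedged sketch of that fan-out; run_predict stands in for the script's main(), and the SparkSession setup and base path are assumptions, not part of the diff:

```python
from pyspark.sql import SparkSession

def run_predict(te_files):
    # Stand-in for the script's main(): the real version builds the
    # Estimator on the executor and returns [[pctr, pcvr, pctcvr], ...].
    return [[0.5, 0.1, 0.05] for _ in te_files]

if __name__ == "__main__":
    spark = SparkSession.builder.appName("esmm-dist-predict").getOrCreate()
    path = "hdfs:///user/esmm/"  # assumed base path
    # Keeping each element a one-file list preserves main()'s argument
    # shape from the non-distributed call it replaces.
    te_files = [[path + "nearby/part-r-00000"], [path + "native/part-r-00000"]]
    rdd_te_files = spark.sparkContext.parallelize(te_files, numSlices=2)
    indices = rdd_te_files.map(run_predict).collect()
    print(indices)
    spark.stop()
```

Passing numSlices=2 to parallelize yields the same two-partition layout the diff gets via repartition(2), without the extra shuffle.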