Commit 70851e18 authored by Your Name's avatar Your Name

test features

parent 7757e6b9
......@@ -192,24 +192,24 @@ if __name__ == "__main__":
# NOTE(review): this is the tail of the script's `if __name__ == "__main__":` block
# captured from a diff view — leading indentation was lost in extraction, and
# `spark` and `main` are defined earlier in the file (outside this chunk).

# Quiet the Spark driver: log warnings and above only.
spark.sparkContext.setLogLevel("WARN")
# Base HDFS directory that holds the ESMM TFRecord datasets.
path = "hdfs://172.16.32.4:8020/strategy/esmm/"
# Sanity check: load a single TFRecord shard of the "nearby" dataset and preview it.
# (Requires the spark-tensorflow-connector "tfrecords" data source on the classpath.)
df = spark.read.format("tfrecords").load(path+"nearby/part-r-00000")
df.show()
# --- earlier experiment over the "native" dataset, kept commented out ---
# df = spark.read.format("tfrecords").load(path+"nearby/part-r-00000")
# df.show()
# tf.logging.set_verbosity(tf.logging.INFO)
# te_files = []
# for i in range(0,10):
# te_files.append([path + "native/part-r-0000" + str(i)])
# for i in range(10,100):
# te_files.append([path + "native/part-r-000" + str(i)])
# # main(te_files)
# # te_files = [[path+"nearby/part-r-00000"],[path+"native/part-r-00000"]]
# rdd_te_files = spark.sparkContext.parallelize(te_files)
# print("-"*100)
# print(rdd_te_files.collect())
# print("-" * 100)
# indices = rdd_te_files.repartition(100).map(lambda x: main(x))
# print(indices.take(1))
# Build the 100 "test_nearby" shard paths (part-r-00000 .. part-r-00099); each entry
# is a single-element list so each Spark task receives exactly one file.
te_files = []
for i in range(0,10):
te_files.append([path + "test_nearby/part-r-0000" + str(i)])
for i in range(10,100):
te_files.append([path + "test_nearby/part-r-000" + str(i)])
# Distribute the shard list across the cluster; repartition(100) gives one
# partition per shard so all 100 calls to main() can run in parallel.
rdd_te_files = spark.sparkContext.parallelize(te_files)
print("-"*100)
print(rdd_te_files.collect())
print("-" * 100)
indices = rdd_te_files.repartition(100).map(lambda x: main(x))
# take(1) forces evaluation of (at least) the first partition.
print(indices.take(1))
print("耗时(秒):")  # "Elapsed time (seconds):" — the value is presumably printed later, outside this chunk
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment