Commit a11ec230 authored by 张彦钊

change test file

parent c13effaa
@@ -4,13 +4,11 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 import pandas as pd
-import sys
 import os
 import glob
 import tensorflow as tf
 import numpy as np
-import re
 from multiprocessing import Pool as ThreadPool
 flags = tf.app.flags
...
@@ -205,19 +205,8 @@ def con_sql(db,sql):
     db.close()
     return df
-def test():
-    sparkConf = SparkConf().set("spark.hive.mapred.supports.subdirectories", "true")\
-        .set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", "true")\
-        .set("spark.tispark.plan.allow_index_double_read", "false") \
-        .set("spark.tispark.plan.allow_index_read", "true")\
-        .set("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")\
-        .set("spark.tispark.pd.addresses", "172.16.40.158:2379").set("spark.io.compression.codec", "lzf")
-    spark = SparkSession.builder.config(conf= sparkConf).enableHiveSupport().getOrCreate()
+def test():
     spark.sql("use online")
     spark.sql("ADD JAR /srv/apps/brickhouse-0.7.1-SNAPSHOT.jar")
     spark.sql("ADD JAR /srv/apps/hive-udf-1.0-SNAPSHOT.jar")
...
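Note: after this change, test() calls spark.sql directly, so it presumably relies on a SparkSession created elsewhere (the builder code removed above). The following is a minimal sketch of what that module-level setup could look like, assuming the same TiSpark/Hive settings still apply; the placement at module scope is an assumption, not confirmed by this commit.

# Hypothetical module-level setup the new test() appears to depend on;
# mirrors the configuration removed in this commit (assumption).
from pyspark import SparkConf
from pyspark.sql import SparkSession

sparkConf = (SparkConf()
    .set("spark.hive.mapred.supports.subdirectories", "true")
    .set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", "true")
    .set("spark.tispark.plan.allow_index_double_read", "false")
    .set("spark.tispark.plan.allow_index_read", "true")
    .set("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
    .set("spark.tispark.pd.addresses", "172.16.40.158:2379")
    .set("spark.io.compression.codec", "lzf"))

spark = SparkSession.builder.config(conf=sparkConf).enableHiveSupport().getOrCreate()

def test():
    # Same body as in the diff above: switch to the target database and register UDF jars.
    spark.sql("use online")
    spark.sql("ADD JAR /srv/apps/brickhouse-0.7.1-SNAPSHOT.jar")
    spark.sql("ADD JAR /srv/apps/hive-udf-1.0-SNAPSHOT.jar")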