Commit c9187a48 authored by litaolemo

update

parent c6bca010
@@ -39,7 +39,7 @@ sparkConf.set("spark.shuffle.statistics.verbose", True)
 # sparkConf.set("spark.sql.adaptive.shuffle.targetPostShuffleInputSize", "67108864")
 # sparkConf.set("spark.sql.adaptive.shuffle.targetPostShuffleRowCount", "20000000")
 sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
-sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
+# sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
 sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
 sparkConf.set("mapreduce.map.output.compress", False)
 sparkConf.set("prod.gold.jdbcuri",
@@ -53,9 +53,9 @@ sparkConf.set("prod.tidb.jdbcuri",
 sparkConf.set("prod.jerry.jdbcuri",
               "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
 sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
-sparkConf.set("spark.sql.parquet.compression.codec", "snappy")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
-sparkConf.set("prod.tidb.database", "jerry_prod")
+# sparkConf.set("spark.sql.parquet.compression.codec", "snappy")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
+# sparkConf.set("prod.tidb.database", "jerry_prod")
 # sparkConf.set("spark.executor.extraJavaOptions", "-Djava.library.path=$HADOOP_HOME/lib/native")
 sparkConf.set("spark.driver.extraLibraryPath", "/opt/hadoop/lib/native")
 # sparkConf.set("spark.driver.extraJavaOptions", "-Djava.library.path=$HADOOP_HOME/lib/native")
@@ -66,7 +66,7 @@ spark = (SparkSession.builder.config(conf=sparkConf).config("spark.sql.extension
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
-spark.sql("ADD JAR /srv/apps/meta_base_code/snappy-java-1.1.2.jar")
+# spark.sql("ADD JAR /srv/apps/meta_base_code/snappy-java-1.1.2.jar")
 spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
 spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
 spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
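Net effect of this commit: the Kryo serializer, the snappy parquet codec, the second tispark/tidb settings, and the extra snappy-java jar are all commented out, so the job falls back to Spark's default (Java) serializer and default parquet codec. A minimal sketch of the resulting session bootstrap is below; it assumes only that pyspark is installed, takes keys, jar paths, and UDF classes from the diff, and uses enableHiveSupport() as an assumption in place of the truncated .config("spark.sql.extension... call.

from pyspark import SparkConf
from pyspark.sql import SparkSession

sparkConf = SparkConf()
# Settings that survive this commit; the serializer and compression
# options removed above are left at Spark defaults.
sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
sparkConf.set("mapreduce.map.output.compress", False)
sparkConf.set("spark.driver.extraLibraryPath", "/opt/hadoop/lib/native")

# enableHiveSupport() is assumed: CREATE TEMPORARY FUNCTION with Hive UDF
# classes requires a Hive-enabled session.
spark = (SparkSession.builder
         .config(conf=sparkConf)
         .enableHiveSupport()
         .getOrCreate())

# Jar and UDF registration as in the post-commit file (snappy-java jar dropped).
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")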