宋柯 / meta_base_code · Commits

Commit dbe6150b, authored Mar 19, 2021 by 宋柯
降配 ("downgrade resource allocation")

parent 7549fc88
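In short, as the diffs below show: task.sh cuts the spark-submit resource requests for four scheduled jobs (driver memory 16g → 1g, executor memory 1g → 2g, num-executors 30 → 2, spark.default.parallelism 100 → 50) and drops the TiSpark jar from --jars; the task scripts comment out their TiSpark/TiDB PD settings, remove the TiExtensions wiring from the SparkSession builder, and replace placeholder app names such as "LR PYSPARK TEST" and "test" with per-task names.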
Showing 10 changed files with 55 additions and 60 deletions (+55 -60).
daily_once_task.sh                          +0 -0
task.sh                                     +4 -4
task/advertisement_strategy_d.py            +1 -1
task/conent_detail_page_grayscale_ctr.py    +9 -9
task/core_indicators_monitoring.py          +8 -9
task/daily_search_word_count.py             +8 -9
task/recommend_strategy_d.py                +8 -9
task/recommend_strategy_fix.py              +8 -9
task/search_meigou_ctr.py                   +1 -1
task/search_strategy_d.py                   +8 -9
daily_once_task.sh @ dbe6150b

(diff collapsed; contents not shown)
task.sh @ dbe6150b

 source /srv/envs/esmm/bin/activate
-/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 16g --executor-memory 1g --executor-cores 1 --num-executors 30 --conf spark.default.parallelism=100 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/tispark-core-2.1-SNAPSHOT-jar-with-dependencies.jar,/srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/conent_detail_page_grayscale_ctr.py
-/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 16g --executor-memory 1g --executor-cores 1 --num-executors 30 --conf spark.default.parallelism=100 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/tispark-core-2.1-SNAPSHOT-jar-with-dependencies.jar,/srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/recommend_strategy_d.py
-/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 16g --executor-memory 1g --executor-cores 1 --num-executors 30 --conf spark.default.parallelism=100 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/tispark-core-2.1-SNAPSHOT-jar-with-dependencies.jar,/srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/recommend_strategy_fix.py
-/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 16g --executor-memory 1g --executor-cores 1 --num-executors 30 --conf spark.default.parallelism=100 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/tispark-core-2.1-SNAPSHOT-jar-with-dependencies.jar,/srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/search_strategy_d.py
+/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 1g --executor-memory 2g --executor-cores 1 --num-executors 2 --conf spark.default.parallelism=50 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/conent_detail_page_grayscale_ctr.py
+/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 1g --executor-memory 2g --executor-cores 1 --num-executors 2 --conf spark.default.parallelism=50 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/recommend_strategy_d.py
+/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 1g --executor-memory 2g --executor-cores 1 --num-executors 2 --conf spark.default.parallelism=50 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/recommend_strategy_fix.py
+/opt/spark/bin/spark-submit --master yarn --deploy-mode client --queue root.strategy --driver-memory 1g --executor-memory 2g --executor-cores 1 --num-executors 2 --conf spark.default.parallelism=50 --conf spark.storage.memoryFraction=0.5 --conf spark.shuffle.memoryFraction=0.3 --conf spark.locality.wait=0 --jars /srv/apps/spark-connector_2.11-1.9.0-rc2.jar,/srv/apps/mysql-connector-java-5.1.38.jar /srv/apps/meta_base_code/task/search_strategy_d.py
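For orientation (illustrative only, not part of the commit): each changed flag maps one-to-one onto a standard Spark configuration key, so the before/after resource profile can be sketched as SparkConf settings. The mapping uses only the documented flag-to-key equivalents; all values are copied from the diff above.

# Illustrative sketch -- the spark-submit flags expressed via their
# standard SparkConf equivalents, to make the downgrade easy to compare.
from pyspark import SparkConf

old_profile = (SparkConf()
               .set("spark.driver.memory", "16g")        # --driver-memory 16g
               .set("spark.executor.memory", "1g")       # --executor-memory 1g
               .set("spark.executor.cores", "1")         # --executor-cores 1
               .set("spark.executor.instances", "30")    # --num-executors 30
               .set("spark.default.parallelism", "100"))

new_profile = (SparkConf()
               .set("spark.driver.memory", "1g")         # 16g -> 1g
               .set("spark.executor.memory", "2g")       # 1g -> 2g (fewer, larger executors)
               .set("spark.executor.cores", "1")
               .set("spark.executor.instances", "2")     # 30 -> 2
               .set("spark.default.parallelism", "50"))  # 100 -> 50

Net effect: the cluster footprint drops from roughly 30 GB of executor memory across 30 containers to 4 GB across 2, with parallelism halved to match.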
task/advertisement_strategy_d.py @ dbe6150b

@@ -67,7 +67,7 @@ sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
 spark = (SparkSession.builder.config(conf=sparkConf)
          .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
          .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
-         .appName("LR PYSPARK TEST")
+         .appName("advertisement_strategy_d")
          .enableHiveSupport()
          .getOrCreate())
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
task/conent_detail_page_grayscale_ctr.py @ dbe6150b

@@ -39,13 +39,13 @@ startTime = time.time()
 sparkConf = SparkConf()
 sparkConf.set("spark.sql.crossJoin.enabled", True)
 sparkConf.set("spark.debug.maxToStringFields", "100")
-sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
-sparkConf.set("spark.tispark.plan.allow_index_read", True)
+# sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
+# sparkConf.set("spark.tispark.plan.allow_index_read", True)
 sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
 sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
 sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
-sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
-sparkConf.set("mapreduce.map.output.compress", False)
+# sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
+# sparkConf.set("mapreduce.map.output.compress", False)
 sparkConf.set("prod.gold.jdbcuri",
               "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
 sparkConf.set("prod.mimas.jdbcuri",
@@ -56,13 +56,13 @@ sparkConf.set("prod.tidb.jdbcuri",
               "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
 # sparkConf.set("prod.jerry.jdbcuri",
 #               "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
 # sparkConf.set("prod.tidb.database", "jerry_prod")
-spark = (SparkSession.builder.config(conf=sparkConf)
-         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
-         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
-         .appName("LR PYSPARK TEST")
-         .enableHiveSupport()
-         .getOrCreate())
+spark = (SparkSession.builder.config(conf=sparkConf)
+         .appName("conent_detail_page_grayscale_ctr")
+         .enableHiveSupport()
+         .getOrCreate())
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
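Dropping the TiExtensions and spark.tispark.pd.addresses wiring means this session can no longer read TiDB tables through the TiSpark catalog; the JDBC URIs that the config keeps (e.g. prod.tidb.jdbcuri) remain the access path. A minimal sketch of such a fallback read, assuming a hypothetical table name "some_table" (not named anywhere in the diff):

# Minimal sketch, not from the commit: reading a TiDB table over plain JDBC
# now that the TiSpark extension is no longer registered on the session.
# The URL comes from the prod.tidb.jdbcuri key kept in sparkConf; the driver
# class matches the mysql-connector-java-5.1.38.jar shipped via --jars.
tidb_uri = spark.sparkContext.getConf().get("prod.tidb.jdbcuri")
df = (spark.read.format("jdbc")
      .option("url", tidb_uri)
      .option("driver", "com.mysql.jdbc.Driver")
      .option("dbtable", "some_table")   # placeholder table name
      .load())
df.show(10)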
task/core_indicators_monitoring.py @ dbe6150b

@@ -28,13 +28,13 @@ startTime = time.time()
 sparkConf = SparkConf()
 sparkConf.set("spark.sql.crossJoin.enabled", True)
 sparkConf.set("spark.debug.maxToStringFields", "100")
-sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
-sparkConf.set("spark.tispark.plan.allow_index_read", True)
+# sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
+# sparkConf.set("spark.tispark.plan.allow_index_read", True)
 sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
 sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
 sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
-sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
-sparkConf.set("mapreduce.map.output.compress", False)
+# sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
+# sparkConf.set("mapreduce.map.output.compress", False)
 sparkConf.set("prod.gold.jdbcuri",
               "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
 sparkConf.set("prod.mimas.jdbcuri",
@@ -45,13 +45,12 @@ sparkConf.set("prod.tidb.jdbcuri",
               "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
 # sparkConf.set("prod.jerry.jdbcuri",
 #               "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
 # sparkConf.set("prod.tidb.database", "jerry_prod")
-spark = (SparkSession.builder.config(conf=sparkConf)
-         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
-         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
-         .appName("LR PYSPARK TEST")
-         .enableHiveSupport()
-         .getOrCreate())
+spark = (SparkSession.builder.config(conf=sparkConf)
+         .appName("core_indicators_monitoring")
+         .enableHiveSupport()
+         .getOrCreate())
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
task/daily_search_word_count.py @ dbe6150b

@@ -30,13 +30,13 @@ startTime = time.time()
 sparkConf = SparkConf()
 sparkConf.set("spark.sql.crossJoin.enabled", True)
 sparkConf.set("spark.debug.maxToStringFields", "100")
-sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
-sparkConf.set("spark.tispark.plan.allow_index_read", True)
+# sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
+# sparkConf.set("spark.tispark.plan.allow_index_read", True)
 sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
 sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
 sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
-sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
-sparkConf.set("mapreduce.map.output.compress", False)
+# sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
+# sparkConf.set("mapreduce.map.output.compress", False)
 sparkConf.set("prod.gold.jdbcuri",
               "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
 sparkConf.set("prod.mimas.jdbcuri",
@@ -47,13 +47,12 @@ sparkConf.set("prod.tidb.jdbcuri",
               "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
 # sparkConf.set("prod.jerry.jdbcuri",
 #               "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
 # sparkConf.set("prod.tidb.database", "jerry_prod")
-sparkConf.setAppName("test")
+sparkConf.setAppName("daily_search_word_count")
-spark = (SparkSession.builder.config(conf=sparkConf)
-         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
-         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
-         .enableHiveSupport()
-         .getOrCreate())
+spark = (SparkSession.builder.config(conf=sparkConf)
+         .enableHiveSupport()
+         .getOrCreate())
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
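Note the app-naming pattern differs in this file: the name is set on the SparkConf via setAppName before the builder consumes it, rather than via builder.appName as in the other tasks. Both end up as spark.app.name; a minimal equivalence sketch (illustrative, not from the commit):

# Equivalence sketch: both forms populate spark.app.name.
from pyspark import SparkConf
from pyspark.sql import SparkSession

conf = SparkConf().setAppName("daily_search_word_count")
spark = SparkSession.builder.config(conf=conf).enableHiveSupport().getOrCreate()
# ...has the same effect on the app name as:
# spark = (SparkSession.builder.appName("daily_search_word_count")
#          .enableHiveSupport().getOrCreate())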
task/recommend_strategy_d.py @ dbe6150b

@@ -43,13 +43,13 @@ startTime = time.time()
 sparkConf = SparkConf()
 sparkConf.set("spark.sql.crossJoin.enabled", True)
 sparkConf.set("spark.debug.maxToStringFields", "100")
-sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
-sparkConf.set("spark.tispark.plan.allow_index_read", True)
+# sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
+# sparkConf.set("spark.tispark.plan.allow_index_read", True)
 sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
 sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
 sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
-sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
-sparkConf.set("mapreduce.map.output.compress", False)
+# sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
+# sparkConf.set("mapreduce.map.output.compress", False)
 sparkConf.set("prod.gold.jdbcuri",
               "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
 sparkConf.set("prod.mimas.jdbcuri",
@@ -60,13 +60,12 @@ sparkConf.set("prod.tidb.jdbcuri",
               "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
 # sparkConf.set("prod.jerry.jdbcuri",
 #               "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
 # sparkConf.set("prod.tidb.database", "jerry_prod")
-spark = (SparkSession.builder.config(conf=sparkConf)
-         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
-         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
-         .appName("LR PYSPARK TEST")
-         .enableHiveSupport()
-         .getOrCreate())
+spark = (SparkSession.builder.config(conf=sparkConf)
+         .appName("recommend_strategy_d")
+         .enableHiveSupport()
+         .getOrCreate())
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
task/recommend_strategy_fix.py @ dbe6150b

@@ -42,13 +42,13 @@ startTime = time.time()
 sparkConf = SparkConf()
 sparkConf.set("spark.sql.crossJoin.enabled", True)
 sparkConf.set("spark.debug.maxToStringFields", "100")
-sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
-sparkConf.set("spark.tispark.plan.allow_index_read", True)
+# sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
+# sparkConf.set("spark.tispark.plan.allow_index_read", True)
 sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
 sparkConf.set("spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive", True)
 sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
-sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
-sparkConf.set("mapreduce.map.output.compress", False)
+# sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
+# sparkConf.set("mapreduce.map.output.compress", False)
 sparkConf.set("prod.gold.jdbcuri",
               "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
 sparkConf.set("prod.mimas.jdbcuri",
@@ -59,13 +59,12 @@ sparkConf.set("prod.tidb.jdbcuri",
               "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
 # sparkConf.set("prod.jerry.jdbcuri",
 #               "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
 # sparkConf.set("prod.tidb.database", "jerry_prod")
-spark = (SparkSession.builder.config(conf=sparkConf)
-         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
-         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
-         .appName("LR PYSPARK TEST")
-         .enableHiveSupport()
-         .getOrCreate())
+spark = (SparkSession.builder.config(conf=sparkConf)
+         .appName("recommend_strategy_fix")
+         .enableHiveSupport()
+         .getOrCreate())
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
task/search_meigou_ctr.py @ dbe6150b

@@ -64,7 +64,7 @@ sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
 spark = (SparkSession.builder.config(conf=sparkConf)
          .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
          .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
-         .appName("LR PYSPARK TEST")
+         .appName("search_meigou_ctr")
          .enableHiveSupport()
          .getOrCreate())
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
task/search_strategy_d.py @ dbe6150b

@@ -32,16 +32,16 @@ startTime = time.time()
 sparkConf = SparkConf()
 sparkConf.set("spark.sql.crossJoin.enabled", True)
 sparkConf.set("spark.debug.maxToStringFields", "100")
-sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
-sparkConf.set("spark.tispark.plan.allow_index_read", True)
+# sparkConf.set("spark.tispark.plan.allow_index_double_read", False)
+# sparkConf.set("spark.tispark.plan.allow_index_read", True)
 sparkConf.set("spark.hive.mapred.supports.subdirectories", True)
 sparkConf.set("spark.sql.adaptive.enabled", True)
 sparkConf.set("spark.sql.adaptive.skewedJoin.enabled", True)
 sparkConf.set("spark.shuffle.statistics.verbose", True)
 sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
-sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
-sparkConf.set("mapreduce.map.output.compress", False)
+# sparkConf.set("mapreduce.output.fileoutputformat.compress", False)
+# sparkConf.set("mapreduce.map.output.compress", False)
 sparkConf.set("prod.gold.jdbcuri",
               "jdbc:mysql://172.16.30.136/doris_prod?user=doris&password=o5gbA27hXHHm&rewriteBatchedStatements=true")
 sparkConf.set("prod.mimas.jdbcuri",
@@ -52,17 +52,16 @@ sparkConf.set("prod.tidb.jdbcuri",
               "jdbc:mysql://172.16.40.158:4000/eagle?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
 # sparkConf.set("prod.jerry.jdbcuri",
 #               "jdbc:mysql://172.16.40.158:4000/jerry_prod?user=st_user&password=aqpuBLYzEV7tML5RPsN1pntUzFy&rewriteBatchedStatements=true")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.158:2379")
 sparkConf.set("spark.sql.parquet.compression.codec", "snappy")
-sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
+# sparkConf.set("prod.tispark.pd.addresses", "172.16.40.170:4000")
 # sparkConf.set("prod.tidb.database", "jerry_prod")
 # sparkConf.set("spark.executor.extraJavaOptions", "-Djava.library.path=HADOOP_HOME/lib/native")
 sparkConf.set("spark.driver.extraLibraryPath", "/opt/hadoop/lib/native")
 # sparkConf.set("spark.driver.extraJavaOptions", "-Djava.library.path=HADOOP_HOME/lib/native")
-spark = (SparkSession.builder.config(conf=sparkConf)
-         .config("spark.sql.extensions", "org.apache.spark.sql.TiExtensions")
-         .config("spark.tispark.pd.addresses", "172.16.40.170:2379")
-         .appName("LR PYSPARK TEST")
-         .enableHiveSupport()
-         .getOrCreate())
+spark = (SparkSession.builder.config(conf=sparkConf)
+         .appName("search_strategy_d")
+         .enableHiveSupport()
+         .getOrCreate())
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
 spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
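After a resource downgrade like this, it is worth confirming that the reduced settings actually took effect on a live session rather than being overridden by cluster defaults. A quick check (illustrative sketch, not part of the commit):

# Print the resource-related settings the running session resolved,
# to verify the downgraded values from task.sh are in force.
for key in ("spark.driver.memory", "spark.executor.memory",
            "spark.executor.instances", "spark.default.parallelism"):
    print(key, "=", spark.sparkContext.getConf().get(key, "<unset>"))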