Commit d0659f0d authored Nov 26, 2020 by litaolemo
update
parent 4c6758d9
Showing 1 changed file with 8 additions and 8 deletions
task/daily_search_word_count_last_two_year.py
@@ -122,10 +122,10 @@ spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF
 spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
 spark.sql("CREATE TEMPORARY FUNCTION arrayMerge AS 'com.gmei.hive.common.udf.UDFArryMerge'")
-df = spark.read.jdbc(url="jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true", table="zhengxing.wiki_item").load()
-df1 = spark.read.jdbc(url="jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true", table="zhengxing.wiki_product").load()
-df2 = spark.read.jdbc(url="jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true", table="zhengxing.wiki_collect").load()
-df3 = spark.read.jdbc(url="jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true", table="zhengxing.wiki_brand").load()
+df = spark.read.jdbc(url="jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true", table="wiki_item").load()
+df1 = spark.read.jdbc(url="jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true", table="wiki_product").load()
+df2 = spark.read.jdbc(url="jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true", table="wiki_collect").load()
+df3 = spark.read.jdbc(url="jdbc:mysql://172.16.30.143/zhengxing?user=work&password=BJQaT9VzDcuPBqkd&rewriteBatchedStatements=true", table="wiki_brand").load()
 # print(huidu_device_id_sql)
 # huidu_device_id_df = spark.sql(huidu_device_id_sql)
 # huidu_device_id_df.createOrReplaceTempView("dev_view")
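
Note: this hunk drops the explicit "zhengxing." schema prefix from the JDBC table argument. A minimal sketch of the equivalent PySpark JDBC read, assuming the database is resolved from the JDBC URL rather than from a qualified table name; the host and credentials below are placeholders, not the values from the commit:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("jdbc_read_sketch").getOrCreate()

# Placeholder URL: the database ("zhengxing") is part of the URL, so the
# table option can use the unqualified name, as in the "+" lines above.
jdbc_url = "jdbc:mysql://<host>/zhengxing?user=<user>&password=<password>&rewriteBatchedStatements=true"

wiki_item_df = (
    spark.read.format("jdbc")
    .option("url", jdbc_url)
    .option("dbtable", "wiki_item")  # unqualified; the schema comes from the URL
    .load()
)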
@@ -140,13 +140,13 @@ for t in range(1, task_days):
     last_two_year_str = (now + datetime.timedelta(days=-370)).strftime("%Y%m%d")
     # one_week_age_str = (now + datetime.timedelta(days=-7)).strftime("%Y%m%d")
     keyword_sql = """
-    select name from zhengxing.wiki_item where is_online=True
+    select name from wiki_item where is_online=True
     union
-    select name from zhengxing.wiki_product where is_online=True
+    select name from wiki_product where is_online=True
     union
-    select name from zhengxing.wiki_collect where is_online=True
+    select name from wiki_collect where is_online=True
     union
-    select name from zhengxing.wiki_brand where is_online=True
+    select name from wiki_brand where is_online=True
     """
     keyword_df = spark.sql(keyword_sql)
     keyword_df.createOrReplaceTempView("keywords_table")
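
The second hunk removes the same schema prefix inside keyword_sql, which runs through spark.sql(). A minimal sketch of how the unqualified names could resolve, assuming (this is not shown in the diff) that the JDBC DataFrames above are registered as temporary views under matching names elsewhere in the script:

# Assumption: view names mirror the tables referenced in keyword_sql; the actual
# registration happens outside the lines shown in this commit.
df.createOrReplaceTempView("wiki_item")
df1.createOrReplaceTempView("wiki_product")
df2.createOrReplaceTempView("wiki_collect")
df3.createOrReplaceTempView("wiki_brand")

keyword_df = spark.sql("select name from wiki_item where is_online=True")
keyword_df.createOrReplaceTempView("keywords_table")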