ML / ffm-baseline · Commits

Commit 648f190f
authored May 05, 2019 by 张彦钊

修改测试文件 (Modify the test file)

parent 5bb516de

Showing 1 changed file with 13 additions and 17 deletions

tensnsorflow/multi.py  +13  -17
...
...
@@ -197,18 +197,13 @@ def con_sql(db,sql):

def test():
    sql = "select y,z,cid_id from esmm_train_data e where stat_date >= '{}'".format("2019-04-25")
    df = spark.sql(sql).dropna()
    df.show(6)
    # Persist the training set as Avro on HDFS.
    df.write.format("avro").save(path="/recommend/tr", mode="overwrite")
    # from hdfs import InsecureClient
    # from hdfs.ext.dataframe import read_dataframe
    # client = InsecureClient('http://nvwa01:50070')
    #
    # df = read_dataframe(client,"/recommend/tr/part-00000-80d4e128-4a79-41de-9473-e4d0c5665047-c000.avro")
    #
    # print(df.head())
    # Read one validation part file back over WebHDFS into a pandas frame.
    from hdfs import InsecureClient
    from hdfs.ext.dataframe import read_dataframe
    client = InsecureClient('http://nvwa01:50070')
    df = read_dataframe(client, "/recommend/va/part-00199-a6aad7c8-149c-4eee-b718-5d350b26e8d2-c000.avro")
    print(df.head())
    # spark.sql("use online")
    # spark.sql("ADD JAR /srv/apps/brickhouse-0.7.1-SNAPSHOT.jar")
...
...
@@ -229,12 +224,13 @@ if __name__ == '__main__':
        .set("spark.driver.maxResultSize", "8g")
    spark = SparkSession.builder.config(conf=sparkConf).enableHiveSupport().getOrCreate()
    # Map the TiDB database so its tables are queryable through spark.sql().
    ti = pti.TiContext(spark)
    ti.tidbMapDatabase("jerry_test")
    spark.sparkContext.setLogLevel("WARN")
    # ti = pti.TiContext(spark)
    # ti.tidbMapDatabase("jerry_test")
    # spark.sparkContext.setLogLevel("WARN")
    # validate_date, value_map, app_list_map, leve2_map, leve3_map = feature_engineer()
    # get_predict(validate_date, value_map, app_list_map, leve2_map, leve3_map)
    validate_date, value_map, app_list_map, leve2_map, leve3_map = feature_engineer()
    get_predict(validate_date, value_map, app_list_map, leve2_map, leve3_map)
    test()
...
...
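The second hunk turns the live pipeline back on: the TiContext mapping and the feature_engineer()/get_predict() calls run for real, and test() is invoked at the end. A minimal standalone sketch of that session wiring, assuming the `pti` alias comes from TiSpark's pytispark bindings (the import sits outside this hunk, so that is an inference; feature_engineer and get_predict are the project's own functions and are omitted here):

from pyspark import SparkConf
from pyspark.sql import SparkSession
import pytispark.pytispark as pti  # assumed: the import behind `pti` in multi.py

# Mirror the conf shown in the hunk; earlier .set(...) calls are elided ("...").
sparkConf = SparkConf().set("spark.driver.maxResultSize", "8g")
spark = (SparkSession.builder
         .config(conf=sparkConf)
         .enableHiveSupport()
         .getOrCreate())

# Expose the TiDB database "jerry_test" to spark.sql() via the TiSpark catalog.
ti = pti.TiContext(spark)
ti.tidbMapDatabase("jerry_test")
spark.sparkContext.setLogLevel("WARN")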