ML / ffm-baseline · Commits · d7c5f38e

Commit d7c5f38e authored Apr 18, 2019 by 王志伟
Merge branch 'master' of http://git.wanmeizhensuo.com/ML/ffm-baseline

Parents: 864101b7, 822315f0
Showing 1 changed file with 32 additions and 20 deletions.

tensnsorflow/test.py @ d7c5f38e (+32, -20)
# -*- coding: utf-8 -*-
import datetime
from pyspark.sql import HiveContext
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
...
@@ -7,30 +6,43 @@ from pyspark.sql import SQLContext
from pyspark.sql import SparkSession
# from py4j.java_gateway import java_import
# import pytispark.pytispark as pti
import pandas as pd
import pymysql


def con_sql(db, sql):
    # Run a query on an open DB-API connection and return the rows as a DataFrame.
    cursor = db.cursor()
    try:
        cursor.execute(sql)
        result = cursor.fetchall()
        df = pd.DataFrame(list(result))
    except Exception as e:
        print("An exception occurred:", e)
        df = pd.DataFrame()
    finally:
        db.close()
    return df

# import pandas as pd
#
# def con_sql(db, sql):
#     cursor = db.cursor()
#     try:
#         cursor.execute(sql)
#         result = cursor.fetchall()
#         df = pd.DataFrame(list(result))
#     except Exception:
#         print("An exception occurred", Exception)
#         df = pd.DataFrame()
#     finally:
#         db.close()
#     return df
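
# A minimal usage sketch for con_sql (not from this commit): the host,
# credentials, database, and table below are placeholder assumptions, not
# values taken from this repository. Because con_sql closes the connection
# in its finally block, each query needs a fresh connection.
if __name__ == "__main__":
    db = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                         password="password", database="test", charset="utf8")
    df = con_sql(db, "select user_id from user_list limit 10")
    print(df.head())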
def test():
    conf = SparkConf().setAppName("My App").set("spark.io.compression.codec", "lzf")
    sc = SparkContext(conf=conf)
    hive_context = HiveContext(sc)
    # Sample page_view events that reached the diary detail page from home.
    hive_context.sql(''' select device["device_type"] from online.tl_hdfs_maidian_view
    where partition_date = '20181012' and action = "page_view"
    and params["page_name"] = "diary_detail" and params["referrer"] = "home" limit 10 ''').show(6)

    from pyspark.sql import SparkSession
    spark = SparkSession.builder.appName("BinarizerExample").enableHiveSupport().getOrCreate()
    spark.sql("use online")
    # Register the Hive UDF jars and the JSON helper functions they provide.
    spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/brickhouse-0.7.1-SNAPSHOT.jar")
    spark.sql("ADD JAR hdfs:///user/hive/share/lib/udf/hive-udf-1.0-SNAPSHOT.jar")
    spark.sql("CREATE TEMPORARY FUNCTION json_map AS 'brickhouse.udf.json.JsonMapUDF'")
    spark.sql("CREATE TEMPORARY FUNCTION is_json AS 'com.gmei.hive.common.udf.UDFJsonFormatCheck'")
    # hive_context.sql("SET mapreduce.job.queuename=data")
    # hive_context.sql("SET mapred.input.dir.recursive=true")
    # hive_context.sql("SET hive.mapred.supports.subdirectories=true")
    sql = "select user_id from online.tl_hdfs_maidian_view where partition_date = '20190412' limit 10"
    spark.sql(sql).show(6)

# def esmm_pre():
...
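
For reference, HiveContext has been deprecated since Spark 2.0 in favor of SparkSession with Hive support, which the second half of test() already uses. A minimal sketch of the same session setup in the SparkSession-only style, reusing the app name and codec setting from test() above:

    from pyspark.sql import SparkSession

    # Sketch of the equivalent SparkSession-only setup; configuration key
    # and values are taken from test() in the diff above.
    spark = (SparkSession.builder
             .appName("My App")
             .config("spark.io.compression.codec", "lzf")
             .enableHiveSupport()
             .getOrCreate())
    spark.sql("use online")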