Commit 5d851283 authored by 张彦钊

Add features

parent fa29f413
@@ -54,7 +54,7 @@ def get_data():
     # Do not shorten the order_tag table above to "order": order is a MySQL reserved word, as in ORDER BY
     df = con_sql(db, sql)
-    print(df.shape)
+    # print(df.shape)
     df = df.rename(columns={0: "y", 1: "z", 2: "stat_date", 3: "ucity_id", 4: "clevel2_id", 5: "ccity_name",
                             6: "device_type", 7: "manufacturer", 8: "channel", 9: "top", 10: "device_id",
                             11: "time", 12: "app_list", 13: "service_id", 14: "level3_ids", 15: "level2",
@@ -103,7 +103,6 @@ def get_data():
         df[i] = df[i].fillna("lost_na")
         df[i] = df[i].apply(app_list_func, args=(level2_map,))
     unique_values = []
     features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
                 "channel", "top", "time", "stat_date", "hospital_id",
...
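The collapsed region builds unique_values from the features list, which suggests a single global vocabulary over the categorical columns. A sketch of that pattern, assuming the hidden code continues in the obvious way (value_map is a hypothetical name):

unique_values = []
features = ["ucity_id", "ccity_name", "device_type", "manufacturer",
            "channel", "top", "time", "stat_date", "hospital_id"]
for i in features:
    # Pool the distinct values of every categorical column so each value
    # can later be mapped into one shared integer id space.
    unique_values.extend(list(df[i].unique()))
value_map = dict(zip(unique_values, range(1, len(unique_values) + 1)))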
@@ -6,6 +6,7 @@ import pytispark.pytispark as pti
 from pyspark.sql import SparkSession
 import datetime
 import pandas as pd
+import avro.schema
 
 def app_list_func(x,l):
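The body of app_list_func is collapsed in this diff. Given the call site df[i].apply(app_list_func, args=(level2_map,)), one plausible reading (a sketch, not the author's confirmed code) is that it splits a comma-separated id string and maps each id through the dict l:

def app_list_func(x, l):
    # x: a comma-separated id string such as "1095,1098"
    # l: a dict mapping each id string to an integer index
    ids = str(x).split(",")
    # Unseen ids fall back to 0 so the output stays within the vocabulary.
    return [l[i] if i in l else 0 for i in ids]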
@@ -37,7 +38,7 @@ def feature_engineer():
     validate_date = con_sql(db, sql)[0].values.tolist()[0]
     print("validate_date:" + validate_date)
     temp = datetime.datetime.strptime(validate_date, "%Y-%m-%d")
-    start = (temp - datetime.timedelta(days=3)).strftime("%Y-%m-%d")
+    start = (temp - datetime.timedelta(days=2)).strftime("%Y-%m-%d")
     print(start)
     sql = "select e.y,e.z,e.stat_date,e.ucity_id,feat.level2_ids,e.ccity_name,u.device_type,u.manufacturer," \
@@ -53,6 +54,8 @@ def feature_engineer():
     df = spark.sql(sql)
+    df.write.format("avro").save(path=path + "tr", mode="overwrite")
     url = "jdbc:mysql://172.16.30.143:3306/zhengxing"
     jdbcDF = spark.read.format("jdbc").option("driver", "com.mysql.jdbc.Driver").option("url", url) \
         .option("dbtable", "api_service").option("user", 'work').option("password", 'BJQaT9VzDcuPBqkd').load()
...
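Two Avro dependencies are involved here and they are separate: import avro.schema comes from the avro Python package (pip install avro), while df.write.format("avro") needs the external spark-avro module on the Spark classpath. One way to attach it when building the session; the package coordinates are an assumption and must match the cluster's Spark and Scala versions:

from pyspark.sql import SparkSession

spark = SparkSession.builder \
    .appName("feature_engineer") \
    .config("spark.jars.packages", "org.apache.spark:spark-avro_2.11:2.4.0") \
    .getOrCreate()

Without the module on the classpath, the write typically fails with a "Failed to find data source: avro" error.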