Commit ba2a0bcf authored by 高雅喆's avatar 高雅喆

Merge branch 'master' of git.wanmeizhensuo.com:ML/ffm-baseline

bug fix
parents 5d6c8577 d6ac61ef
......@@ -376,3 +376,119 @@ object hospital_gengmei {
}
// Spark batch job: sums meigou order spend (gengmei_price) per paying user,
// after excluding devices that look like spam agencies or appear on the
// blacklist, then appends the per-user totals to the `meigou_price` table
// over JDBC.
object meigou_xiaofei_renshu {
// Quiet down Spark and Jetty logging for this job.
Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
Logger.getLogger("org.apache.eclipse.jetty.server").setLevel(Level.OFF)
// Command-line parameters: target database environment and a date string.
// NOTE(review): `date` is parsed but never read below — the job derives its
// date from GmeiConfig.getMinusNDate(1) instead; confirm this is intended.
case class Params(env: String = "dev",
date: String = "2018-08-01"
) extends AbstractParams[Params] with Serializable
val defaultParams = Params()
// scopt parser accepting --env and --date.
val parser = new OptionParser[Params]("Feed_EDA") {
head("WeafareStat")
opt[String]("env")
.text(s"the databases environment you used")
.action((x, c) => c.copy(env = x))
opt[String] ("date")
.text(s"the date you used")
.action((x,c) => c.copy(date = x))
note(
"""
|For example, the following command runs this app on a tidb dataset:
|
| spark-submit --class com.gmei.WeafareStat ./target/scala-2.11/feededa-assembly-0.1.jar \
""".stripMargin +
s"| --env ${defaultParams.env}"
)
}
def main(args: Array[String]): Unit = {
// NOTE(review): the Option returned by parse(...) is only .map-ped; a
// failed argument parse exits silently with no error message.
parser.parse(args, defaultParams).map { param =>
// Initialise environment-specific config and get a SparkSession.
GmeiConfig.setup(param.env)
val spark_env = GmeiConfig.getSparkSession()
val sc = spark_env._2
// Expose the TiDB tables used (directly or transitively) by this job to
// Spark SQL via TiSpark. Only `blacklist` is queried below; the rest are
// presumably mapped for parity with sibling jobs — verify before pruning.
val ti = new TiContext(sc)
ti.tidbMapTable(dbName = "jerry_prod", tableName = "diary_video")
ti.tidbMapTable(dbName = "jerry_prod", tableName = "data_feed_click")
ti.tidbMapTable(dbName = "jerry_prod", tableName = "blacklist")
ti.tidbMapTable(dbName = "jerry_test", tableName = "bl_device_list")
ti.tidbMapTable(dbName = "jerry_prod", tableName = "data_feed_exposure")
ti.tidbMapTable(dbName = "jerry_prod", tableName = "merge_queue_table")
import sc.implicits._
// Yesterday's date, e.g. "2018-12-23"; partition_date strips the dashes
// to match the warehouse partition format ("20181223").
val stat_date = GmeiConfig.getMinusNDate(1)
//println(param.date)
val partition_date = stat_date.replace("-","")
// Devices whose page-view ratio >= 0.95 in the daily or monthly spam
// tables — treated as spam-agency devices to exclude. UNION ALL may
// yield duplicates; harmless because the result is only anti-joined.
val agency_id = sc.sql(
s"""
|SELECT DISTINCT(cl_id) as device_id
|FROM online.ml_hospital_spam_pv_day
|WHERE partition_date >= '20180402'
|AND partition_date <= '${partition_date}'
|AND pv_ratio >= 0.95
|UNION ALL
|SELECT DISTINCT(cl_id) as device_id
|FROM online.ml_hospital_spam_pv_month
|WHERE partition_date >= '20171101'
|AND partition_date <= '${partition_date}'
|AND pv_ratio >= 0.95
""".stripMargin
)
agency_id.createOrReplaceTempView("agency_id")
// Explicitly blacklisted devices (jerry_prod.blacklist via TiSpark).
val blacklist_id = sc.sql(
s"""
|SELECT device_id
|from blacklist
""".stripMargin
)
blacklist_id.createOrReplaceTempView("blacklist_id")
// Full exclusion set: spam-agency devices plus blacklisted devices.
val final_id = sc.sql(
s"""
|select device_id
|from agency_id
|UNION ALL
|select device_id
|from blacklist_id
""".stripMargin
)
final_id.createOrReplaceTempView("final_id")
// Per-user total spend: LEFT JOIN + "final_id.device_id is null" is an
// anti-join that drops orders from excluded devices. Only paid (status=2,
// pay_time not null) orders validated after 2018-01-01 are counted.
// NOTE(review): partition_date is hard-coded to '20181223' here instead
// of '${partition_date}' as in the queries above — confirm whether this
// was a one-off backfill or a bug.
val meigou_price = sc.sql(
s"""
|select md.user_id,sum(md.gengmei_price) as pay_all
|from online.ml_meigou_order_detail md left join final_id
|on md.device_id = final_id.device_id
|where md.status= 2
|and final_id.device_id is null
|and md.partition_date = '20181223'
|and md.pay_time is not null
|and md.validate_time>'2018-01-01 00:00:00.0'
|group by md.user_id
|order by sum(md.gengmei_price)
""".stripMargin
)
meigou_price.show(80)
// Append (not overwrite) results to the meigou_price table over JDBC,
// so reruns will add duplicate rows for the same date.
GmeiConfig.writeToJDBCTable(meigou_price, "meigou_price", SaveMode.Append)
}
}
}
......@@ -34,7 +34,7 @@ def sort_app():
df = df.rename(columns={0: "device_id", 1: "app_list"})
df = df.loc[df["app_list"].apply(is_json)]
category = {"competitor":{"新氧美容"},
category = {"competitor":{"新氧美容","悦美","美呗整形","悦美微整形","如丽美容","医美咖","整形去哪儿","美黛拉","整形思密达","美芽"},
"dianshang":{"京东","淘宝","唯品会","天猫","苏宁易购","国美","当当","亚马逊","网易严选","小米有品"},
"kuajing_dianshang": {"小红书", "网易考拉", "洋码头", "达令全球好货", "海狐海淘",
"HIG0", "豌豆公主", "尚品网", "丰趣海淘", "比呀比海外购"},
......@@ -72,7 +72,7 @@ def sort_app():
for i in category.keys():
df[i] = df["app_list"].apply(lambda x: 1 if len(x & category[i]) > 0 else 0)
print(i)
print(df[i].unique())
print(df[i].value_counts())
df = df.drop("app_list",axis=1)
yconnect = create_engine('mysql+pymysql://root:3SYz54LS9#^9sBvC@10.66.157.22:4000/jerry_test?charset=utf8')
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment