icu.wuhufly.dws.machine_runningAVG_compare09.scala
shtd-bd: bigdata source code for shtd
package icu.wuhufly.dws

import icu.wuhufly.utils.{CreateUtils, WriteUtils}
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SparkSession}

object machine_runningAVG_compare09 {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = CreateUtils.getSpark()
    val sc: SparkContext = spark.sparkContext
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Read from the dwd layer.
    spark.sql("use dwd")

    // Per-machine average PM10 compared with the average of that machine's factory,
    // plus a comparison flag: '高' (higher), '相同' (same), '低' (lower).
    val df: DataFrame = spark.sql(
      """
        |select base_id, machine_avg, factory_avg,
        |       case when machine_avg > factory_avg then '高'
        |            when machine_avg = factory_avg then '相同'
        |            else '低' end as comparison,
        |       env_date_year, env_date_month
        |from (
        |  select distinct BaseID as base_id,
        |         avg(PM10) over (partition by BaseID) as machine_avg,
        |         avg(PM10) over (partition by MachineFactory) as factory_avg,
        |         year(ts) as env_date_year,
        |         month(ts) as env_date_month
        |  from (
        |    select t1.PM10,
        |           t1.BaseID,
        |           to_timestamp(t1.InPutTime, 'yyyy-MM-dd HH:mm:ss') as ts,
        |           t2.MachineFactory
        |    from fact_environment_data t1
        |    join dim_machine t2
        |      on t1.BaseID = t2.BaseMachineID
        |  ) t1
        |) t1
        |""".stripMargin)

    // Persist the result to MySQL.
    WriteUtils.writeToMysql("machine_runningAVG_compare", df)

    sc.stop()
  }
}
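
Note: CreateUtils.getSpark() and WriteUtils.writeToMysql come from the project's icu.wuhufly.utils package and are not shown in this file. A minimal sketch of what they might look like is below, assuming a Hive-enabled SparkSession and a JDBC append to MySQL; the JDBC URL, database name, and credentials are placeholders, not values taken from this source.

package icu.wuhufly.utils

import java.util.Properties
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object CreateUtils {
  // Hive-enabled session so that "use dwd" and the dwd tables resolve.
  def getSpark(): SparkSession = {
    SparkSession.builder()
      .appName("machine_runningAVG_compare09")
      .enableHiveSupport()
      .getOrCreate()
  }
}

object WriteUtils {
  // Append the DataFrame to the given MySQL table over JDBC.
  // Host, database, user, password and driver are placeholder assumptions.
  def writeToMysql(table: String, df: DataFrame): Unit = {
    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "123456")
    props.setProperty("driver", "com.mysql.jdbc.Driver")
    df.write
      .mode(SaveMode.Append)
      .jdbc("jdbc:mysql://localhost:3306/shtd_industry", table, props)
  }
}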