0
点赞
收藏
分享

微信扫一扫

自定义聚合函数(强类型)


强类型用户自定义聚合函数：通过继承 Aggregator 来实现强类型自定义聚合函数

package sparksql01

import org.apache.spark.SparkConf
import org.apache.spark.sql.{Encoder, Encoders, SparkSession}
import org.apache.spark.sql.expressions.Aggregator

object SparkSQL07_UDAF_Class {

  /** Entry point: computes the average age of users read from
    * input/user.json using a strongly-typed Aggregator, and prints it. */
  def main(args: Array[String]): Unit = {
    // Local-mode Spark configuration using all available cores.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("Test")

    // Build (or reuse) the SparkSession.
    val session = SparkSession.builder().config(sparkConf).getOrCreate()

    // Needed for the .as[UserBean] typed conversion below.
    import session.implicits._

    // Instantiate the typed aggregator and expose it as a named query column.
    val aggregator = new MyAgeAvgClassFuction
    val avgAgeColumn = aggregator.toColumn.name("avgAge")

    // Load the JSON source and view it as a strongly-typed Dataset.
    val userDataset = session.read.json("input/user.json").as[UserBean]

    // Run the aggregation and print the result table.
    userDataset.select(avgAgeColumn).show()

    // Release Spark resources.
    session.stop()
  }
}

// One user record as deserialized from input/user.json.
// NOTE(review): age is declared BigInt, presumably to match the schema
// Spark infers for integral JSON numbers — confirm against the input file.
case class UserBean(name: String, age: BigInt)
// Mutable intermediate buffer for the average-age aggregation:
// running sum of ages plus the number of rows folded in so far.
case class AvgBuffer(var sum: BigInt, var count: Int)

//强类型
// Strongly-typed user-defined aggregate function that averages UserBean.age.
// Type parameters: input row = UserBean, intermediate buffer = AvgBuffer,
// final output = Double.
// (Class name keeps the original spelling "Fuction" so existing callers compile.)
class MyAgeAvgClassFuction extends Aggregator[UserBean, AvgBuffer, Double] {

  /** A fresh, empty buffer: zero sum, zero row count. */
  override def zero: AvgBuffer = AvgBuffer(0, 0)

  /** Folds a single input row into the buffer (within one partition). */
  override def reduce(buffer: AvgBuffer, user: UserBean): AvgBuffer = {
    buffer.sum += user.age
    buffer.count += 1
    buffer
  }

  /** Combines two partial buffers produced by different partitions. */
  override def merge(left: AvgBuffer, right: AvgBuffer): AvgBuffer = {
    left.sum += right.sum
    left.count += right.count
    left
  }

  /** Produces the final average from the fully-merged buffer.
    * NOTE(review): yields NaN when count == 0 (empty dataset) — confirm
    * whether callers expect that or a sentinel value. */
  override def finish(reduction: AvgBuffer): Double =
    reduction.sum.toDouble / reduction.count

  // Encoders telling Spark how to serialize the buffer and the result.
  override def bufferEncoder: Encoder[AvgBuffer] = Encoders.product

  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}

 

举报

相关推荐

0 条评论