I use Spark 2.0.1 and Scala 2.11.
This is a question about user-defined aggregate functions (UDAFs) in Spark. I'm using the example answer provided here as the starting point for my question:
import org.apache.spark.sql.expressions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.{Row, Column}
object DummyUDAF extends UserDefinedAggregateFunction {
  def inputSchema = new StructType().add("x", StringType)
  def bufferSchema = new StructType()
    .add("buff", ArrayType(LongType))
    .add("buff2", ArrayType(DoubleType))
  def dataType = new StructType()
    .add("xs", ArrayType(LongType))
    .add("ys", ArrayType(DoubleType))
  def deterministic = true
  def initialize(buffer: MutableAggregationBuffer) = {}
  def update(buffer: MutableAggregationBuffer, input: Row) = {}
  def merge(buffer1: MutableAggregationBuffer, buffer2: Row) = {}
  def evaluate(buffer: Row) = (Array(1L, 2L, 3L), Array(1.0, 2.0, 3.0))
}
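For reference, that stub can be invoked like this (a sketch assuming an active SparkSession with spark.implicits._ imported); it returns a single row holding the two hard-coded arrays as a struct:

val df = Seq("a", "b").toDF("x")
df.select(DummyUDAF($"x")).show(false)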
I'm able to return multiple Maps instead of an Array easily, but I'm not able to mutate the map in the update method:
import org.apache.spark.sql.expressions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.{Row, Column}
import scala.collection.mutable.Map
object DummyUDAF extends UserDefinedAggregateFunction {
  def inputSchema = new StructType().add("x", DoubleType).add("y", IntegerType)
  def bufferSchema = new StructType()
    .add("buff", MapType(DoubleType, IntegerType))
    .add("buff2", MapType(DoubleType, IntegerType))
  def dataType = new StructType()
    .add("xs", MapType(DoubleType, IntegerType))
    .add("ys", MapType(DoubleType, IntegerType))
  def deterministic = true

  def initialize(buffer: MutableAggregationBuffer) = {
    buffer(0) = scala.collection.mutable.Map[Double, Int]()
    buffer(1) = scala.collection.mutable.Map[Double, Int]()
  }

  def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    // Try to mutate the buffered maps in place -- these casts are where
    // the ClassCastException below is thrown.
    buffer(0).asInstanceOf[Map[Double, Int]](input.getDouble(0)) = input.getInt(1)
    buffer(1).asInstanceOf[Map[Double, Int]](input.getDouble(0) * 10) = input.getInt(1) * 10
  }

  def merge(buffer1: MutableAggregationBuffer, buffer2: Row) = {
    buffer1(0).asInstanceOf[Map[Double, Int]] ++= buffer2(0).asInstanceOf[Map[Double, Int]]
    buffer1(1).asInstanceOf[Map[Double, Int]] ++= buffer2(1).asInstanceOf[Map[Double, Int]]
  }

  //def evaluate(buffer: Row) = (Map(1.0 -> 10, 2.0 -> 20), Map(10.0 -> 100, 11.0 -> 110))
  def evaluate(buffer: Row) = (buffer(0).asInstanceOf[Map[Double, Int]], buffer(1).asInstanceOf[Map[Double, Int]])
}
This compiles fine, but gives a runtime error:
val df = Seq((1.0, 1), (2.0, 2)).toDF("k", "v")
df.select(DummyUDAF($"k", $"v")).show(1, false)
org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 70.0 failed 4 times, most recent failure: Lost task 1.3 in stage 70.0 (TID 204, 10.91.252.25): java.lang.ClassCastException: scala.collection.immutable.Map$EmptyMap$ cannot be cast to scala.collection.mutable.Map
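The failing cast appears to be the one in update: logging the runtime type of buffer(0) there (a debugging sketch, not part of the UDAF above) shows an immutable map coming back, regardless of the mutable.Map assigned in initialize:

def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
  // Prints scala.collection.immutable.Map$EmptyMap$ on the first call,
  // i.e. the exact class named in the exception above.
  println(buffer(0).getClass.getName)
}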
Another solution discussed here indicates that this could be a problem with MapType inside the StructType. However, when I try the solution mentioned there, I still get the same error:
val distudaf = new DistinctValues
val df = Seq(("a", "a1"), ("a", "a1"), ("a", "a2"), ("b", "b1"), ("b", "b2"), ("b", "b3"), ("b", "b1"), ("b", "b1")).toDF("col1", "col2")
df.groupBy("col1").agg(distudaf($"col2").as("DV")).show
org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 22.0 failed 4 times, most recent failure: Lost task 1.3 in stage 22.0 (TID 100, 10.91.252.25): java.lang.ClassCastException: scala.collection.immutable.Map$EmptyMap$ cannot be cast to scala.collection.mutable.Map
My preference would be to mutate the Map in place, since I expect the Map to be huge; making a copy and re-assigning it on every update may lead to performance/memory bottlenecks.
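For reference, a minimal sketch of what that copy-and-reassign version of initialize/update might look like (immutable maps throughout, with getAs as the standard Row accessor; this is the allocation-per-row pattern I want to avoid):

def initialize(buffer: MutableAggregationBuffer): Unit = {
  // Assign immutable maps, matching what Spark stores internally.
  buffer(0) = scala.collection.immutable.Map.empty[Double, Int]
  buffer(1) = scala.collection.immutable.Map.empty[Double, Int]
}

def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
  // Read the immutable map back, build an updated copy, re-assign it.
  val m0 = buffer.getAs[scala.collection.immutable.Map[Double, Int]](0)
  val m1 = buffer.getAs[scala.collection.immutable.Map[Double, Int]](1)
  buffer(0) = m0 + (input.getDouble(0) -> input.getInt(1))
  buffer(1) = m1 + (input.getDouble(0) * 10 -> input.getInt(1) * 10)
}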