/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.sql.execution.window

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.{ExternalAppendOnlyUnsafeRowArray, SparkPlan, UnaryExecNode}
import org.apache.spark.sql.types.{CalendarIntervalType, DateType, IntegerType, TimestampType}

/**
 * This class calculates and outputs (windowed) aggregates over the rows in a single (sorted)
 * partition. The aggregates are calculated for each row in the group. Special processing
 * instructions, frames, are used to calculate these aggregates. Frames are processed in the
 * order specified in the window specification (the ORDER BY ... clause). There are five
 * different frame types:
 *  - Entire partition: The frame is the entire partition, i.e.
 *    UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING. In this case, the window function takes all
 *    rows as input and is evaluated once per partition.
 *  - Growing frame: We only add new rows into the frame. Examples are:
 *    1. UNBOUNDED PRECEDING AND 1 PRECEDING
 *    2. UNBOUNDED PRECEDING AND CURRENT ROW
 *    3. UNBOUNDED PRECEDING AND 1 FOLLOWING
 *    Every time we move to a new row to process, we add some rows to the frame. We do not
 *    remove rows from this frame.
 *  - Shrinking frame: We only remove rows from the frame. Examples are:
 *    1. 1 PRECEDING AND UNBOUNDED FOLLOWING
 *    2. CURRENT ROW AND UNBOUNDED FOLLOWING
 *    3. 1 FOLLOWING AND UNBOUNDED FOLLOWING
 *    Every time we move to a new row to process, we remove some rows from the frame. We do not
 *    add rows to this frame.
 *  - Moving frame: Every time we move to a new row to process, we remove some rows from the
 *    frame and we add some rows to the frame. Examples are:
 *    1. 2 PRECEDING AND 1 PRECEDING
 *    2. 1 PRECEDING AND CURRENT ROW
 *    3. CURRENT ROW AND 1 FOLLOWING
 *    4. 1 PRECEDING AND 1 FOLLOWING
 *    5. 1 FOLLOWING AND 2 FOLLOWING
 *  - Offset frame: The frame consists of a single row, which is an offset number of rows away
 *    from the current row. Only [[OffsetWindowFunction]]s can be processed in an offset frame
 *    (see the sketch following this list).
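 *
 * For illustration only (the column names "k" and "ts" are hypothetical), the frame types
 * above can be requested through the public [[org.apache.spark.sql.expressions.Window]] API:
 * {{{
 *   import org.apache.spark.sql.expressions.Window
 *
 *   // Entire partition.
 *   Window.partitionBy("k").orderBy("ts")
 *     .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
 *   // Growing frame.
 *   Window.partitionBy("k").orderBy("ts")
 *     .rowsBetween(Window.unboundedPreceding, Window.currentRow)
 *   // Shrinking frame.
 *   Window.partitionBy("k").orderBy("ts")
 *     .rowsBetween(Window.currentRow, Window.unboundedFollowing)
 *   // Moving frame.
 *   Window.partitionBy("k").orderBy("ts").rowsBetween(-1, 1)
 *   // Offset frame: implied by an offset window function, e.g. lag(e, 1) or lead(e, 1).
 * }}}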
 *
 * Different frame boundaries can be used in Growing, Shrinking and Moving frames. A frame
 * boundary can be either Row or Range based:
 *  - Row Based: A row based boundary is based on the position of the row within the partition.
 *    An offset indicates the number of rows above or below the current row at which the frame
 *    for the current row starts or ends. For instance, given a row based sliding frame with a
 *    lower bound offset of -1 and an upper bound offset of +2, the frame for the row with
 *    index 5 would range from index 4 to index 7.
 *  - Range based: A range based boundary is based on the actual value of the ORDER BY
 *    expression(s). An offset is used to alter the value of the ORDER BY expression: for
 *    instance, if the current ORDER BY expression has a value of 10 and the lower bound offset
 *    is -3, the resulting lower bound for the current row will be 10 - 3 = 7. This, however,
 *    puts a number of constraints on the ORDER BY expressions: there can be only one such
 *    expression and it must have a numerical data type. An exception can be made when the
 *    offset is 0, because no value modification is needed; in this case multiple and
 *    non-numeric ORDER BY expressions are allowed.
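 *
 * As a sketch (again with hypothetical column names), the two boundary kinds map onto the
 * public API as follows:
 * {{{
 *   import org.apache.spark.sql.expressions.Window
 *
 *   // Row based: from 1 row before to 2 rows after the current row.
 *   Window.partitionBy("k").orderBy("x").rowsBetween(-1, 2)
 *   // Range based: all rows whose ORDER BY value lies in [current - 3, current],
 *   // hence the single, numeric ORDER BY expression "x".
 *   Window.partitionBy("k").orderBy("x").rangeBetween(-3, 0)
 * }}}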
 *
 * This is quite an expensive operator because every row for a single group must be in the same
 * partition and partitions must be sorted according to the grouping and sort order. The operator
 * requires the planner to take care of the partitioning and sorting.
 *
 * The operator is semi-blocking: the window functions and aggregates are calculated one group at
 * a time, and the results are only made available once the entire group has been processed. The
 * operator is able to process different frame configurations at the same time. This is done by
 * delegating the actual frame processing (i.e. the calculation of the window functions) to
 * specialized classes (see [[WindowFunctionFrame]]) that each take care of their own frame type:
 * Entire Partition, Sliding, Growing & Shrinking. Boundary evaluation is also delegated to a
 * pair of specialized classes: [[RowBoundOrdering]] & [[RangeBoundOrdering]].
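 *
 * For example (the DataFrame `df` and its columns are illustrative), a query such as the
 * following plans a [[WindowExec]] node, with the planner inserting the exchange and sort
 * that satisfy the operator's distribution and ordering requirements:
 * {{{
 *   import org.apache.spark.sql.expressions.Window
 *   import org.apache.spark.sql.functions.rank
 *
 *   df.withColumn("r", rank().over(Window.partitionBy("k").orderBy("ts")))
 * }}}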
 */
case class WindowExec(
    windowExpression: Seq[NamedExpression],
    partitionSpec: Seq[Expression],
    orderSpec: Seq[SortOrder],
    child: SparkPlan)
  extends WindowExecBase(windowExpression, partitionSpec, orderSpec, child) {

  override def output: Seq[Attribute] =
    child.output ++ windowExpression.map(_.toAttribute)

  override def requiredChildDistribution: Seq[Distribution] = {
    if (partitionSpec.isEmpty) {
      // Only show warning when the number of bytes is larger than 100 MiB?
      logWarning("No Partition Defined for Window operation! Moving all data to a single "
        + "partition, this can cause serious performance degradation.")
      AllTuples :: Nil
    } else ClusteredDistribution(partitionSpec) :: Nil
  }

  override def requiredChildOrdering: Seq[Seq[SortOrder]] =
    Seq(partitionSpec.map(SortOrder(_, Ascending)) ++ orderSpec)

  override def outputOrdering: Seq[SortOrder] = child.outputOrdering

  override def outputPartitioning: Partitioning = child.outputPartitioning

  protected override def doExecute(): RDD[InternalRow] = {
    // Unwrap the window expressions and window frame factories from the map.
    val expressions = windowFrameExpressionFactoryPairs.flatMap(_._1)
    val factories = windowFrameExpressionFactoryPairs.map(_._2).toArray
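
    // Rows of the current partition are buffered before any frame is evaluated. They are kept
    // in memory up to `inMemoryThreshold` rows; past that the buffer becomes spillable and
    // writes to disk once `spillThreshold` is exceeded (see ExternalAppendOnlyUnsafeRowArray).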
    val inMemoryThreshold = sqlContext.conf.windowExecBufferInMemoryThreshold
    val spillThreshold = sqlContext.conf.windowExecBufferSpillThreshold

    // Start processing.
    child.execute().mapPartitions { stream =>
      new Iterator[InternalRow] {

        // Get all relevant projections.
        val result = createResultProjection(expressions)
        val grouping = UnsafeProjection.create(partitionSpec, child.output)

        // Manage the stream and the grouping.
        var nextRow: UnsafeRow = null
        var nextGroup: UnsafeRow = null
        var nextRowAvailable: Boolean = false
        private[this] def fetchNextRow(): Unit = {
          nextRowAvailable = stream.hasNext
          if (nextRowAvailable) {
            nextRow = stream.next().asInstanceOf[UnsafeRow]
            nextGroup = grouping(nextRow)
          } else {
            nextRow = null
            nextGroup = null
          }
        }
        fetchNextRow()

        // Manage the current partition.
        val buffer: ExternalAppendOnlyUnsafeRowArray =
          new ExternalAppendOnlyUnsafeRowArray(inMemoryThreshold, spillThreshold)
        var bufferIterator: Iterator[UnsafeRow] = _
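
        // All window function output is written into this single shared mutable row: each
        // factory below creates a frame that fills the row's slots for its functions, and the
        // result projection later joins it with the unchanged input row.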
        val windowFunctionResult = new SpecificInternalRow(expressions.map(_.dataType))
        val frames = factories.map(_(windowFunctionResult))
        val numFrames = frames.length

        private[this] def fetchNextPartition(): Unit = {
          // Collect all the rows in the current partition.
          // Before we start to fetch new input rows, make a copy of nextGroup.
          val currentGroup = nextGroup.copy()

          // clear last partition
          buffer.clear()
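
          // UnsafeRow overrides equals() with a byte-wise comparison of the underlying rows,
          // so `==` below correctly detects when the projected grouping key changes.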
          while (nextRowAvailable && nextGroup == currentGroup) {
            buffer.add(nextRow)
            fetchNextRow()
          }

          // Setup the frames.
          var i = 0
          while (i < numFrames) {
            frames(i).prepare(buffer)
            i += 1
          }

          // Setup iteration
          rowIndex = 0
          bufferIterator = buffer.generateIterator()
        }

        // Iteration
        var rowIndex = 0

        override final def hasNext: Boolean =
          (bufferIterator != null && bufferIterator.hasNext) || nextRowAvailable
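
        // Reusable joined row: concatenates the current input row with the window function
        // output row right before the final result projection is applied.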
        val join = new JoinedRow
        override final def next(): InternalRow = {
          // Load the next partition if we need to.
          if ((bufferIterator == null || !bufferIterator.hasNext) && nextRowAvailable) {
            fetchNextPartition()
          }

          if (bufferIterator.hasNext) {
            val current = bufferIterator.next()

            // Get the results for the window frames.
            var i = 0
            while (i < numFrames) {
              frames(i).write(rowIndex, current)
              i += 1
            }

            // 'Merge' the input row with the window function result
            join(current, windowFunctionResult)
            rowIndex += 1

            // Return the projection.
            result(join)
          } else {
            throw new NoSuchElementException
          }
        }
      }
    }
  }
}