Skip to content

Commit 252f5c9

Browse files
committed
add docs
1 parent 03cc20d commit 252f5c9

File tree

2 files changed

+9
-0
lines changed

2 files changed

+9
-0
lines changed

sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousCoalesceExec.scala

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,11 @@ import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, SinglePartiti
2727
import org.apache.spark.sql.execution.SparkPlan
2828
import org.apache.spark.sql.execution.streaming.continuous.shuffle.{ContinuousShuffleReadPartition, ContinuousShuffleReadRDD}
2929

30+
/**
31+
* Physical plan for coalescing a continuous processing plan.
32+
*
33+
 * Currently, only coalescing to a single partition is supported. `numPartitions` must be 1.
34+
*/
3035
case class ContinuousCoalesceExec(numPartitions: Int, child: SparkPlan) extends SparkPlan {
3136
override def output: Seq[Attribute] = child.output
3237

sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousCoalesceRDD.scala

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,10 @@ case class ContinuousCoalesceRDDPartition(index: Int) extends Partition {
2727
private[continuous] var writersInitialized: Boolean = false
2828
}
2929

30+
/**
31+
* RDD for continuous coalescing. Asynchronously writes all partitions of `prev` into a local
32+
* continuous shuffle, and then reads them in the task thread using `reader`.
33+
*/
3034
class ContinuousCoalesceRDD(var reader: ContinuousShuffleReadRDD, var prev: RDD[InternalRow])
3135
extends RDD[InternalRow](reader.context, Nil) {
3236

0 commit comments

Comments
 (0)