Experimental cost-based optimizer to avoid performance regressions
andygrove committed Jun 20, 2024
1 parent 28309a4 commit fc3e5ff
Showing 4 changed files with 95 additions and 0 deletions.
8 changes: 8 additions & 0 deletions common/src/main/scala/org/apache/comet/CometConf.scala
@@ -401,6 +401,14 @@ object CometConf extends ShimCometConf {
      .booleanConf
      .createWithDefault(false)

  val COMET_CBO_ENABLED: ConfigEntry[Boolean] =
    conf("spark.comet.cbo.enabled")
      .doc(
        "Experimental cost-based optimizer to avoid performance regressions where the " +
          "Comet plan may be slower than the Spark plan.")
      .booleanConf
      .createWithDefault(false)

}

object ConfigHelpers {
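The new `spark.comet.cbo.enabled` flag defaults to `false`. As a minimal, hypothetical sketch of how it could be turned on when building a SparkSession (assuming the Comet plugin class `org.apache.spark.CometPlugin` from the Comet installation guide; the app name and query are made up and not part of this commit):

```scala
// Illustrative usage only (not part of this commit): enabling the new flag when
// building a SparkSession with the Comet plugin loaded. Other Comet settings may
// also be required depending on your deployment; see the installation guide.
import org.apache.spark.sql.SparkSession

object CometCboExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("comet-cbo-example") // hypothetical app name
      .config("spark.plugins", "org.apache.spark.CometPlugin")
      .config("spark.comet.enabled", "true")
      // new in this commit: fall back to the Spark plan when the cost estimate
      // for the Comet plan is higher
      .config("spark.comet.cbo.enabled", "true")
      .getOrCreate()

    spark.range(10).show()
    spark.stop()
  }
}
```

The same setting can equally be passed on the command line, e.g. `--conf spark.comet.cbo.enabled=true` with `spark-submit`.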
1 change: 1 addition & 0 deletions docs/source/user-guide/configs.md
@@ -25,6 +25,7 @@ Comet provides the following configuration settings.
|--------|-------------|---------------|
| spark.comet.batchSize | The columnar batch size, i.e., the maximum number of rows that a batch can contain. | 8192 |
| spark.comet.cast.allowIncompatible | Comet is not currently fully compatible with Spark for all cast operations. Set this config to true to allow them anyway. See compatibility guide for more information. | false |
| spark.comet.cbo.enabled | Experimental cost-based optimizer to avoid performance regressions where the Comet plan may be slower than the Spark plan. | false |
| spark.comet.columnar.shuffle.async.enabled | Whether to enable asynchronous shuffle for Arrow-based shuffle. By default, this config is false. | false |
| spark.comet.columnar.shuffle.async.max.thread.num | Maximum number of threads on an executor used for Comet async columnar shuffle. By default, this config is 100. This is the upper bound of the total number of shuffle threads per executor. In other words, if the number of cores * the number of shuffle threads per task `spark.comet.columnar.shuffle.async.thread.num` is larger than this config, Comet will use this config as the number of shuffle threads per executor instead. | 100 |
| spark.comet.columnar.shuffle.async.thread.num | Number of threads used for Comet async columnar shuffle per shuffle task. By default, this config is 3. Note that more threads means more memory requirement to buffer shuffle data before flushing to disk. Also, more threads may not always improve performance, and should be set based on the number of cores available. | 3 |
76 changes: 76 additions & 0 deletions spark/src/main/scala/org/apache/comet/CometCostEvaluator.scala
@@ -0,0 +1,76 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.comet

import org.apache.spark.internal.Logging
import org.apache.spark.sql.comet.{CometExec, CometRowToColumnarExec}
import org.apache.spark.sql.execution.{ColumnarToRowExec, RowToColumnarExec, SparkPlan}
import org.apache.spark.sql.execution.adaptive.{Cost, CostEvaluator, SimpleCost}

/**
 * The goal of this cost model is to avoid introducing performance regressions in query stages
 * during AQE.
 *
 * This evaluator will be called twice: once for the original Spark plan and once for the Comet
 * plan. Spark will choose the cheapest plan.
 */
class CometCostEvaluator extends CostEvaluator with Logging {

  /** Baseline cost for Spark operator is 1.0 */
  val DEFAULT_SPARK_OPERATOR_COST = 1.0

  /** Relative cost of Comet operator. This is intentionally conservative for now */
  val DEFAULT_COMET_OPERATOR_COST = 0.8

  /** Transitions are expensive */
  val DEFAULT_TRANSITION_COST = 2.0

  override def evaluateCost(plan: SparkPlan): Cost = {

    def computePlanCost(plan: SparkPlan): Double = {
      // TODO this is a crude prototype where we just penalize transitions, but
      // this can evolve into a true cost model where we have real numbers for the relative
      // performance of Comet operators & expressions versus the Spark versions
      //
      // Some areas to explore
      // - can we use statistics from previous query stage(s)?
      // - transition after filter should be cheaper than transition before filter (such as when
      //   reading from Parquet followed by filter. Comet will filter first then transition)
      val childPlanCost = plan.children.map(computePlanCost).sum
      val operatorCost = plan match {
        case RowToColumnarExec(_) => DEFAULT_TRANSITION_COST
        case ColumnarToRowExec(_) => DEFAULT_TRANSITION_COST
        case CometRowToColumnarExec(_) => DEFAULT_TRANSITION_COST
        case _: CometExec => DEFAULT_COMET_OPERATOR_COST
        case _ => DEFAULT_SPARK_OPERATOR_COST
      }
      operatorCost + childPlanCost
    }

    // TODO can we access statistics from previous query stages?
    val estimatedRowCount = 1000
    val cost = (computePlanCost(plan) * estimatedRowCount).toLong

    logWarning(s"Computed cost of $cost for $plan")

    SimpleCost(cost)
  }

}
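To make the default weights concrete, here is a small worked example (illustration only, not from the commit) that applies them to two hypothetical shapes of the same query stage: a pure Spark stage with three operators, and a Comet stage with three Comet operators followed by one columnar-to-row transition.

```scala
// Illustration only: the plan shapes and row count are assumptions; the weights
// mirror the defaults in CometCostEvaluator above.
object CometCostWorkedExample {
  def main(args: Array[String]): Unit = {
    val sparkOperatorCost = 1.0 // DEFAULT_SPARK_OPERATOR_COST
    val cometOperatorCost = 0.8 // DEFAULT_COMET_OPERATOR_COST
    val transitionCost = 2.0 // DEFAULT_TRANSITION_COST
    val estimatedRowCount = 1000 // fixed placeholder, as in evaluateCost

    // Pure Spark stage: Scan -> Filter -> Project (three Spark operators)
    val sparkPlanCost = (3 * sparkOperatorCost * estimatedRowCount).toLong // 3000

    // Comet stage: CometScan -> CometFilter -> CometProject -> ColumnarToRowExec
    val cometPlanCost =
      ((3 * cometOperatorCost + transitionCost) * estimatedRowCount).toLong // 4400

    // With these weights a single transition outweighs the per-operator savings,
    // so the evaluator would keep the original Spark plan for this stage.
    println(s"spark=$sparkPlanCost comet=$cometPlanCost")
  }
}
```

Because the transition penalty dominates for short plans, a Comet stage that has to convert back to rows can score worse than staying in Spark, which is exactly the kind of regression this guard is meant to avoid.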
10 changes: 10 additions & 0 deletions spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
@@ -737,6 +737,16 @@ class CometSparkSessionExtensions
      } else {
        var newPlan = transform(plan)

        if (CometConf.COMET_CBO_ENABLED.get()) {
          val costEvaluator = new CometCostEvaluator()
          val sparkCost = costEvaluator.evaluateCost(plan)
          val cometCost = costEvaluator.evaluateCost(newPlan)
          if (cometCost > sparkCost) {
            logWarning(s"Comet plan is more expensive than Spark plan ($cometCost > $sparkCost)")
            return plan
          }
        }

        // if the plan cannot be run fully natively then explain why (when appropriate
        // config is enabled)
        if (CometConf.COMET_EXPLAIN_FALLBACK_ENABLED.get()) {
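The `cometCost > sparkCost` comparison above is only valid if the costs are ordered. A minimal sketch, assuming Spark's AQE `Cost` trait extends `Ordered[Cost]` and that `SimpleCost` compares by its underlying `Long` value; the numbers reuse the worked example earlier and are illustrative only:

```scala
// Sketch under the assumptions stated above; not produced by this commit.
import org.apache.spark.sql.execution.adaptive.SimpleCost

object CostComparisonSketch {
  def main(args: Array[String]): Unit = {
    val sparkCost = SimpleCost(3000L)
    val cometCost = SimpleCost(4400L)

    // When the Comet estimate is higher, the extension logs a warning and
    // returns the original Spark plan unchanged.
    if (cometCost > sparkCost) {
      println(s"Comet plan is more expensive than Spark plan ($cometCost > $sparkCost)")
    }
  }
}
```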
