diff --git a/common/src/main/scala/org/apache/comet/CometConf.scala b/common/src/main/scala/org/apache/comet/CometConf.scala
index 42fb5fb4c..82f6cc9ab 100644
--- a/common/src/main/scala/org/apache/comet/CometConf.scala
+++ b/common/src/main/scala/org/apache/comet/CometConf.scala
@@ -401,6 +401,14 @@ object CometConf extends ShimCometConf {
       .booleanConf
       .createWithDefault(false)
 
+  val COMET_CBO_ENABLED: ConfigEntry[Boolean] =
+    conf("spark.comet.cbo.enabled")
+      .doc(
+        "Cost-based optimizer to avoid performance regressions where the Comet plan may " +
+          "be slower than the Spark plan.")
+      .booleanConf
+      .createWithDefault(false)
+
 }
 
 object ConfigHelpers {
diff --git a/docs/source/user-guide/configs.md b/docs/source/user-guide/configs.md
index 104f29ce8..7781d846c 100644
--- a/docs/source/user-guide/configs.md
+++ b/docs/source/user-guide/configs.md
@@ -25,6 +25,7 @@ Comet provides the following configuration settings.
 |--------|-------------|---------------|
 | spark.comet.batchSize | The columnar batch size, i.e., the maximum number of rows that a batch can contain. | 8192 |
 | spark.comet.cast.allowIncompatible | Comet is not currently fully compatible with Spark for all cast operations. Set this config to true to allow them anyway. See compatibility guide for more information. | false |
+| spark.comet.cbo.enabled | Cost-based optimizer to avoid performance regressions where the Comet plan may be slower than the Spark plan. | false |
 | spark.comet.columnar.shuffle.async.enabled | Whether to enable asynchronous shuffle for Arrow-based shuffle. By default, this config is false. | false |
 | spark.comet.columnar.shuffle.async.max.thread.num | Maximum number of threads on an executor used for Comet async columnar shuffle. By default, this config is 100. This is the upper bound of total number of shuffle threads per executor. In other words, if the number of cores * the number of shuffle threads per task `spark.comet.columnar.shuffle.async.thread.num` is larger than this config. Comet will use this config as the number of shuffle threads per executor instead. | 100 |
 | spark.comet.columnar.shuffle.async.thread.num | Number of threads used for Comet async columnar shuffle per shuffle task. By default, this config is 3. Note that more threads means more memory requirement to buffer shuffle data before flushing to disk. Also, more threads may not always improve performance, and should be set based on the number of cores available. | 3 |
diff --git a/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala b/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
index 37ca55e27..e19361f5a 100644
--- a/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
+++ b/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
@@ -65,7 +65,7 @@ class CometSparkSessionExtensions
     extensions.injectColumnar { session => CometScanColumnar(session) }
     extensions.injectColumnar { session => CometExecColumnar(session) }
     extensions.injectQueryStagePrepRule { session => CometScanRule(session) }
-    extensions.injectQueryStagePrepRule { session => CometExecRule(session) }
+    extensions.injectQueryStagePrepRule { session => CometQueryStagePrepRule(session) }
   }
 
   case class CometScanColumnar(session: SparkSession) extends ColumnarRule {
@@ -73,7 +73,7 @@ class CometSparkSessionExtensions
   }
 
   case class CometExecColumnar(session: SparkSession) extends ColumnarRule {
-    override def preColumnarTransitions: Rule[SparkPlan] = CometExecRule(session)
+    override def preColumnarTransitions: Rule[SparkPlan] = CometPreColumnarRule(session)
 
     override def postColumnarTransitions: Rule[SparkPlan] =
       EliminateRedundantTransitions(session)
@@ -192,6 +192,54 @@ class CometSparkSessionExtensions
     }
   }
 
+  /**
+   * CometQueryStagePrepRule gets called from AQE for the whole plan multiple times as the plan is
+   * re-optimized after query stages complete. This is where we translate Spark operators and
+   * expressions into Comet/DataFusion native versions.
+   */
+  case class CometQueryStagePrepRule(session: SparkSession) extends Rule[SparkPlan] {
+
+    private val execRule = CometExecRule(session)
+
+    def apply(plan: SparkPlan): SparkPlan = {
+      var needToReplan = false
+      val cometPlan = execRule.apply(plan)
+      if (CometConf.COMET_CBO_ENABLED.get()) {
+        // simple heuristic to avoid moving from Spark execution to Comet execution just
+        // for the final sort
+        cometPlan match {
+          case CometSortExec(_, _, _, e: CometShuffleExchangeExec, _)
+              if !e.child.supportsColumnar =>
+            needToReplan = true
+            // fall back for sort
+            plan.setTagValue(CometExplainInfo.CBO_FALLBACK, "avoid move to Comet just for sort")
+            // fall back for exchange as well
+            plan.children.head
+              .setTagValue(CometExplainInfo.CBO_FALLBACK, "avoid move to Comet just for sort")
+          case _ =>
+        }
+        if (needToReplan) {
+          return execRule.apply(plan)
+        }
+      }
+      cometPlan
+    }
+  }
+
+  /**
+   * CometPreColumnarRule gets called for each individual query stage as it is being prepared for
+   * execution. As the name suggests, this rule is called before any columnar transitions are
+   * inserted into the plan.
+   */
+  case class CometPreColumnarRule(session: SparkSession) extends Rule[SparkPlan] {
+
+    private val execRule = CometExecRule(session)
+
+    def apply(plan: SparkPlan): SparkPlan = {
+      execRule.apply(plan)
+    }
+  }
+
   case class CometExecRule(session: SparkSession) extends Rule[SparkPlan] {
     private def applyCometShuffle(plan: SparkPlan): SparkPlan = {
       plan.transformUp {
diff --git a/spark/src/main/scala/org/apache/comet/ExtendedExplainInfo.scala b/spark/src/main/scala/org/apache/comet/ExtendedExplainInfo.scala
index d7ef4e9f3..690672bac 100644
--- a/spark/src/main/scala/org/apache/comet/ExtendedExplainInfo.scala
+++ b/spark/src/main/scala/org/apache/comet/ExtendedExplainInfo.scala
@@ -85,4 +85,5 @@ class ExtendedExplainInfo extends ExtendedExplainGenerator {
 
 object CometExplainInfo {
   val EXTENSION_INFO = new TreeNodeTag[Set[String]]("CometExtensionInfo")
+  val CBO_FALLBACK = new TreeNodeTag[String]("CometCboFallback")
 }
diff --git a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
index 1e61ef75e..238c47672 100644
--- a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
@@ -41,7 +41,7 @@ import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
 import org.apache.spark.unsafe.types.UTF8String
 
-import org.apache.comet.CometConf
+import org.apache.comet.{CometConf, CometExplainInfo}
 import org.apache.comet.CometSparkSessionExtensions.{isCometOperatorEnabled, isCometScan, isSpark34Plus, withInfo}
 import org.apache.comet.expressions.{CometCast, CometEvalMode, Compatible, Incompatible, Unsupported}
 import org.apache.comet.serde.ExprOuterClass.{AggExpr, DataType => ProtoDataType, Expr, ScalarFunc}
@@ -2268,6 +2268,11 @@ object QueryPlanSerde extends Logging with ShimQueryPlanSerde with CometExprShim
     childOp.foreach(result.addChildren)
 
     op match {
+      case _ if op.getTagValue(CometExplainInfo.CBO_FALLBACK).isDefined =>
+        logWarning("QueryPlanSerde cbo fallback")
+        withInfo(op, "cbo: " + op.getTagValue(CometExplainInfo.CBO_FALLBACK).get)
+        None
+
       case ProjectExec(projectList, child) if isCometOperatorEnabled(op.conf, "project") =>
        val exprs = projectList.map(exprToProto(_, child.output))
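
Usage note (not part of the patch): a minimal sketch of trying the new flag from a SparkSession. The spark.plugins and spark.comet.* settings follow the Comet user guide; the app name, master, and query below are hypothetical, and only spark.comet.cbo.enabled comes from this change.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.desc

object CometCboDemo {
  def main(args: Array[String]): Unit = {
    // Assumes the Comet jar is on the classpath and loaded via the Spark plugin mechanism.
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("comet-cbo-demo")
      .config("spark.plugins", "org.apache.spark.CometPlugin")
      .config("spark.comet.enabled", "true")
      .config("spark.comet.exec.enabled", "true")
      // New flag added by this patch; it defaults to false.
      .config("spark.comet.cbo.enabled", "true")
      .getOrCreate()

    // A query whose plan ends in a sort over a shuffle, the shape the CBO heuristic targets.
    spark.range(1000000).orderBy(desc("id")).show(5)

    spark.stop()
  }
}

With the flag enabled, the heuristic in CometQueryStagePrepRule tags such a sort and its exchange with CometExplainInfo.CBO_FALLBACK, QueryPlanSerde then declines to convert the tagged operators, and the reason is surfaced via withInfo in the extended explain output.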