
[SPARK-34581][SQL] Don't optimize out grouping expressions from aggregate expressions without aggregate function #32396
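In short: an aggregate expression that contains no aggregate function must keep referring to a grouping expression, but optimizer rules could rewrite it into a form that no longer matches any grouping expression. A hedged sketch of the failure mode (it assumes a SparkSession `spark` and a view `testData` with a nullable column `a`, the names used by the SQL tests added below):

```scala
// BooleanSimplification rewrites NOT(isnull(a)) to isnotnull(a); before this
// change that left the aggregate expression referring to no grouping
// expression at all, producing an invalid Aggregate.
val df = spark.sql(
  """SELECT not(a IS NULL), count(*) AS c
    |FROM testData
    |GROUP BY a IS NULL""".stripMargin)

// With this change the optimizer instead sees a plan of the shape
//   Aggregate [_groupingexpression#N], [NOT _groupingexpression#N AS ..., count(1) AS c#M]
//   +- Project [..., isnull(a#K) AS _groupingexpression#N]
// so expression rewrites can no longer break the grouping reference.
df.show()
```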

Closed
@@ -80,6 +80,14 @@ object AggregateExpression {
filter,
NamedExpression.newExprId)
}

+  def containsAggregate(expr: Expression): Boolean = {
+    expr.find(isAggregate).isDefined
+  }
+
+  def isAggregate(expr: Expression): Boolean = {
+    expr.isInstanceOf[AggregateExpression] || PythonUDF.isGroupedAggPandasUDF(expr)
+  }
}
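For reference, a hedged sketch (not part of the diff) of how the two new helpers differ, written against Catalyst's test DSL; `'a` is a hypothetical integer attribute:

```scala
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression

val withAgg = sum('a) + 1  // Add(AggregateExpression(sum), Literal(1))
val noAgg = 'a + 1         // Add(a, Literal(1))

AggregateExpression.containsAggregate(withAgg)  // true: searches the whole subtree
AggregateExpression.isAggregate(withAgg)        // false: checks only the root node (Add)
AggregateExpression.containsAggregate(noAgg)    // false

// isAggregate also accepts grouped-aggregate pandas UDFs via
// PythonUDF.isGroupedAggPandasUDF, so both helpers cover those too.
```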

@@ -18,23 +18,14 @@
package org.apache.spark.sql.catalyst.optimizer

import org.apache.spark.sql.catalyst.expressions._
-import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan}
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule

/**
* Simplify redundant [[CreateNamedStruct]], [[CreateArray]] and [[CreateMap]] expressions.
*/
object SimplifyExtractValueOps extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan transform {
-    // One place where this optimization is invalid is an aggregation where the select
-    // list expression is a function of a grouping expression:
-    //
-    // SELECT struct(a,b).a FROM tbl GROUP BY struct(a,b)
-    //
-    // cannot be simplified to SELECT a FROM tbl GROUP BY struct(a,b). So just skip this
-    // optimization for Aggregates (although this misses some cases where the optimization
-    // can be made).
-    case a: Aggregate => a
case p => p.transformExpressionsUp {
// Remove redundant field extraction.
case GetStructField(createNamedStruct: CreateNamedStruct, ordinal, _) =>
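The comment removed above described why this rule used to skip Aggregate nodes entirely. A hedged sketch of that case, in the style of ComplexTypesSuite, with a hypothetical relation `relation('a.int, 'b.int)`:

```scala
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions.{CreateNamedStruct, GetStructField}
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation

val relation = LocalRelation('a.int, 'b.int)

// Simplifying struct(a, b).a to a bare `a` would be invalid here, because `a`
// on its own is not a grouping expression:
val struct = CreateNamedStruct(Seq("a", 'a, "b", 'b))
val query = relation.groupBy(struct)(GetStructField(struct, 0, None) as "x")

// The blanket skip is safe to drop because PullOutGroupingExpressions (added
// by this PR) runs first and rewrites the plan to, schematically:
//   Aggregate [_groupingexpression#1], [_groupingexpression#1.a AS x]
//   +- Project [named_struct(a, a, b, b) AS _groupingexpression#1]
// so no GetStructField(CreateNamedStruct(...)) remains under an Aggregate.
```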
@@ -148,6 +148,7 @@ abstract class Optimizer(catalogManager: CatalogManager)
EliminateView,
ReplaceExpressions,
RewriteNonCorrelatedExists,
+      PullOutGroupingExpressions,
ComputeCurrentTime,
GetCurrentDatabaseAndCatalog(catalogManager)) ::
//////////////////////////////////////////////////////////////////////////////////////////
@@ -267,7 +268,8 @@ abstract class Optimizer(catalogManager: CatalogManager)
RewriteCorrelatedScalarSubquery.ruleName ::
RewritePredicateSubquery.ruleName ::
NormalizeFloatingNumbers.ruleName ::
-      ReplaceUpdateFieldsExpression.ruleName :: Nil
+      ReplaceUpdateFieldsExpression.ruleName ::
+      PullOutGroupingExpressions.ruleName :: Nil

/**
* Optimize all the subqueries inside expression.
@@ -524,23 +526,19 @@ object RemoveRedundantAggregates extends Rule[LogicalPlan] with AliasHelper {
}

private def lowerIsRedundant(upper: Aggregate, lower: Aggregate): Boolean = {
-    val upperHasNoAggregateExpressions = !upper.aggregateExpressions.exists(isAggregate)
+    val upperHasNoAggregateExpressions =
+      !upper.aggregateExpressions.exists(AggregateExpression.containsAggregate)

lazy val upperRefsOnlyDeterministicNonAgg = upper.references.subsetOf(AttributeSet(
lower
.aggregateExpressions
.filter(_.deterministic)
-        .filter(!isAggregate(_))
+        .filterNot(AggregateExpression.containsAggregate)
.map(_.toAttribute)
))

upperHasNoAggregateExpressions && upperRefsOnlyDeterministicNonAgg
}

-  private def isAggregate(expr: Expression): Boolean = {
-    expr.find(e => e.isInstanceOf[AggregateExpression] ||
-      PythonUDF.isGroupedAggPandasUDF(e)).isDefined
-  }
}
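For context, a hedged sketch of a plan this predicate accepts (Catalyst test DSL, hypothetical two-column relation); the refactoring itself only moves the subtree check into AggregateExpression.containsAggregate and does not change behavior:

```scala
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation

val relation = LocalRelation('a.int, 'b.int)

// The upper Aggregate contains no aggregate function and references only the
// deterministic, non-aggregate output of the lower one, so the lower
// Aggregate (a plain de-duplication) is redundant:
val query = relation
  .groupBy('a, 'b)('a, 'b)  // lower: behaves like SELECT DISTINCT a, b
  .groupBy('a)('a)          // upper: re-groups a subset of that output

// RemoveRedundantAggregates rewrites query.analyze to relation.groupBy('a)('a).
```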

@@ -0,0 +1,79 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.catalyst.optimizer

import scala.collection.mutable

import org.apache.spark.sql.catalyst.expressions.{Alias, Expression, NamedExpression}
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.rules.Rule

/**
* This rule ensures that [[Aggregate]] nodes don't contain complex grouping expressions in the
* optimization phase.
*
* Complex grouping expressions are pulled out to a [[Project]] node under [[Aggregate]] and are
* referenced in both grouping expressions and aggregate expressions without aggregate functions.
* These references ensure that optimization rules don't change the aggregate expressions to
* invalid ones that no longer refer to any grouping expressions, and they also simplify the
* expression transformations on the node (each expression needs to be transformed only once).
*
* For example, in the following query Spark shouldn't optimize the aggregate expression
* `Not(IsNull(c))` to `IsNotNull(c)` as the grouping expression is `IsNull(c)`:
* SELECT not(c IS NULL)
* FROM t
* GROUP BY c IS NULL
* Instead, the aggregate expression references a `_groupingexpression` attribute:
* Aggregate [_groupingexpression#233], [NOT _groupingexpression#233 AS (NOT (c IS NULL))#230]
* +- Project [isnull(c#219) AS _groupingexpression#233]
* +- LocalRelation [c#219]
*/
object PullOutGroupingExpressions extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = {
    plan transform {
      case a: Aggregate if a.resolved =>
        val complexGroupingExpressionMap = mutable.LinkedHashMap.empty[Expression, NamedExpression]
        val newGroupingExpressions = a.groupingExpressions.map {
          case e if !e.foldable && e.children.nonEmpty =>
            complexGroupingExpressionMap
              .getOrElseUpdate(e.canonicalized, Alias(e, "_groupingexpression")())
              .toAttribute
          case o => o
        }
        if (complexGroupingExpressionMap.nonEmpty) {
          def replaceComplexGroupingExpressions(e: Expression): Expression = {
            e match {
              case _ if AggregateExpression.isAggregate(e) => e
              case _ if e.foldable => e
              case _ if complexGroupingExpressionMap.contains(e.canonicalized) =>
                complexGroupingExpressionMap.get(e.canonicalized).map(_.toAttribute).getOrElse(e)
              case _ => e.mapChildren(replaceComplexGroupingExpressions)
            }
          }

          val newAggregateExpressions = a.aggregateExpressions
            .map(replaceComplexGroupingExpressions(_).asInstanceOf[NamedExpression])
Contributor:

We need to do a manual tree traversal if we want to stop recursion earlier, e.g. case _ if AggregateExpression.isAggregate(e) => e

Contributor:

Does the following one work?

a.transformExpressionsWithPruning(e => !(AggregateExpression.isAggregate(e) || e.foldable)) {
  ....
}

Contributor Author:

Hmm, yes, this could work with some explicit casting.

Contributor Author:

But this would also traverse a.groupingExpressions, which is not needed.

Contributor:

> But this would also traverse a.groupingExpressions, which is not needed.

You're right. I think the following would behave the same as the manual recursion:

a.aggregateExpressions.map(_.transformWithPruning(e => !(AggregateExpression.isAggregate(e) || e.foldable))({
  // the first two original case branches can be skipped here.
  .....
}).asInstanceOf....)

Contributor:

Anyway, it's just my small preference -- it seems neater to use framework functions if it works. Feel free to merge whatever you feel comfortable with.

Contributor Author (@peter-toth, May 1, 2021):

I think I'm leaving this PR as it is now.

But I tested that peter-toth@ed374fe could work; I just need to cast TreePatternBits to Expression.
Although I wonder if it would make sense to split plan and expression pruning in the future like this: peter-toth@d817fc7, so this pruning (and probably other similar use cases where we want to stop traversal) would become simpler: peter-toth@d817fc7#diff-57201016f79912c165715811d7f7f37e2acbef2ae7b241c3c8a0b928d0052eb5R61

Contributor:

Thanks a lot for exploring this, Peter! I'll think more about such use cases.

          val newChild = Project(a.child.output ++ complexGroupingExpressionMap.values, a.child)
          Aggregate(newGroupingExpressions, newAggregateExpressions, newChild)
        } else {
          a
        }
    }
  }
}
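A hedged usage sketch of the new rule, mirroring the scaladoc example above (Catalyst test DSL, hypothetical single-column relation):

```scala
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions.{IsNull, Not}
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation

val t = LocalRelation('c.int)
val plan = t.groupBy(IsNull('c))(Not(IsNull('c)).as("x")).analyze

PullOutGroupingExpressions(plan)
// Schematically:
//   Aggregate [_groupingexpression#2], [NOT _groupingexpression#2 AS x#1]
//   +- Project [c#0, isnull(c#0) AS _groupingexpression#2]
//      +- LocalRelation <empty>, [c#0]
// Bare attributes and foldable expressions stay in the grouping list as-is;
// only non-foldable expressions with children are pulled out.
```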
@@ -297,11 +297,9 @@ object PhysicalAggregation {
val aggregateExpressions = resultExpressions.flatMap { expr =>
expr.collect {
// addExpr() always returns false for non-deterministic expressions and does not add them.
-        case agg: AggregateExpression
-          if !equivalentAggregateExpressions.addExpr(agg) => agg
-        case udf: PythonUDF
-          if PythonUDF.isGroupedAggPandasUDF(udf) &&
-            !equivalentAggregateExpressions.addExpr(udf) => udf
+        case a
+          if AggregateExpression.isAggregate(a) && !equivalentAggregateExpressions.addExpr(a) =>
+          a
}
}

@@ -36,6 +36,8 @@ class ComplexTypesSuite extends PlanTest with ExpressionEvalHelper {

object Optimizer extends RuleExecutor[LogicalPlan] {
val batches =
Batch("Finish Analysis", Once,
PullOutGroupingExpressions) ::
Batch("collapse projections", FixedPoint(10),
CollapseProject) ::
Batch("Constant Folding", FixedPoint(10),
@@ -57,7 +59,7 @@ class ComplexTypesSuite extends PlanTest with ExpressionEvalHelper {
private def checkRule(originalQuery: LogicalPlan, correctAnswer: LogicalPlan) = {
val optimized = Optimizer.execute(originalQuery.analyze)
assert(optimized.resolved, "optimized plans must be still resolvable")
-    comparePlans(optimized, correctAnswer.analyze)
+    comparePlans(optimized, PullOutGroupingExpressions(correctAnswer.analyze))
}

test("explicit get from namedStruct") {
@@ -405,14 +407,6 @@ class ComplexTypesSuite extends PlanTest with ExpressionEvalHelper {
val arrayAggRel = relation.groupBy(
CreateArray(Seq('nullable_id)))(GetArrayItem(CreateArray(Seq('nullable_id)), 0))
checkRule(arrayAggRel, arrayAggRel)

Contributor Author:

This can be removed now. It is optimized to:

Aggregate [*id#0L], [CASE WHEN (0 = *id#0L) THEN (*id#0L + 1) END AS a#0L]
+- LocalRelation <empty>, [*id#0L, nullable_id#0L]

-    // This could be done if we had a more complex rule that checks that
-    // the CreateMap does not come from key.
-    val originalQuery = relation
-      .groupBy('id)(
-        GetMapValue(CreateMap(Seq('id, 'id + 1L)), 0L) as "a"
-      )
-    checkRule(originalQuery, originalQuery)
}

test("SPARK-23500: namedStruct and getField in the same Project #1") {
10 changes: 10 additions & 0 deletions sql/core/src/test/resources/sql-tests/inputs/group-by.sql
@@ -179,3 +179,13 @@ SELECT count(*) FROM test_agg WHERE k = 1 or k = 2 or count(*) + 1L > 1L or max(

-- Aggregate with multiple distinct decimal columns
SELECT AVG(DISTINCT decimal_col), SUM(DISTINCT decimal_col) FROM VALUES (CAST(1 AS DECIMAL(9, 0))) t(decimal_col);

+-- SPARK-34581: Don't optimize out grouping expressions from aggregate expressions without aggregate function
+SELECT not(a IS NULL), count(*) AS c
+FROM testData
+GROUP BY a IS NULL;
+
+SELECT if(not(a IS NULL), rand(0), 1), count(*) AS c
+FROM testData
+GROUP BY a IS NULL;

24 changes: 23 additions & 1 deletion sql/core/src/test/resources/sql-tests/results/group-by.sql.out
@@ -1,5 +1,5 @@
-- Automatically generated by SQLQueryTestSuite
--- Number of queries: 62
+-- Number of queries: 64


-- !query
@@ -642,3 +642,25 @@ SELECT AVG(DISTINCT decimal_col), SUM(DISTINCT decimal_col) FROM VALUES (CAST(1
struct<avg(DISTINCT decimal_col):decimal(13,4),sum(DISTINCT decimal_col):decimal(19,0)>
-- !query output
1.0000 1


+-- !query
+SELECT not(a IS NULL), count(*) AS c
+FROM testData
+GROUP BY a IS NULL
+-- !query schema
+struct<(NOT (a IS NULL)):boolean,c:bigint>
+-- !query output
+false 2
+true 7
+
+
+-- !query
+SELECT if(not(a IS NULL), rand(0), 1), count(*) AS c
+FROM testData
+GROUP BY a IS NULL
+-- !query schema
+struct<(IF((NOT (a IS NULL)), rand(0), 1)):double,c:bigint>
+-- !query output
+0.7604953758285915 7
+1.0 2
@@ -199,19 +199,19 @@ Right keys [1]: [i_item_sk#16]
Join condition: None

(23) Project [codegen id : 8]
-Output [3]: [d_date#12, i_item_sk#16, i_item_desc#17]
+Output [3]: [d_date#12, i_item_sk#16, substr(i_item_desc#17, 1, 30) AS _groupingexpression#19]
Input [4]: [ss_item_sk#8, d_date#12, i_item_sk#16, i_item_desc#17]

(24) HashAggregate [codegen id : 8]
-Input [3]: [d_date#12, i_item_sk#16, i_item_desc#17]
-Keys [3]: [substr(i_item_desc#17, 1, 30) AS substr(i_item_desc#17, 1, 30)#19, i_item_sk#16, d_date#12]
+Input [3]: [d_date#12, i_item_sk#16, _groupingexpression#19]
+Keys [3]: [_groupingexpression#19, i_item_sk#16, d_date#12]
Functions [1]: [partial_count(1)]
Aggregate Attributes [1]: [count#20]
-Results [4]: [substr(i_item_desc#17, 1, 30)#19, i_item_sk#16, d_date#12, count#21]
+Results [4]: [_groupingexpression#19, i_item_sk#16, d_date#12, count#21]

(25) HashAggregate [codegen id : 8]
-Input [4]: [substr(i_item_desc#17, 1, 30)#19, i_item_sk#16, d_date#12, count#21]
-Keys [3]: [substr(i_item_desc#17, 1, 30)#19, i_item_sk#16, d_date#12]
+Input [4]: [_groupingexpression#19, i_item_sk#16, d_date#12, count#21]
+Keys [3]: [_groupingexpression#19, i_item_sk#16, d_date#12]
Functions [1]: [count(1)]
Aggregate Attributes [1]: [count(1)#22]
Results [2]: [i_item_sk#16 AS item_sk#23, count(1)#22 AS count(1)#24]
@@ -406,19 +406,19 @@ Right keys [1]: [i_item_sk#56]
Join condition: None

(69) Project [codegen id : 25]
-Output [3]: [d_date#55, i_item_sk#56, i_item_desc#57]
+Output [3]: [d_date#55, i_item_sk#56, substr(i_item_desc#57, 1, 30) AS _groupingexpression#58]
Input [4]: [ss_item_sk#54, d_date#55, i_item_sk#56, i_item_desc#57]

(70) HashAggregate [codegen id : 25]
-Input [3]: [d_date#55, i_item_sk#56, i_item_desc#57]
-Keys [3]: [substr(i_item_desc#57, 1, 30) AS substr(i_item_desc#57, 1, 30)#58, i_item_sk#56, d_date#55]
+Input [3]: [d_date#55, i_item_sk#56, _groupingexpression#58]
+Keys [3]: [_groupingexpression#58, i_item_sk#56, d_date#55]
Functions [1]: [partial_count(1)]
Aggregate Attributes [1]: [count#59]
-Results [4]: [substr(i_item_desc#57, 1, 30)#58, i_item_sk#56, d_date#55, count#60]
+Results [4]: [_groupingexpression#58, i_item_sk#56, d_date#55, count#60]

(71) HashAggregate [codegen id : 25]
-Input [4]: [substr(i_item_desc#57, 1, 30)#58, i_item_sk#56, d_date#55, count#60]
-Keys [3]: [substr(i_item_desc#57, 1, 30)#58, i_item_sk#56, d_date#55]
+Input [4]: [_groupingexpression#58, i_item_sk#56, d_date#55, count#60]
+Keys [3]: [_groupingexpression#58, i_item_sk#56, d_date#55]
Functions [1]: [count(1)]
Aggregate Attributes [1]: [count(1)#61]
Results [2]: [i_item_sk#56 AS item_sk#23, count(1)#61 AS count(1)#62]
@@ -34,8 +34,8 @@ WholeStageCodegen (36)
Sort [item_sk]
Project [item_sk]
Filter [count(1)]
-HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count]
-HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count]
+HashAggregate [_groupingexpression,i_item_sk,d_date,count] [count(1),item_sk,count(1),count]
+HashAggregate [_groupingexpression,i_item_sk,d_date] [count,count]
Project [d_date,i_item_sk,i_item_desc]
SortMergeJoin [ss_item_sk,i_item_sk]
InputAdapter
@@ -177,8 +177,8 @@ WholeStageCodegen (36)
Sort [item_sk]
Project [item_sk]
Filter [count(1)]
-HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count]
-HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count]
+HashAggregate [_groupingexpression,i_item_sk,d_date,count] [count(1),item_sk,count(1),count]
+HashAggregate [_groupingexpression,i_item_sk,d_date] [count,count]
Project [d_date,i_item_sk,i_item_desc]
SortMergeJoin [ss_item_sk,i_item_sk]
InputAdapter
@@ -155,23 +155,23 @@ Right keys [1]: [i_item_sk#14]
Join condition: None

(18) Project [codegen id : 3]
-Output [3]: [d_date#11, i_item_sk#14, i_item_desc#15]
+Output [3]: [d_date#11, i_item_sk#14, substr(i_item_desc#15, 1, 30) AS _groupingexpression#17]
Input [4]: [ss_item_sk#7, d_date#11, i_item_sk#14, i_item_desc#15]

(19) HashAggregate [codegen id : 3]
-Input [3]: [d_date#11, i_item_sk#14, i_item_desc#15]
-Keys [3]: [substr(i_item_desc#15, 1, 30) AS substr(i_item_desc#15, 1, 30)#17, i_item_sk#14, d_date#11]
+Input [3]: [d_date#11, i_item_sk#14, _groupingexpression#17]
+Keys [3]: [_groupingexpression#17, i_item_sk#14, d_date#11]
Functions [1]: [partial_count(1)]
Aggregate Attributes [1]: [count#18]
-Results [4]: [substr(i_item_desc#15, 1, 30)#17, i_item_sk#14, d_date#11, count#19]
+Results [4]: [_groupingexpression#17, i_item_sk#14, d_date#11, count#19]

(20) Exchange
-Input [4]: [substr(i_item_desc#15, 1, 30)#17, i_item_sk#14, d_date#11, count#19]
-Arguments: hashpartitioning(substr(i_item_desc#15, 1, 30)#17, i_item_sk#14, d_date#11, 5), ENSURE_REQUIREMENTS, [id=#20]
+Input [4]: [_groupingexpression#17, i_item_sk#14, d_date#11, count#19]
+Arguments: hashpartitioning(_groupingexpression#17, i_item_sk#14, d_date#11, 5), ENSURE_REQUIREMENTS, [id=#20]

(21) HashAggregate [codegen id : 4]
-Input [4]: [substr(i_item_desc#15, 1, 30)#17, i_item_sk#14, d_date#11, count#19]
-Keys [3]: [substr(i_item_desc#15, 1, 30)#17, i_item_sk#14, d_date#11]
+Input [4]: [_groupingexpression#17, i_item_sk#14, d_date#11, count#19]
+Keys [3]: [_groupingexpression#17, i_item_sk#14, d_date#11]
Functions [1]: [count(1)]
Aggregate Attributes [1]: [count(1)#21]
Results [2]: [i_item_sk#14 AS item_sk#22, count(1)#21 AS count(1)#23]
@@ -29,11 +29,11 @@ WholeStageCodegen (24)
WholeStageCodegen (4)
Project [item_sk]
Filter [count(1)]
-HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count]
+HashAggregate [_groupingexpression,i_item_sk,d_date,count] [count(1),item_sk,count(1),count]
InputAdapter
-Exchange [substr(i_item_desc, 1, 30),i_item_sk,d_date] #5
+Exchange [_groupingexpression,i_item_sk,d_date] #5
WholeStageCodegen (3)
-HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count]
+HashAggregate [_groupingexpression,i_item_sk,d_date] [count,count]
Project [d_date,i_item_sk,i_item_desc]
BroadcastHashJoin [ss_item_sk,i_item_sk]
Project [ss_item_sk,d_date]