
Commit c00c0c4

beliefer authored and cloud-fan committed
[SPARK-39858][SQL] Remove unnecessary AliasHelper or PredicateHelper for some rules
### What changes were proposed in this pull request?

While using `AliasHelper`, I found that some rules inherit it without actually using it. This PR removes the unnecessary `AliasHelper` or `PredicateHelper` mix-ins in the following cases:

- The rule inherits `AliasHelper` but never uses it. In this case, we can remove `AliasHelper` directly.
- The rule inherits `PredicateHelper` but never uses it. In this case, we can remove `PredicateHelper` directly.
- The rule inherits both `AliasHelper` and `PredicateHelper`. Since `PredicateHelper` already extends `AliasHelper`, we can remove `AliasHelper`.
- The rule inherits both `OperationHelper` and `PredicateHelper`. Since `OperationHelper` already extends `PredicateHelper`, we can remove `PredicateHelper`.
- The test suite inherits both `PlanTest` and `PredicateHelper`. Since `PlanTest` already extends `PredicateHelper`, we can remove `PredicateHelper`.
- The test suite inherits both `QueryTest` and `PredicateHelper`. Since `QueryTest` already extends `PredicateHelper`, we can remove `PredicateHelper`.

### Why are the changes needed?

Remove unnecessary `AliasHelper` or `PredicateHelper` mix-ins from these rules.

### Does this PR introduce _any_ user-facing change?

No. This only improves the internal implementation.

### How was this patch tested?

N/A

Closes #37272 from beliefer/SPARK-39858.

Authored-by: Jiaan Geng <beliefer@163.com>
Signed-off-by: Wenchen Fan <wenchen@databricks.com>
1 parent 2f812c0 commit c00c0c4

23 files changed: +34 -45 lines changed
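For context on why these removals are behaviour-preserving: the traits form a linear hierarchy (`PredicateHelper` extends `AliasHelper`, `OperationHelper` extends `PredicateHelper`), so mixing in a parent trait alongside its child adds no members, and a helper trait whose members are never referenced can simply be dropped. Below is a minimal, self-contained sketch of that idea; the tiny `AliasHelper`/`PredicateHelper` traits and their toy method bodies are stand-ins for illustration only, not Spark's actual implementations.

```scala
// Minimal stand-ins for the Catalyst helper traits, to show why the extra mix-ins are redundant.
object MixinRedundancySketch {

  trait AliasHelper {
    // Toy stand-in for the real trimAliases, which strips Alias nodes from expressions.
    def trimAliases(e: String): String = e.stripPrefix("alias:")
  }

  // As in Catalyst, PredicateHelper already extends AliasHelper,
  // so anything mixing in PredicateHelper gets AliasHelper's members for free.
  trait PredicateHelper extends AliasHelper {
    def splitConjunctivePredicates(p: String): Seq[String] = p.split(" AND ").toSeq
  }

  // Before: listing AliasHelper is redundant because it is implied by PredicateHelper.
  object RuleBefore extends PredicateHelper with AliasHelper {
    def apply(p: String): Seq[String] = splitConjunctivePredicates(trimAliases(p))
  }

  // After: the same members are still available through PredicateHelper alone.
  object RuleAfter extends PredicateHelper {
    def apply(p: String): Seq[String] = splitConjunctivePredicates(trimAliases(p))
  }

  def main(args: Array[String]): Unit = {
    val predicate = "alias:a = 1 AND b = 2"
    // Both objects behave identically, so dropping the extra mix-in is safe.
    assert(RuleBefore(predicate) == RuleAfter(predicate))
    println(RuleAfter(predicate)) // prints the two conjuncts: a = 1, b = 2
  }
}
```

The same reasoning covers the test suites touched below: `PlanTest` and `QueryTest` already mix in `PredicateHelper`, so listing it again on a suite declaration changes nothing.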

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala

Lines changed: 3 additions & 3 deletions

@@ -739,7 +739,7 @@ class Analyzer(override val catalogManager: CatalogManager)
    }
  }

-object ResolvePivot extends Rule[LogicalPlan] with AliasHelper {
+object ResolvePivot extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsWithPruning(
    _.containsPattern(PIVOT), ruleId) {
    case p: Pivot if !p.childrenResolved || !p.aggregates.forall(_.resolved)
@@ -2358,7 +2358,7 @@ class Analyzer(override val catalogManager: CatalogManager)
 *
 * Note: CTEs are handled in CTESubstitution.
 */
-object ResolveSubquery extends Rule[LogicalPlan] with PredicateHelper {
+object ResolveSubquery extends Rule[LogicalPlan] {
  /**
   * Resolve the correlated expressions in a subquery, as if the expressions live in the outer
   * plan. All resolved outer references are wrapped in an [[OuterReference]]
@@ -2531,7 +2531,7 @@ class Analyzer(override val catalogManager: CatalogManager)
 * those in a HAVING clause or ORDER BY clause. These expressions are pushed down to the
 * underlying aggregate operator and then projected away after the original operator.
 */
-object ResolveAggregateFunctions extends Rule[LogicalPlan] with AliasHelper {
+object ResolveAggregateFunctions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUpWithPruning(
    _.containsPattern(AGGREGATE), ruleId) {
    // Resolve aggregate with having clause to Filter(..., Aggregate()). Note, to avoid wrongly

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/CostBasedJoinReorder.scala

Lines changed: 1 addition & 1 deletion

@@ -401,7 +401,7 @@ case class Cost(card: BigInt, size: BigInt) {
 *
 * Filters (2) and (3) are not implemented.
 */
-object JoinReorderDPFilters extends PredicateHelper {
+object JoinReorderDPFilters {
  /**
   * Builds join graph information to be used by the filtering strategies.
   * Currently, it builds the sets of star/non-star joins.

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/MergeScalarSubqueries.scala

Lines changed: 1 addition & 1 deletion

@@ -100,7 +100,7 @@ import org.apache.spark.sql.types.DataType
 * :  +- ReusedSubquery Subquery scalar-subquery#242, [id=#125]
 * +- *(1) Scan OneRowRelation[]
 */
-object MergeScalarSubqueries extends Rule[LogicalPlan] with PredicateHelper {
+object MergeScalarSubqueries extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = {
    plan match {
      // Subquery reuse needs to be enabled for this optimization.

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala

Lines changed: 2 additions & 2 deletions

@@ -768,7 +768,7 @@ object LimitPushDown extends Rule[LogicalPlan] {
 * safe to pushdown Filters and Projections through it. Filter pushdown is handled by another
 * rule PushDownPredicates. Once we add UNION DISTINCT, we will not be able to pushdown Projections.
 */
-object PushProjectionThroughUnion extends Rule[LogicalPlan] with PredicateHelper {
+object PushProjectionThroughUnion extends Rule[LogicalPlan] {

  /**
   * Maps Attributes from the left side to the corresponding Attribute on the right side.
@@ -1631,7 +1631,7 @@ object PruneFilters extends Rule[LogicalPlan] with PredicateHelper {
 * This rule improves performance of predicate pushdown for cascading joins such as:
 * Filter-Join-Join-Join. Most predicates can be pushed down in a single pass.
 */
-object PushDownPredicates extends Rule[LogicalPlan] with PredicateHelper {
+object PushDownPredicates extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsAnyPattern(FILTER, JOIN)) {
    CombineFilters.applyLocally

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala

Lines changed: 3 additions & 3 deletions

@@ -109,7 +109,7 @@ object ConstantFolding extends Rule[LogicalPlan] {
 * - Using this mapping, replace occurrence of the attributes with the corresponding constant values
 *   in the AND node.
 */
-object ConstantPropagation extends Rule[LogicalPlan] with PredicateHelper {
+object ConstantPropagation extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsAllPatterns(LITERAL, FILTER), ruleId) {
    case f: Filter =>
@@ -532,7 +532,7 @@ object SimplifyBinaryComparison
/**
 * Simplifies conditional expressions (if / case).
 */
-object SimplifyConditionals extends Rule[LogicalPlan] with PredicateHelper {
+object SimplifyConditionals extends Rule[LogicalPlan] {
  private def falseOrNullLiteral(e: Expression): Boolean = e match {
    case FalseLiteral => true
    case Literal(null, _) => true
@@ -617,7 +617,7 @@ object SimplifyConditionals extends Rule[LogicalPlan] with PredicateHelper {
/**
 * Push the foldable expression into (if / case) branches.
 */
-object PushFoldableIntoBranches extends Rule[LogicalPlan] with PredicateHelper {
+object PushFoldableIntoBranches extends Rule[LogicalPlan] {

  // To be conservative here: it's only a guaranteed win if all but at most only one branch
  // end up being not foldable.

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala

Lines changed: 3 additions & 3 deletions

@@ -29,7 +29,7 @@ import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, DataSourceV2ScanRelation}
import org.apache.spark.sql.internal.SQLConf

-trait OperationHelper extends AliasHelper with PredicateHelper {
+trait OperationHelper extends PredicateHelper {
  import org.apache.spark.sql.catalyst.optimizer.CollapseProject.canCollapseExpressions

  type ReturnType =
@@ -119,7 +119,7 @@ trait OperationHelper extends AliasHelper with PredicateHelper {
 * [[org.apache.spark.sql.catalyst.expressions.Alias Aliases]] are in-lined/substituted if
 * necessary.
 */
-object PhysicalOperation extends OperationHelper with PredicateHelper {
+object PhysicalOperation extends OperationHelper {
  override protected def legacyMode: Boolean = true
}

@@ -128,7 +128,7 @@ object PhysicalOperation extends OperationHelper with PredicateHelper {
 * operations even if they are non-deterministic, as long as they satisfy the
 * requirement of CollapseProject and CombineFilters.
 */
-object ScanOperation extends OperationHelper with PredicateHelper {
+object ScanOperation extends OperationHelper {
  override protected def legacyMode: Boolean = false
}

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExtractPredicatesWithinOutputSetSuite.scala

Lines changed: 1 addition & 4 deletions

@@ -22,10 +22,7 @@ import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.types.BooleanType

-class ExtractPredicatesWithinOutputSetSuite
-  extends SparkFunSuite
-  with PredicateHelper
-  with PlanTest {
+class ExtractPredicatesWithinOutputSetSuite extends SparkFunSuite with PlanTest {
  private val a = AttributeReference("A", BooleanType)(exprId = ExprId(1))
  private val b = AttributeReference("B", BooleanType)(exprId = ExprId(2))
  private val c = AttributeReference("C", BooleanType)(exprId = ExprId(3))

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BinaryComparisonSimplificationSuite.scala

Lines changed: 1 addition & 1 deletion

@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}

-class BinaryComparisonSimplificationSuite extends PlanTest with PredicateHelper {
+class BinaryComparisonSimplificationSuite extends PlanTest {

  object Optimize extends RuleExecutor[LogicalPlan] {
    val batches =

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala

Lines changed: 1 addition & 1 deletion

@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.types.BooleanType

-class BooleanSimplificationSuite extends PlanTest with ExpressionEvalHelper with PredicateHelper {
+class BooleanSimplificationSuite extends PlanTest with ExpressionEvalHelper {

  object Optimize extends RuleExecutor[LogicalPlan] {
    val batches =

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSubqueryAliasesSuite.scala

Lines changed: 1 addition & 2 deletions

@@ -20,14 +20,13 @@ package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases
import org.apache.spark.sql.catalyst.dsl.expressions._
-import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.Literal.TrueLiteral
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._


-class EliminateSubqueryAliasesSuite extends PlanTest with PredicateHelper {
+class EliminateSubqueryAliasesSuite extends PlanTest {

  object Optimize extends RuleExecutor[LogicalPlan] {
    val batches = Batch("EliminateSubqueryAliases", Once, EliminateSubqueryAliases) :: Nil

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/PushFoldableIntoBranchesSuite.scala

Lines changed: 1 addition & 2 deletions

@@ -32,8 +32,7 @@ import org.apache.spark.sql.types.{BooleanType, IntegerType, StringType, Timesta
import org.apache.spark.unsafe.types.CalendarInterval


-class PushFoldableIntoBranchesSuite
-  extends PlanTest with ExpressionEvalHelper with PredicateHelper {
+class PushFoldableIntoBranchesSuite extends PlanTest with ExpressionEvalHelper {

  object Optimize extends RuleExecutor[LogicalPlan] {
    val batches = Batch("PushFoldableIntoBranches", FixedPoint(50),

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/RemoveRedundantAliasAndProjectSuite.scala

Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@ import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.types.MetadataBuilder

-class RemoveRedundantAliasAndProjectSuite extends PlanTest with PredicateHelper {
+class RemoveRedundantAliasAndProjectSuite extends PlanTest {

  object Optimize extends RuleExecutor[LogicalPlan] {
    val batches = Batch(

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyConditionalSuite.scala

Lines changed: 1 addition & 1 deletion

@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.types.{BooleanType, IntegerType}


-class SimplifyConditionalSuite extends PlanTest with ExpressionEvalHelper with PredicateHelper {
+class SimplifyConditionalSuite extends PlanTest with ExpressionEvalHelper {

  object Optimize extends RuleExecutor[LogicalPlan] {
    val batches = Batch("SimplifyConditionals", FixedPoint(50),

sql/core/src/main/scala/org/apache/spark/sql/execution/OptimizeMetadataOnlyQuery.scala

Lines changed: 1 addition & 1 deletion

@@ -160,7 +160,7 @@ case class OptimizeMetadataOnlyQuery(catalog: SessionCatalog) extends Rule[Logic
 * A pattern that finds the partitioned table relation node inside the given plan, and returns a
 * pair of the partition attributes and the table relation node.
 */
-object PartitionedRelation extends PredicateHelper {
+object PartitionedRelation {

  def unapply(plan: LogicalPlan): Option[(AttributeSet, LogicalPlan)] = {
    plan match {

sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala

Lines changed: 1 addition & 3 deletions

@@ -168,9 +168,7 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
 * Supports both equi-joins and non-equi-joins.
 * Supports only inner like joins.
 */
-object JoinSelection extends Strategy
-  with PredicateHelper
-  with JoinSelectionHelper {
+object JoinSelection extends Strategy with JoinSelectionHelper {
  private val hintErrorHandler = conf.hintErrorHandler

  private def checkHintBuildSide(

sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/LogicalQueryStageStrategy.scala

Lines changed: 1 addition & 2 deletions

@@ -18,7 +18,6 @@
package org.apache.spark.sql.execution.adaptive

import org.apache.spark.sql.Strategy
-import org.apache.spark.sql.catalyst.expressions.PredicateHelper
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight}
import org.apache.spark.sql.catalyst.planning.{ExtractEquiJoinKeys, ExtractSingleColumnNullAwareAntiJoin}
import org.apache.spark.sql.catalyst.plans.LeftAnti
@@ -35,7 +34,7 @@ import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, BroadcastNes
 * stage in case of the larger join child relation finishes before the smaller relation. Note
 * that this rule needs to be applied before regular join strategies.
 */
-object LogicalQueryStageStrategy extends Strategy with PredicateHelper {
+object LogicalQueryStageStrategy extends Strategy {

  private def isBroadcastStage(plan: LogicalPlan): Boolean = plan match {
    case LogicalQueryStage(_, _: BroadcastQueryStageExec) => true

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PruneFileSourcePartitions.scala

Lines changed: 1 addition & 2 deletions

@@ -32,8 +32,7 @@ import org.apache.spark.sql.catalyst.rules.Rule
 * statistics will be updated. And the partition filters will be kept in the filters of returned
 * logical plan.
 */
-private[sql] object PruneFileSourcePartitions
-  extends Rule[LogicalPlan] with PredicateHelper {
+private[sql] object PruneFileSourcePartitions extends Rule[LogicalPlan] {

  private def rebuildPhysicalOperation(
      projects: Seq[NamedExpression],

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/PushDownUtils.scala

Lines changed: 2 additions & 2 deletions

@@ -19,7 +19,7 @@ package org.apache.spark.sql.execution.datasources.v2

import scala.collection.mutable

-import org.apache.spark.sql.catalyst.expressions.{AttributeReference, AttributeSet, Expression, NamedExpression, PredicateHelper, SchemaPruning}
+import org.apache.spark.sql.catalyst.expressions.{AttributeReference, AttributeSet, Expression, NamedExpression, SchemaPruning}
import org.apache.spark.sql.catalyst.util.CharVarcharUtils
import org.apache.spark.sql.connector.expressions.SortOrder
import org.apache.spark.sql.connector.expressions.filter.Predicate
@@ -29,7 +29,7 @@ import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources
import org.apache.spark.sql.types.StructType

-object PushDownUtils extends PredicateHelper {
+object PushDownUtils {
  /**
   * Pushes down filters to the data source reader
   *

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala

Lines changed: 2 additions & 2 deletions

@@ -19,7 +19,7 @@ package org.apache.spark.sql.execution.datasources.v2

import scala.collection.mutable

-import org.apache.spark.sql.catalyst.expressions.{aggregate, Alias, AliasHelper, And, Attribute, AttributeReference, AttributeSet, Cast, Expression, IntegerLiteral, Literal, NamedExpression, PredicateHelper, ProjectionOverSchema, SortOrder, SubqueryExpression}
+import org.apache.spark.sql.catalyst.expressions.{aggregate, Alias, And, Attribute, AttributeReference, AttributeSet, Cast, Expression, IntegerLiteral, Literal, NamedExpression, PredicateHelper, ProjectionOverSchema, SortOrder, SubqueryExpression}
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.optimizer.CollapseProject
import org.apache.spark.sql.catalyst.planning.ScanOperation
@@ -34,7 +34,7 @@ import org.apache.spark.sql.sources
import org.apache.spark.sql.types.{DataType, DecimalType, IntegerType, StructType}
import org.apache.spark.sql.util.SchemaUtils._

-object V2ScanRelationPushDown extends Rule[LogicalPlan] with PredicateHelper with AliasHelper {
+object V2ScanRelationPushDown extends Rule[LogicalPlan] with PredicateHelper {
  import DataSourceV2Implicits._

  def apply(plan: LogicalPlan): LogicalPlan = {

sql/core/src/main/scala/org/apache/spark/sql/execution/dynamicpruning/PlanDynamicPruningFilters.scala

Lines changed: 2 additions & 3 deletions

@@ -19,7 +19,7 @@ package org.apache.spark.sql.execution.dynamicpruning

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions
-import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeSeq, BindReferences, DynamicPruningExpression, DynamicPruningSubquery, Expression, ListQuery, Literal, PredicateHelper}
+import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeSeq, BindReferences, DynamicPruningExpression, DynamicPruningSubquery, Expression, ListQuery, Literal}
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight}
import org.apache.spark.sql.catalyst.plans.logical.Aggregate
import org.apache.spark.sql.catalyst.plans.physical.BroadcastMode
@@ -34,8 +34,7 @@ import org.apache.spark.sql.execution.joins._
 * results of broadcast. For joins that are not planned as broadcast hash joins we keep
 * the fallback mechanism with subquery duplicate.
 */
-case class PlanDynamicPruningFilters(sparkSession: SparkSession)
-  extends Rule[SparkPlan] with PredicateHelper {
+case class PlanDynamicPruningFilters(sparkSession: SparkSession) extends Rule[SparkPlan] {

  /**
   * Identify the shape in which keys of a given plan are broadcasted.

sql/core/src/main/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFs.scala

Lines changed: 1 addition & 1 deletion

@@ -157,7 +157,7 @@ object ExtractGroupingPythonUDFFromAggregate extends Rule[LogicalPlan] {
 * This has the limitation that the input to the Python UDF is not allowed include attributes from
 * multiple child operators.
 */
-object ExtractPythonUDFs extends Rule[LogicalPlan] with PredicateHelper {
+object ExtractPythonUDFs extends Rule[LogicalPlan] {

  private type EvalType = Int
  private type EvalTypeChecker = EvalType => Boolean

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala

Lines changed: 2 additions & 2 deletions

@@ -29,7 +29,7 @@ import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.BucketSpec
-import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionSet, PredicateHelper}
+import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionSet}
import org.apache.spark.sql.catalyst.util
import org.apache.spark.sql.execution.{DataSourceScanExec, FileSourceScanExec, SparkPlan}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2ScanRelation
@@ -40,7 +40,7 @@ import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{IntegerType, LongType, StructField, StructType}
import org.apache.spark.util.Utils

-class FileSourceStrategySuite extends QueryTest with SharedSparkSession with PredicateHelper {
+class FileSourceStrategySuite extends QueryTest with SharedSparkSession {
  import testImplicits._

  protected override def sparkConf = super.sparkConf.set("spark.default.parallelism", "1")

sql/hive/src/test/scala/org/apache/spark/sql/sources/SimpleTextHadoopFsRelationSuite.scala

Lines changed: 1 addition & 2 deletions

@@ -20,10 +20,9 @@ package org.apache.spark.sql.sources
import org.apache.hadoop.fs.Path

import org.apache.spark.sql.catalyst.catalog.CatalogUtils
-import org.apache.spark.sql.catalyst.expressions.PredicateHelper
import org.apache.spark.sql.types._

-class SimpleTextHadoopFsRelationSuite extends HadoopFsRelationTest with PredicateHelper {
+class SimpleTextHadoopFsRelationSuite extends HadoopFsRelationTest {
  override val dataSourceName: String = classOf[SimpleTextSource].getCanonicalName

  // We have a very limited number of supported types at here since it is just for a
