From 43c4726fa8b0de623d5563720c96632193262ec2 Mon Sep 17 00:00:00 2001
From: Liang-Chi Hsieh
Date: Mon, 22 Jun 2020 21:48:36 -0700
Subject: [PATCH] Address comments.

---
 .../main/scala/org/apache/spark/sql/Dataset.scala    | 12 ++++++------
 .../apache/spark/sql/execution/SparkStrategies.scala |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index 64c7ac6154565..6f97121d88ede 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -2993,7 +2993,7 @@ class Dataset[T] private[sql](
 
   private def repartitionByExpression(
       numPartitions: Option[Int],
-      partitionExprs: Column*): Dataset[T] = {
+      partitionExprs: Seq[Column]): Dataset[T] = {
     // The underlying `LogicalPlan` operator special-cases all-`SortOrder` arguments.
     // However, we don't want to complicate the semantics of this API method.
     // Instead, let's give users a friendly error message, pointing them to the new method.
@@ -3018,7 +3018,7 @@ class Dataset[T] private[sql](
    */
   @scala.annotation.varargs
   def repartition(numPartitions: Int, partitionExprs: Column*): Dataset[T] = {
-    repartitionByExpression(Some(numPartitions), partitionExprs: _*)
+    repartitionByExpression(Some(numPartitions), partitionExprs)
   }
 
   /**
@@ -3033,12 +3033,12 @@ class Dataset[T] private[sql](
    */
   @scala.annotation.varargs
   def repartition(partitionExprs: Column*): Dataset[T] = {
-    repartitionByExpression(None, partitionExprs: _*)
+    repartitionByExpression(None, partitionExprs)
   }
 
   private def repartitionByRange(
       numPartitions: Option[Int],
-      partitionExprs: Column*): Dataset[T] = {
+      partitionExprs: Seq[Column]): Dataset[T] = {
     require(partitionExprs.nonEmpty, "At least one partition-by expression must be specified.")
     val sortOrder: Seq[SortOrder] = partitionExprs.map(_.expr match {
       case expr: SortOrder => expr
@@ -3068,7 +3068,7 @@ class Dataset[T] private[sql](
    */
   @scala.annotation.varargs
   def repartitionByRange(numPartitions: Int, partitionExprs: Column*): Dataset[T] = {
-    repartitionByRange(Some(numPartitions), partitionExprs: _*)
+    repartitionByRange(Some(numPartitions), partitionExprs)
   }
 
   /**
@@ -3090,7 +3090,7 @@ class Dataset[T] private[sql](
    */
   @scala.annotation.varargs
   def repartitionByRange(partitionExprs: Column*): Dataset[T] = {
-    repartitionByRange(None, partitionExprs: _*)
+    repartitionByRange(None, partitionExprs)
   }
 
   /**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
index 8d7a82061caa0..5341b4778c539 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
@@ -687,7 +687,7 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
       case r: logical.RepartitionByExpression =>
         val canChangeNumParts = r.optNumPartitions.isEmpty
         exchange.ShuffleExchangeExec(
-          r.partitioning, planLater(r.child), canChangeNumPartitions = canChangeNumParts) :: Nil
+          r.partitioning, planLater(r.child), canChangeNumParts) :: Nil
      case ExternalRDD(outputObjAttr, rdd) => ExternalRDDScanExec(outputObjAttr, rdd) :: Nil
      case r: LogicalRDD =>
        RDDScanExec(r.output, r.rdd, "ExistingRDD", r.outputPartitioning, r.outputOrdering) :: Nil
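
Editor's note: the Dataset.scala hunks change the private helpers repartitionByExpression and repartitionByRange from varargs (Column*) to Seq[Column]. Inside the public varargs methods, partitionExprs is already a Seq, so the previous code re-splatted it with `partitionExprs: _*` only for the varargs helper to collect it back into a Seq; taking Seq[Column] directly passes it through unchanged. The SparkStrategies.scala hunk only drops the named argument, passing canChangeNumParts positionally. Below is a minimal, self-contained Scala sketch of the varargs-to-Seq pattern; the names (VarargsHelperSketch, api, helper) are hypothetical illustrations, not Spark source.

object VarargsHelperSketch {

  // Before: a varargs private helper forces the caller to re-splat a Seq.
  //   private def helper(n: Option[Int], xs: Int*): Int = ...
  //   def api(n: Int, xs: Int*): Int = helper(Some(n), xs: _*)

  // After: the helper accepts a Seq directly; no round-trip through varargs.
  private def helper(n: Option[Int], xs: Seq[Int]): Int =
    xs.sum * n.getOrElse(1)

  // The public API keeps its varargs signature (plus the Java-friendly
  // bridge from @varargs); `xs` already arrives as a Seq and is passed as-is.
  @scala.annotation.varargs
  def api(n: Int, xs: Int*): Int = helper(Some(n), xs)

  def main(args: Array[String]): Unit =
    println(api(2, 1, 2, 3)) // prints 12
}

Keeping varargs only on the public entry points preserves the caller-facing API (including the @scala.annotation.varargs Java bridge) while the internal plumbing works with the concrete Seq, avoiding an unnecessary splat-and-collect at every delegation.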