
Commit 6095fed

[SPARK-7927] whitespace fixes for SQL core.
1 parent 3e312a5 commit 6095fed

20 files changed, +83 −80 lines changed

sql/core/src/main/scala/org/apache/spark/sql/Column.scala

Lines changed: 2 additions & 2 deletions

@@ -349,7 +349,7 @@ class Column(protected[sql] val expr: Expression) extends Logging {
    * @group expr_ops
    * @since 1.4.0
    */
-  def when(condition: Column, value: Any):Column = this.expr match {
+  def when(condition: Column, value: Any): Column = this.expr match {
     case CaseWhen(branches: Seq[Expression]) =>
       CaseWhen(branches ++ Seq(lit(condition).expr, lit(value).expr))
     case _ =>
@@ -378,7 +378,7 @@ class Column(protected[sql] val expr: Expression) extends Logging {
    * @group expr_ops
    * @since 1.4.0
    */
-  def otherwise(value: Any):Column = this.expr match {
+  def otherwise(value: Any): Column = this.expr match {
     case CaseWhen(branches: Seq[Expression]) =>
       if (branches.size % 2 == 0) {
         CaseWhen(branches :+ lit(value).expr)
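
The two methods touched here, when and otherwise, build a SQL CASE WHEN expression by accumulating branches on a CaseWhen node. A minimal usage sketch, not part of this commit; the people DataFrame and its age column are assumed for illustration:

  import org.apache.spark.sql.functions.when

  people.select(
    when(people("age") < 18, "minor")     // functions.when starts the CaseWhen
      .when(people("age") < 65, "adult")  // Column.when (patched above) appends a branch
      .otherwise("senior"))               // Column.otherwise supplies the else value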

sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala

Lines changed: 9 additions & 9 deletions

@@ -255,7 +255,7 @@ class DataFrame private[sql](
     val newCols = logicalPlan.output.zip(colNames).map { case (oldAttribute, newName) =>
       Column(oldAttribute).as(newName)
     }
-    select(newCols :_*)
+    select(newCols : _*)
   }

   /**
@@ -500,7 +500,7 @@ class DataFrame private[sql](
    */
   @scala.annotation.varargs
   def sort(sortCol: String, sortCols: String*): DataFrame = {
-    sort((sortCol +: sortCols).map(apply) :_*)
+    sort((sortCol +: sortCols).map(apply) : _*)
   }

   /**
@@ -531,7 +531,7 @@ class DataFrame private[sql](
    * @since 1.3.0
    */
   @scala.annotation.varargs
-  def orderBy(sortCol: String, sortCols: String*): DataFrame = sort(sortCol, sortCols :_*)
+  def orderBy(sortCol: String, sortCols: String*): DataFrame = sort(sortCol, sortCols : _*)

   /**
    * Returns a new [[DataFrame]] sorted by the given expressions.
@@ -540,7 +540,7 @@ class DataFrame private[sql](
    * @since 1.3.0
    */
   @scala.annotation.varargs
-  def orderBy(sortExprs: Column*): DataFrame = sort(sortExprs :_*)
+  def orderBy(sortExprs: Column*): DataFrame = sort(sortExprs : _*)

   /**
    * Selects column based on the column name and return it as a [[Column]].
@@ -611,7 +611,7 @@ class DataFrame private[sql](
    * @since 1.3.0
    */
   @scala.annotation.varargs
-  def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)) :_*)
+  def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)) : _*)

   /**
    * Selects a set of SQL expressions. This is a variant of `select` that accepts
@@ -825,7 +825,7 @@ class DataFrame private[sql](
    * @since 1.3.0
    */
   def agg(aggExpr: (String, String), aggExprs: (String, String)*): DataFrame = {
-    groupBy().agg(aggExpr, aggExprs :_*)
+    groupBy().agg(aggExpr, aggExprs : _*)
   }

   /**
@@ -863,7 +863,7 @@ class DataFrame private[sql](
    * @since 1.3.0
    */
   @scala.annotation.varargs
-  def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs :_*)
+  def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs : _*)

   /**
    * Returns a new [[DataFrame]] by taking the first `n` rows. The difference between this function
@@ -1039,7 +1039,7 @@ class DataFrame private[sql](
        val name = field.name
        if (resolver(name, colName)) col.as(colName) else Column(name)
      }
-     select(colNames :_*)
+     select(colNames : _*)
    } else {
      select(Column("*"), col.as(colName))
    }
@@ -1262,7 +1262,7 @@ class DataFrame private[sql](
    * @group action
    * @since 1.3.0
    */
-  override def collectAsList(): java.util.List[Row] = java.util.Arrays.asList(rdd.collect() :_*)
+  override def collectAsList(): java.util.List[Row] = java.util.Arrays.asList(rdd.collect() : _*)

   /**
    * Returns the number of rows in the [[DataFrame]].
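
Nearly every change in this file normalizes the varargs splice from ":_*" to ": _*". The ascription itself is standard Scala and behaves identically in both spellings: it expands a sequence into a varargs parameter list. A minimal sketch, independent of Spark:

  def joinAll(parts: String*): String = parts.mkString(", ")

  val xs = Seq("a", "b", "c")
  joinAll(xs : _*)  // passes the three elements as separate varargs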

sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala

Lines changed: 1 addition & 1 deletion

@@ -28,5 +28,5 @@ private[sql] case class DataFrameHolder(df: DataFrame) {
   // `rdd.toDF("1")` as invoking this toDF and then apply on the returned DataFrame.
   def toDF(): DataFrame = df

-  def toDF(colNames: String*): DataFrame = df.toDF(colNames :_*)
+  def toDF(colNames: String*): DataFrame = df.toDF(colNames : _*)
 }
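
DataFrameHolder only exists so that rdd.toDF(...) resolves through a single implicit conversion, as the comment in the file notes. A usage sketch, assuming a SparkContext sc and the usual implicits import:

  import sqlContext.implicits._

  val df = sc.parallelize(Seq((1, "a"), (2, "b"))).toDF("id", "value")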

sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala

Lines changed: 5 additions & 5 deletions

@@ -247,7 +247,7 @@ class GroupedData protected[sql](
    */
   @scala.annotation.varargs
   def mean(colNames: String*): DataFrame = {
-    aggregateNumericColumns(colNames:_*)(Average)
+    aggregateNumericColumns(colNames : _*)(Average)
   }

   /**
@@ -259,7 +259,7 @@ class GroupedData protected[sql](
    */
   @scala.annotation.varargs
   def max(colNames: String*): DataFrame = {
-    aggregateNumericColumns(colNames:_*)(Max)
+    aggregateNumericColumns(colNames : _*)(Max)
   }

   /**
@@ -271,7 +271,7 @@ class GroupedData protected[sql](
    */
   @scala.annotation.varargs
   def avg(colNames: String*): DataFrame = {
-    aggregateNumericColumns(colNames:_*)(Average)
+    aggregateNumericColumns(colNames : _*)(Average)
   }

   /**
@@ -283,7 +283,7 @@ class GroupedData protected[sql](
    */
   @scala.annotation.varargs
   def min(colNames: String*): DataFrame = {
-    aggregateNumericColumns(colNames:_*)(Min)
+    aggregateNumericColumns(colNames : _*)(Min)
   }

   /**
@@ -295,6 +295,6 @@ class GroupedData protected[sql](
    */
   @scala.annotation.varargs
   def sum(colNames: String*): DataFrame = {
-    aggregateNumericColumns(colNames:_*)(Sum)
+    aggregateNumericColumns(colNames : _*)(Sum)
   }
 }
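
Each of these shortcuts forwards its column names to aggregateNumericColumns with the matching aggregate expression (Average, Max, Min, Sum). A usage sketch; the df DataFrame and its dept, salary, and age columns are assumed for illustration:

  df.groupBy("dept").mean("salary")        // average salary per department
  df.groupBy("dept").max("salary", "age")  // several numeric columns at once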

sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala

Lines changed: 1 addition & 1 deletion

@@ -298,7 +298,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
     */
    implicit class StringToColumn(val sc: StringContext) {
      def $(args: Any*): ColumnName = {
-        new ColumnName(sc.s(args :_*))
+        new ColumnName(sc.s(args : _*))
      }
    }
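
StringToColumn is the implicit behind the $"..." column syntax: the $ interpolator runs ordinary string interpolation through sc.s and wraps the result in a ColumnName. A sketch, assuming a df with a name column and the usual implicits import:

  import sqlContext.implicits._

  df.select($"name")  // equivalent to df.select(df("name"))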

sql/core/src/main/scala/org/apache/spark/sql/SparkSQLParser.scala

Lines changed: 9 additions & 9 deletions

@@ -54,15 +54,15 @@ private[sql] class SparkSQLParser(fallback: String => LogicalPlan) extends Abstr
     }
   }

-  protected val AS      = Keyword("AS")
-  protected val CACHE   = Keyword("CACHE")
-  protected val CLEAR   = Keyword("CLEAR")
-  protected val IN      = Keyword("IN")
-  protected val LAZY    = Keyword("LAZY")
-  protected val SET     = Keyword("SET")
-  protected val SHOW    = Keyword("SHOW")
-  protected val TABLE   = Keyword("TABLE")
-  protected val TABLES  = Keyword("TABLES")
+  protected val AS = Keyword("AS")
+  protected val CACHE = Keyword("CACHE")
+  protected val CLEAR = Keyword("CLEAR")
+  protected val IN = Keyword("IN")
+  protected val LAZY = Keyword("LAZY")
+  protected val SET = Keyword("SET")
+  protected val SHOW = Keyword("SHOW")
+  protected val TABLE = Keyword("TABLE")
+  protected val TABLES = Keyword("TABLES")
   protected val UNCACHE = Keyword("UNCACHE")

   override protected lazy val start: Parser[LogicalPlan] = cache | uncache | set | show | others

sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala

Lines changed: 1 addition & 1 deletion

@@ -236,7 +236,7 @@ private[sql] case class InMemoryColumnarTableScan(
       case GreaterThanOrEqual(a: AttributeReference, l: Literal) => l <= statsFor(a).upperBound
       case GreaterThanOrEqual(l: Literal, a: AttributeReference) => statsFor(a).lowerBound <= l

-      case IsNull(a: Attribute)    => statsFor(a).nullCount > 0
+      case IsNull(a: Attribute) => statsFor(a).nullCount > 0
       case IsNotNull(a: Attribute) => statsFor(a).count - statsFor(a).nullCount > 0
     }

sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala

Lines changed: 1 addition & 1 deletion

@@ -296,7 +296,7 @@ private[sql] case class EnsureRequirements(sqlContext: SQLContext) extends Rule[
       .sliding(2)
       .map {
         case Seq(a) => true
-        case Seq(a,b) => a compatibleWith b
+        case Seq(a, b) => a.compatibleWith(b)
       }.exists(!_)

     // Adds Exchange or Sort operators as required
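
Beyond dropping infix notation for a plain method call and spacing the comma, the surrounding sliding(2) idiom is worth noting: it checks every adjacent pair of partitionings. A minimal sketch of the same pattern on integers, with equality standing in for compatibleWith:

  val partitionings = Seq(4, 4, 8)
  val anyIncompatible = partitionings
    .sliding(2)
    .map {
      case Seq(_)    => true    // a lone trailing element has nothing to compare
      case Seq(a, b) => a == b  // stand-in for a.compatibleWith(b)
    }
    .exists(!_)                 // true if any adjacent pair failed the check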

sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala

Lines changed: 4 additions & 3 deletions

@@ -243,8 +243,9 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
           case (predicate, None) => predicate
           // Filter needs to be applied above when it contains partitioning
           // columns
-          case (predicate, _) if(!predicate.references.map(_.name).toSet
-            .intersect (partitionColNames).isEmpty) => predicate
+          case (predicate, _)
+            if !predicate.references.map(_.name).toSet.intersect(partitionColNames).isEmpty =>
+              predicate
         }
       }
     } else {
@@ -270,7 +271,7 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
         projectList,
         filters,
         identity[Seq[Expression]], // All filters still need to be evaluated.
-          InMemoryColumnarTableScan(_, filters, mem)) :: Nil
+        InMemoryColumnarTableScan(_, filters, mem)) :: Nil
       case _ => Nil
     }
   }
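
The first hunk here rewrites an if(...) squeezed into the case pattern as a proper pattern guard. Guards are plain Scala; a minimal sketch of the construct, independent of Spark:

  val doubled = Seq(1, -2, 3).collect {
    case n if n > 0 => n * 2  // the guard decides which values the case matches
  }
  // doubled == Seq(2, 6)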

sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastLeftSemiJoinHash.scala

Lines changed: 1 addition & 1 deletion

@@ -39,7 +39,7 @@ case class BroadcastLeftSemiJoinHash(
   override def output: Seq[Attribute] = left.output

   protected override def doExecute(): RDD[Row] = {
-    val buildIter= buildPlan.execute().map(_.copy()).collect().toIterator
+    val buildIter = buildPlan.execute().map(_.copy()).collect().toIterator
     val hashSet = new java.util.HashSet[Row]()
     var currentRow: Row = null
