
Commit 0b56f77

Fixed coding style issues in sql/core
1 parent fae7b02 · commit 0b56f77

File tree

12 files changed: +33 −32 lines

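Two conventions recur across the hunks below: names inside an import selector are sorted alphabetically, and wildcard imports that exist only for the implicit conversions they bring into scope are split into their own group under a /* Implicit conversions */ comment. A minimal before/after sketch of the pattern (the selector contents here are illustrative, not taken from this commit):

    // Before: unsorted selector; implicit-conversion import mixed in
    import org.apache.spark.{TaskContext, SparkConf}
    import org.apache.spark.SparkContext._

    // After: sorted selector; implicit conversions grouped and labelled
    import org.apache.spark.{SparkConf, TaskContext}

    /* Implicit conversions */
    import org.apache.spark.SparkContext._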

sql/core/src/main/scala/org/apache/spark/rdd/PartitionLocalRDDFunctions.scala

Lines changed: 4 additions & 2 deletions

@@ -22,10 +22,12 @@ import scala.language.implicitConversions
 import scala.reflect._
 import scala.collection.mutable.ArrayBuffer

-import org.apache.spark._
-import org.apache.spark.SparkContext._
+import org.apache.spark.{Aggregator, InterruptibleIterator, Logging}
 import org.apache.spark.util.collection.AppendOnlyMap

+/* Implicit conversions */
+import org.apache.spark.SparkContext._
+
 /**
  * Extra functions on RDDs that perform only local operations. These can be used when data has
  * already been partitioned correctly.
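The SparkContext._ wildcard here is imported only for the implicit conversions it brings into scope, which is why the commit moves it into the labelled group instead of leaving it mixed in with the ordinary imports.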

sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.plans.{Inner, JoinType}
 import org.apache.spark.sql.catalyst.types.BooleanType
-import org.apache.spark.{OneToOneDependency, Dependency, Partition, TaskContext}
+import org.apache.spark.{Dependency, OneToOneDependency, Partition, TaskContext}

 /**
  * <span class="badge" style="float: right; background-color: darkblue;">ALPHA COMPONENT</span>

sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala

Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@ package execution


 import com.esotericsoftware.kryo.{Kryo, Serializer}
-import com.esotericsoftware.kryo.io.{Output, Input}
+import com.esotericsoftware.kryo.io.{Input, Output}

 import org.apache.spark.{SparkConf, RangePartitioner, HashPartitioner}
 import org.apache.spark.rdd.ShuffledRDD

sql/core/src/main/scala/org/apache/spark/sql/execution/aggregates.scala

Lines changed: 3 additions & 1 deletion

@@ -19,11 +19,13 @@ package org.apache.spark.sql
 package execution

 import org.apache.spark.SparkContext
-import org.apache.spark.rdd.PartitionLocalRDDFunctions._
 import org.apache.spark.sql.catalyst.errors._
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans.physical._

+/* Implicit conversions */
+import org.apache.spark.rdd.PartitionLocalRDDFunctions._
+
 /**
  * Groups input data by `groupingExpressions` and computes the `aggregateExpressions` for each
  * group.

sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala

Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@ import org.apache.spark.SparkContext

 import org.apache.spark.sql.catalyst.errors._
 import org.apache.spark.sql.catalyst.expressions._
-import org.apache.spark.sql.catalyst.plans.physical.{UnspecifiedDistribution, OrderedDistribution}
+import org.apache.spark.sql.catalyst.plans.physical.{OrderedDistribution, UnspecifiedDistribution}
 import org.apache.spark.sql.catalyst.ScalaReflection

 case class Project(projectList: Seq[NamedExpression], child: SparkPlan) extends UnaryNode {

sql/core/src/main/scala/org/apache/spark/sql/execution/package.scala

Lines changed: 1 addition & 2 deletions

@@ -25,5 +25,4 @@ package org.apache.spark.sql
  * documented here in order to make it easier for others to understand the performance
  * characteristics of query plans that are generated by Spark SQL.
  */
-package object execution {
-}
+package object execution
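A Scala package object that declares no members can omit the braces entirely, so the braceless form above is equivalent to the original empty-bodied declaration.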

sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala

Lines changed: 5 additions & 6 deletions

@@ -23,23 +23,22 @@ import scala.collection.JavaConversions._

 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.permission.FsAction
-import org.apache.hadoop.fs.{Path, FileSystem}
+import org.apache.hadoop.fs.{FileSystem, Path}
 import org.apache.hadoop.mapreduce.Job

 import parquet.hadoop.metadata.{FileMetaData, ParquetMetadata}
 import parquet.hadoop.util.ContextUtil
-import parquet.hadoop.{Footer, ParquetFileWriter, ParquetFileReader}
+import parquet.hadoop.{Footer, ParquetFileReader, ParquetFileWriter}
 import parquet.io.api.{Binary, RecordConsumer}
 import parquet.schema.PrimitiveType.{PrimitiveTypeName => ParquetPrimitiveTypeName}
 import parquet.schema.Type.Repetition
-import parquet.schema.{MessageTypeParser, MessageType}
+import parquet.schema.{MessageType, MessageTypeParser}
 import parquet.schema.{PrimitiveType => ParquetPrimitiveType}
 import parquet.schema.{Type => ParquetType}

 import org.apache.spark.sql.catalyst.analysis.UnresolvedException
-import org.apache.spark.sql.catalyst.expressions.{Row, AttributeReference, Attribute}
-import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, BaseRelation}
-import org.apache.spark.sql.catalyst.types.ArrayType
+import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Row}
+import org.apache.spark.sql.catalyst.plans.logical.{BaseRelation, LogicalPlan}
 import org.apache.spark.sql.catalyst.types._

 /**

sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableOperations.scala

Lines changed: 4 additions & 4 deletions

@@ -27,14 +27,14 @@ import org.apache.hadoop.mapreduce._
 import org.apache.hadoop.mapreduce.lib.output.{FileOutputFormat => NewFileOutputFormat}

 import parquet.hadoop.util.ContextUtil
-import parquet.hadoop.{ParquetOutputFormat, ParquetInputFormat}
+import parquet.hadoop.{ParquetInputFormat, ParquetOutputFormat}
 import parquet.io.InvalidRecordException
 import parquet.schema.MessageType

 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.catalyst.expressions.{Row, Attribute, Expression}
-import org.apache.spark.sql.execution.{SparkPlan, UnaryNode, LeafNode}
-import org.apache.spark.{TaskContext, SerializableWritable, SparkContext}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression, Row}
+import org.apache.spark.sql.execution.{LeafNode, SparkPlan, UnaryNode}
+import org.apache.spark.{SerializableWritable, SparkContext, TaskContext}

 /**
  * Parquet table scan operator. Imports the file that backs the given

sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableSupport.scala

Lines changed: 3 additions & 3 deletions

@@ -22,12 +22,12 @@ import org.apache.hadoop.conf.Configuration
 import parquet.column.ParquetProperties
 import parquet.hadoop.ParquetOutputFormat
 import parquet.hadoop.api.ReadSupport.ReadContext
-import parquet.hadoop.api.{WriteSupport, ReadSupport}
+import parquet.hadoop.api.{ReadSupport, WriteSupport}
 import parquet.io.api._
-import parquet.schema.{MessageTypeParser, MessageType}
+import parquet.schema.{MessageType, MessageTypeParser}

 import org.apache.spark.Logging
-import org.apache.spark.sql.catalyst.expressions.{Row, Attribute}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, Row}
 import org.apache.spark.sql.catalyst.types._

 /**

sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTestData.scala

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ import org.apache.hadoop.mapreduce.Job

 import parquet.hadoop.ParquetWriter
 import parquet.hadoop.util.ContextUtil
-import parquet.schema.{MessageTypeParser, MessageType}
+import parquet.schema.{MessageType, MessageTypeParser}

 import org.apache.spark.sql.catalyst.expressions.GenericRow
 import org.apache.spark.sql.catalyst.util.getTempFilePath

sql/core/src/test/scala/org/apache/spark/sql/execution/TgfSuite.scala

Lines changed: 3 additions & 3 deletions

@@ -18,12 +18,12 @@
 package org.apache.spark.sql
 package execution

+import org.apache.spark.sql.QueryTest
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans._
-import org.apache.spark.sql.test._

-import TestSQLContext._
-import org.apache.spark.sql.QueryTest
+/* Implicit conversions */
+import org.apache.spark.sql.test.TestSQLContext._

 /**
  * This is an example TGF that uses UnresolvedAttributes 'name and 'age to access specific columns
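Note that the relative import TestSQLContext._ (which only resolved because of the preceding wildcard org.apache.spark.sql.test._) is replaced with the fully qualified org.apache.spark.sql.test.TestSQLContext._, letting the wildcard import be dropped.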

sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala

Lines changed: 6 additions & 7 deletions

@@ -19,18 +19,17 @@ package org.apache.spark.sql.parquet

 import org.scalatest.{BeforeAndAfterAll, FunSuite}

+import org.apache.hadoop.fs.{FileSystem, Path}
+import org.apache.hadoop.mapreduce.Job
+import parquet.hadoop.ParquetFileWriter
+import parquet.hadoop.util.ContextUtil
+import parquet.schema.MessageTypeParser
+
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.expressions.Row
 import org.apache.spark.sql.catalyst.util.getTempFilePath
 import org.apache.spark.sql.test.TestSQLContext

-import org.apache.hadoop.mapreduce.Job
-import org.apache.hadoop.fs.{Path, FileSystem}
-
-import parquet.schema.MessageTypeParser
-import parquet.hadoop.ParquetFileWriter
-import parquet.hadoop.util.ContextUtil
-
 class ParquetQuerySuite extends FunSuite with BeforeAndAfterAll {
   override def beforeAll() {
     ParquetTestData.writeFile()
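The reshuffling here applies the same import-group ordering seen in the other files: the third-party imports (org.apache.hadoop, parquet) are consolidated into a single block placed before the org.apache.spark imports, with a blank line separating the groups.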
