From db599a5cb1e0ac19cfb19806c06178337bf0b479 Mon Sep 17 00:00:00 2001
From: Josh Soref
Date: Sun, 15 Nov 2020 18:03:11 -0500
Subject: [PATCH] style: line length exceeds 100 characters

Signed-off-by: Josh Soref
---
 .../test/scala/org/apache/spark/sql/DatasetCacheSuite.scala | 3 ++-
 .../execution/datasources/parquet/ParquetQuerySuite.scala   | 4 ++--
 .../org/apache/spark/sql/sources/BucketedReadSuite.scala    | 6 ++++--
 3 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
index cb4f18b32a469..009ccb9a45354 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
@@ -113,7 +113,8 @@ class DatasetCacheSuite extends QueryTest
     ds.unpersist(blocking = true)
     assert(ds.storageLevel == StorageLevel.NONE, "The Dataset ds should not be cached.")
     aggregated.unpersist(blocking = true)
-    assert(aggregated.storageLevel == StorageLevel.NONE, "The Dataset aggregated should not be cached.")
+    assert(aggregated.storageLevel == StorageLevel.NONE,
+      "The Dataset aggregated should not be cached.")
   }
 
   test("persist and then withColumn") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
index 705b94e7743e6..8f85fe3c52583 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
@@ -857,7 +857,7 @@ class ParquetV1QuerySuite extends ParquetQuerySuite {
         val df = spark.range(10).select(Seq.tabulate(11) {i => ('id + i).as(s"c$i")} : _*)
         df.write.mode(SaveMode.Overwrite).parquet(path)
 
-        // do not return batch, because whole stage codegen is disabled for wide table (>200 columns)
+        // do not return batch - whole stage codegen is disabled for wide table (>200 columns)
         val df2 = spark.read.parquet(path)
         val fileScan2 = df2.queryExecution.sparkPlan.find(_.isInstanceOf[FileSourceScanExec]).get
         assert(!fileScan2.asInstanceOf[FileSourceScanExec].supportsColumnar)
@@ -890,7 +890,7 @@ class ParquetV2QuerySuite extends ParquetQuerySuite {
         val df = spark.range(10).select(Seq.tabulate(11) {i => ('id + i).as(s"c$i")} : _*)
         df.write.mode(SaveMode.Overwrite).parquet(path)
 
-        // do not return batch, because whole stage codegen is disabled for wide table (>200 columns)
+        // do not return batch - whole stage codegen is disabled for wide table (>200 columns)
         val df2 = spark.read.parquet(path)
         val fileScan2 = df2.queryExecution.sparkPlan.find(_.isInstanceOf[BatchScanExec]).get
         val parquetScan2 = fileScan2.asInstanceOf[BatchScanExec].scan.asInstanceOf[ParquetScan]
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
index 4b3d51d28137d..bd585236c66e5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
@@ -644,7 +644,8 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils {
         aggregated.sort("i", "j"),
         df1.groupBy("i", "j").agg(max("k")).sort("i", "j"))
 
-      assert(aggregated.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty)
+      assert(
+        aggregated.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty)
     }
   }
 
@@ -684,7 +685,8 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils {
         aggregated.sort("i", "j"),
         df1.groupBy("i", "j").agg(max("k")).sort("i", "j"))
 
-      assert(aggregated.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty)
+      assert(
+        aggregated.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty)
     }
   }
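
Note: all five hunks apply the same wrapping idiom for lines over Spark's 100-character
limit (enforced, in Spark's standard setup, by scalastyle's FileLineLengthChecker rule):
break the statement either after the comma that ends an argument or immediately after the
opening parenthesis, and indent the continuation two extra spaces. A minimal sketch of
both patterns follows; the checkWrapped helper and its ds and plan parameters are
hypothetical stand-ins for the suites' locals, not code from the patch:

    import org.apache.spark.sql.Dataset
    import org.apache.spark.sql.execution.SparkPlan
    import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
    import org.apache.spark.storage.StorageLevel

    // Hypothetical helper illustrating the two wrapping patterns used above.
    def checkWrapped(ds: Dataset[_], plan: SparkPlan): Unit = {
      // Pattern 1: two-argument assert - break after the comma that ends the
      // first argument and carry the message onto a continuation line.
      assert(ds.storageLevel == StorageLevel.NONE,
        "The Dataset should not be cached.")

      // Pattern 2: one long predicate - break immediately after "assert(" so
      // the whole expression fits on its own continuation line.
      assert(
        plan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty)
    }

Either form keeps every physical line under 100 characters without changing behavior.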