diff --git a/.github/actions/java-test/action.yaml b/.github/actions/java-test/action.yaml
index f82f05b23f..e1efd9fce9 100644
--- a/.github/actions/java-test/action.yaml
+++ b/.github/actions/java-test/action.yaml
@@ -49,7 +49,7 @@ runs:
     - name: Run Maven compile
       shell: bash
       run: |
-        ./mvnw -B compile test-compile scalafix:scalafix -Psemanticdb ${{ inputs.maven_opts }}
+        ./mvnw -B compile test-compile scalafix:scalafix -Dscalafix.mode=CHECK -Psemanticdb ${{ inputs.maven_opts }}

     - name: Run tests
       shell: bash
diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
index 25a65999ef..7105f1d1aa 100644
--- a/DEVELOPMENT.md
+++ b/DEVELOPMENT.md
@@ -84,3 +84,7 @@ in the respective source code, e.g., `CometTPCHQueryBenchmark`.
 ## Debugging
 Comet is a multi-language project with native code written in Rust and JVM code written in Java and Scala.
 It is possible to debug both native and JVM code concurrently as described in the [DEBUGGING guide](DEBUGGING.md)
+
+## Submitting a Pull Request
+Comet uses [Scalafix](https://github.com/scalacenter/scalafix) and [Spotless](https://github.com/diffplug/spotless/tree/main/plugin-maven) to
+automatically format the code. Before submitting a pull request, you can simply run `make format`.
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 6f599a0817..ca5d7564a3 100644
--- a/Makefile
+++ b/Makefile
@@ -38,6 +38,7 @@ clean:
 bench:
 	cd core && RUSTFLAGS="-Ctarget-cpu=native" cargo bench $(filter-out $@,$(MAKECMDGOALS))
 format:
+	cd core && cargo fmt
 	./mvnw compile test-compile scalafix:scalafix -Psemanticdb $(PROFILES)
 	./mvnw spotless:apply $(PROFILES)

diff --git a/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala b/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
index 87c2265fcb..5720b69354 100644
--- a/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
+++ b/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
@@ -26,7 +26,6 @@ import org.apache.spark.internal.Logging
 import org.apache.spark.network.util.ByteUnit
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.SparkSessionExtensions
-import org.apache.spark.sql.catalyst.expressions.AttributeReference
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.comet._
 import org.apache.spark.sql.comet.execution.shuffle.{CometColumnarShuffle, CometNativeShuffle}
diff --git a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
index 08a499b06d..902f7037f8 100644
--- a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.expressions.objects.StaticInvoke
 import org.apache.spark.sql.catalyst.optimizer.NormalizeNaNAndZero
 import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, SinglePartition}
 import org.apache.spark.sql.catalyst.util.CharVarcharCodegenUtils
-import org.apache.spark.sql.comet.{CometHashAggregateExec, CometPlan, CometSinkPlaceHolder, DecimalPrecision}
+import org.apache.spark.sql.comet.{CometSinkPlaceHolder, DecimalPrecision}
 import org.apache.spark.sql.execution
 import org.apache.spark.sql.execution._
 import org.apache.spark.sql.execution.aggregate.HashAggregateExec
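The CI change above runs Scalafix with `-Dscalafix.mode=CHECK`, so the build fails on violations instead of rewriting sources; the `make format` target applies the rewrites locally. The import removals in this patch are the kind of change a semantic rule such as Scalafix's built-in `RemoveUnused` produces (semantic rules need SemanticDB output, which is what the `-Psemanticdb` profile provides). As a minimal sketch, assuming `RemoveUnused` is among the rules Comet enables, this hypothetical snippet mirrors the cleanups below:

```scala
import org.apache.spark.sql.functions.{expr, lit} // `lit` is never referenced below

object UnusedImportExample {
  // Only `expr` is used, so RemoveUnused would narrow the import to
  // `import org.apache.spark.sql.functions.expr`; in CHECK mode the
  // build errors out instead of applying the rewrite.
  val doubled = expr("a * 2")
}
```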
diff --git a/spark/src/test/scala/org/apache/comet/CometCastSuite.scala b/spark/src/test/scala/org/apache/comet/CometCastSuite.scala
index 565d2264b7..317371fb90 100644
--- a/spark/src/test/scala/org/apache/comet/CometCastSuite.scala
+++ b/spark/src/test/scala/org/apache/comet/CometCastSuite.scala
@@ -90,13 +90,13 @@ class CometCastSuite extends CometTestBase with AdaptiveSparkPlanHelper {
     Range(0, len).map(_ => chars.charAt(r.nextInt(chars.length))).mkString
   }

-  private def fuzzCastFromString(chars: String, maxLen: Int, toType: DataType) {
+  private def fuzzCastFromString(chars: String, maxLen: Int, toType: DataType): Unit = {
     val r = new Random(0)
     val inputs = Range(0, 10000).map(_ => genString(r, chars, maxLen))
     castTest(inputs.toDF("a"), toType)
   }

-  private def castTest(input: DataFrame, toType: DataType) {
+  private def castTest(input: DataFrame, toType: DataType): Unit = {
     withTempPath { dir =>
       val df = roundtripParquet(input, dir)
         .withColumn("converted", col("a").cast(toType))
diff --git a/spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala b/spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala
index 7424f1bfd5..803f30bed0 100644
--- a/spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala
+++ b/spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala
@@ -24,7 +24,7 @@ import java.util
 import org.apache.hadoop.fs.Path
 import org.apache.spark.sql.{CometTestBase, DataFrame, Row}
 import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
-import org.apache.spark.sql.functions.{expr, lit}
+import org.apache.spark.sql.functions.expr
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.internal.SQLConf.SESSION_LOCAL_TIMEZONE
 import org.apache.spark.sql.types.{Decimal, DecimalType, StructType}
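The `CometCastSuite` hunk above converts two helpers from Scala's procedure syntax (a method body with no `=` and no declared result type) to an explicit `: Unit =`. Procedure syntax is deprecated in Scala 2.13, and Scalafix ships a built-in `ProcedureSyntax` rule that performs exactly this rewrite; whether Comet's configuration enables that rule is an assumption here. A minimal before/after sketch:

```scala
object ProcedureSyntaxExample {
  // Deprecated procedure syntax: no `=`, the Unit result type is implicit.
  //   def logResult(msg: String) { println(msg) }

  // Explicit form after the rewrite: the Unit result type is spelled out
  // and the body is bound with `=`.
  def logResult(msg: String): Unit = {
    println(msg)
  }
}
```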