diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 7343ca86d3465..eaad5c837f0d5 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -2671,8 +2671,8 @@ object SQLConf {
     .booleanConf
     .createWithDefault(false)
 
-  val ANALYZE_PARTITION_STATS_ENABLED =
-    buildConf("spark.sql.statistics.update.partitionStats.enabled")
+  val UPDATE_PART_STATS_IN_ANALYZE_TABLE_ENABLED =
+    buildConf("spark.sql.statistics.updatePartitionStatsInAnalyzeTable.enabled")
       .doc("When this config is enabled, Spark will also update partition statistics in analyze " +
         "table command (i.e., ANALYZE TABLE .. COMPUTE STATISTICS [NOSCAN]). Note the command " +
         "will also become more expensive. When this config is disabled, Spark will only " +
@@ -5109,7 +5109,8 @@ class SQLConf extends Serializable with Logging with SqlApiConf {
 
   def autoSizeUpdateEnabled: Boolean = getConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED)
 
-  def analyzePartitionStatsEnabled: Boolean = getConf(SQLConf.ANALYZE_PARTITION_STATS_ENABLED)
+  def updatePartStatsInAnalyzeTableEnabled: Boolean =
+    getConf(SQLConf.UPDATE_PART_STATS_IN_ANALYZE_TABLE_ENABLED)
 
   def joinReorderEnabled: Boolean = getConf(SQLConf.JOIN_REORDER_ENABLED)
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
index a680127c0ee69..2a427bff9e53f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
@@ -231,7 +231,7 @@ object CommandUtils extends Logging {
       tableIdent: TableIdentifier,
       noScan: Boolean): Unit = {
     val sessionState = sparkSession.sessionState
-    val partitionStatsEnabled = sessionState.conf.analyzePartitionStatsEnabled
+    val partitionStatsEnabled = sessionState.conf.updatePartStatsInAnalyzeTableEnabled
     val db = tableIdent.database.getOrElse(sessionState.catalog.getCurrentDatabase)
     val tableIdentWithDB = TableIdentifier(tableIdent.table, Some(db))
     val tableMeta = sessionState.catalog.getTableMetadata(tableIdentWithDB)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
index 4b9e478bc89f8..e217ac5544b9a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
@@ -373,7 +373,8 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto
     }
 
     Seq(true, false).foreach { partitionStatsEnabled =>
-      withSQLConf(SQLConf.ANALYZE_PARTITION_STATS_ENABLED.key -> partitionStatsEnabled.toString) {
+      withSQLConf(SQLConf.UPDATE_PART_STATS_IN_ANALYZE_TABLE_ENABLED.key ->
+        partitionStatsEnabled.toString) {
         withTable(tableName) {
           withTempPath { path =>
             // Create a table with 3 partitions all located under a directory 'path'
@@ -389,7 +390,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto
 
             partitionDates.foreach { ds =>
               sql(s"ALTER TABLE $tableName ADD PARTITION (ds='$ds') LOCATION '$path/ds=$ds'")
-              sql("SELECT * from src").write.mode(SaveMode.Overwrite)
+              sql("SELECT * FROM src").write.mode(SaveMode.Overwrite)
                 .format("parquet").save(s"$path/ds=$ds")
             }
 
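
Usage note (not part of the patch): a minimal sketch of exercising the renamed flag. The config key and the ANALYZE TABLE syntax come from the .doc() string in the diff above; the SparkSession value `spark` and the table name `sales` are hypothetical.

  // Assumes a running SparkSession bound to `spark` and a partitioned table `sales`.
  // Enable partition-level statistics updates during ANALYZE TABLE (default: false).
  spark.conf.set("spark.sql.statistics.updatePartitionStatsInAnalyzeTable.enabled", "true")
  // With NOSCAN, only size statistics are collected, avoiding a full table scan.
  spark.sql("ANALYZE TABLE sales COMPUTE STATISTICS NOSCAN")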