@@ -408,7 +408,7 @@ object SQLConf {
         "reduce IO and improve performance. Note, multiple continuous blocks exist in single " +
         s"fetch request only happen when '${ADAPTIVE_EXECUTION_ENABLED.key}' and " +
         s"'${REDUCE_POST_SHUFFLE_PARTITIONS_ENABLED.key}' is enabled, this feature also depends " +
-        "on a relocatable serializer, the concatenation support codec in use and the new version" +
+        "on a relocatable serializer, the concatenation support codec in use and the new version " +
         "shuffle fetch protocol.")
       .booleanConf
       .createWithDefault(true)
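(Aside, not part of the change: the doc string above says batched fetch only takes effect when adaptive execution and post-shuffle partition coalescing are both enabled. A minimal sketch of a session with the adaptive-execution prerequisite turned on; the `spark` handle is reused by the later sketches, and the key behind REDUCE_POST_SHUFFLE_PARTITIONS_ENABLED should be read off that constant in this branch rather than guessed here.)

    import org.apache.spark.sql.SparkSession

    // Local session for the sketches in this review; "spark.sql.adaptive.enabled"
    // is the key behind ADAPTIVE_EXECUTION_ENABLED mentioned in the doc string above.
    val spark = SparkSession.builder()
      .appName("sqlconf-doc-sketches")
      .master("local[*]")
      .config("spark.sql.adaptive.enabled", "true")
      .getOrCreate()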
@@ -557,7 +557,7 @@ object SQLConf {
   val PARQUET_INT64_AS_TIMESTAMP_MILLIS = buildConf("spark.sql.parquet.int64AsTimestampMillis")
     .doc(s"(Deprecated since Spark 2.3, please set ${PARQUET_OUTPUT_TIMESTAMP_TYPE.key}.) " +
       "When true, timestamp values will be stored as INT64 with TIMESTAMP_MILLIS as the " +
-      "extended type. In this mode, the microsecond portion of the timestamp value will be" +
+      "extended type. In this mode, the microsecond portion of the timestamp value will be " +
       "truncated.")
     .booleanConf
     .createWithDefault(false)
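(Aside: the flag above is deprecated in favour of the output-timestamp-type setting it points to. A sketch of the replacement, reusing the `spark` session from the first snippet; TIMESTAMP_MILLIS matches the truncating behaviour described in the doc string.)

    // Preferred replacement for the deprecated int64AsTimestampMillis flag:
    // choose the Parquet timestamp representation explicitly.
    spark.conf.set("spark.sql.parquet.outputTimestampType", "TIMESTAMP_MILLIS")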
@@ -638,8 +638,9 @@ object SQLConf {
   val PARQUET_OUTPUT_COMMITTER_CLASS = buildConf("spark.sql.parquet.output.committer.class")
     .doc("The output committer class used by Parquet. The specified class needs to be a " +
       "subclass of org.apache.hadoop.mapreduce.OutputCommitter. Typically, it's also a subclass " +
-      "of org.apache.parquet.hadoop.ParquetOutputCommitter. If it is not, then metadata summaries" +
-      "will never be created, irrespective of the value of parquet.summary.metadata.level")
+      "of org.apache.parquet.hadoop.ParquetOutputCommitter. If it is not, then metadata " +
+      "summaries will never be created, irrespective of the value of " +
+      "parquet.summary.metadata.level")
     .internal()
     .stringConf
     .createWithDefault("org.apache.parquet.hadoop.ParquetOutputCommitter")
@@ -676,7 +677,7 @@ object SQLConf {
     .createWithDefault("snappy")
 
   val ORC_IMPLEMENTATION = buildConf("spark.sql.orc.impl")
-    .doc("When native, use the native version of ORC support instead of the ORC library in Hive." +
+    .doc("When native, use the native version of ORC support instead of the ORC library in Hive. " +
       "It is 'hive' by default prior to Spark 2.4.")
     .internal()
     .stringConf
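(Aside: the config above switches between the native ORC reader and the Hive one. A sketch, reusing the `spark` session.)

    // 'native' selects the built-in ORC support; 'hive' falls back to the ORC library in Hive,
    // which the doc string notes was the default before Spark 2.4.
    spark.conf.set("spark.sql.orc.impl", "native")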
@@ -1225,8 +1226,8 @@ object SQLConf {
     buildConf("spark.sql.streaming.multipleWatermarkPolicy")
       .doc("Policy to calculate the global watermark value when there are multiple watermark " +
         "operators in a streaming query. The default value is 'min' which chooses " +
-        "the minimum watermark reported across multiple operators. Other alternative value is" +
-        "'max' which chooses the maximum across multiple operators." +
+        "the minimum watermark reported across multiple operators. Other alternative value is " +
+        "'max' which chooses the maximum across multiple operators. " +
         "Note: This configuration cannot be changed between query restarts from the same " +
         "checkpoint location.")
       .stringConf
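(Aside: a sketch of choosing the 'max' policy described above, reusing the `spark` session; as the doc string warns, the value must stay the same across restarts from one checkpoint location.)

    // Take the maximum of the per-operator watermarks instead of the default minimum.
    spark.conf.set("spark.sql.streaming.multipleWatermarkPolicy", "max")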
@@ -1381,7 +1382,7 @@ object SQLConf {
     buildConf("spark.sql.statistics.parallelFileListingInStatsComputation.enabled")
       .internal()
       .doc("When true, SQL commands use parallel file listing, " +
-        "as opposed to single thread listing." +
+        "as opposed to single thread listing. " +
         "This usually speeds up commands that need to list many directories.")
       .booleanConf
       .createWithDefault(true)
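(Aside: this is an internal flag; a sketch of falling back to single-threaded listing, reusing the `spark` session.)

    // Disable parallel file listing during statistics computation (the default is true).
    spark.conf.set("spark.sql.statistics.parallelFileListingInStatsComputation.enabled", "false")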
@@ -1702,21 +1703,21 @@ object SQLConf {
 
   val CONCAT_BINARY_AS_STRING = buildConf("spark.sql.function.concatBinaryAsString")
     .doc("When this option is set to false and all inputs are binary, `functions.concat` returns " +
-      "an output as binary. Otherwise, it returns as a string. ")
+      "an output as binary. Otherwise, it returns as a string.")
     .booleanConf
     .createWithDefault(false)
 
   val ELT_OUTPUT_AS_STRING = buildConf("spark.sql.function.eltOutputAsString")
     .doc("When this option is set to false and all inputs are binary, `elt` returns " +
-      "an output as binary. Otherwise, it returns as a string. ")
+      "an output as binary. Otherwise, it returns as a string.")
     .booleanConf
     .createWithDefault(false)
 
   val VALIDATE_PARTITION_COLUMNS =
     buildConf("spark.sql.sources.validatePartitionColumns")
       .internal()
       .doc("When this option is set to true, partition column values will be validated with " +
-        "user-specified schema. If the validation fails, a runtime exception is thrown." +
+        "user-specified schema. If the validation fails, a runtime exception is thrown. " +
         "When this option is set to false, the partition column value will be converted to null " +
         "if it can not be casted to corresponding user-specified schema.")
       .booleanConf
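(Aside: a sketch of the concat behaviour described above, reusing the `spark` session. With the flag at its default of false and all inputs binary, the result column stays binary; flipping it to true would make the result a string.)

    spark.conf.set("spark.sql.function.concatBinaryAsString", "false")
    // All inputs are binary, so with the flag false the result column keeps BinaryType.
    val df = spark.sql("SELECT concat(CAST('a' AS BINARY), CAST('b' AS BINARY)) AS c")
    df.printSchema()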
@@ -2129,7 +2130,7 @@ object SQLConf {
     buildConf("spark.sql.legacy.fromDayTimeString.enabled")
       .internal()
       .doc("When true, the `from` bound is not taken into account in conversion of " +
-        "a day-time string to an interval, and the `to` bound is used to skip" +
+        "a day-time string to an interval, and the `to` bound is used to skip " +
         "all interval units out of the specified range. If it is set to `false`, " +
         "`ParseException` is thrown if the input does not match to the pattern " +
         "defined by `from` and `to`.")