Skip to content

Commit 0058986

Browse files
10110346 authored and srowen committed
[MINOR][DOC] Correct some document description errors
Correct some document description errors. N/A Closes #23162 from 10110346/docerror. Authored-by: liuxian <liu.xian3@zte.com.cn> Signed-off-by: Sean Owen <sean.owen@databricks.com> (cherry picked from commit 60e4239) Signed-off-by: Sean Owen <sean.owen@databricks.com>
1 parent 4ee463a commit 0058986

File tree

2 files changed

+7
-5
lines changed
  • core/src/main/scala/org/apache/spark/internal/config
  • sql/catalyst/src/main/scala/org/apache/spark/sql/internal

2 files changed

+7
-5
lines changed

core/src/main/scala/org/apache/spark/internal/config/package.scala

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -225,7 +225,7 @@ package object config {
225225
private[spark] val LISTENER_BUS_EVENT_QUEUE_CAPACITY =
226226
ConfigBuilder("spark.scheduler.listenerbus.eventqueue.capacity")
227227
.intConf
228-
.checkValue(_ > 0, "The capacity of listener bus event queue must not be negative")
228+
.checkValue(_ > 0, "The capacity of listener bus event queue must be positive")
229229
.createWithDefault(10000)
230230

231231
private[spark] val LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED =
@@ -436,7 +436,8 @@ package object config {
436436
"made in creating intermediate shuffle files.")
437437
.bytesConf(ByteUnit.KiB)
438438
.checkValue(v => v > 0 && v <= Int.MaxValue / 1024,
439-
s"The file buffer size must be greater than 0 and less than ${Int.MaxValue / 1024}.")
439+
"The file buffer size must be greater than 0 and less than or equal to " +
440+
s"${Int.MaxValue / 1024}.")
440441
.createWithDefaultString("32k")
441442

442443
private[spark] val SHUFFLE_UNSAFE_FILE_OUTPUT_BUFFER_SIZE =
@@ -445,7 +446,8 @@ package object config {
445446
"is written in unsafe shuffle writer. In KiB unless otherwise specified.")
446447
.bytesConf(ByteUnit.KiB)
447448
.checkValue(v => v > 0 && v <= Int.MaxValue / 1024,
448-
s"The buffer size must be greater than 0 and less than ${Int.MaxValue / 1024}.")
449+
"The buffer size must be greater than 0 and less than or equal to " +
450+
s"${Int.MaxValue / 1024}.")
449451
.createWithDefaultString("32k")
450452

451453
private[spark] val SHUFFLE_DISK_WRITE_BUFFER_SIZE =

sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -273,7 +273,7 @@ object SQLConf {
273273
"factor as the estimated data size, in case the data is compressed in the file and lead to" +
274274
" a heavily underestimated result.")
275275
.doubleConf
276-
.checkValue(_ > 0, "the value of fileDataSizeFactor must be larger than 0")
276+
.checkValue(_ > 0, "the value of fileDataSizeFactor must be greater than 0")
277277
.createWithDefault(1.0)
278278

279279
val PARQUET_SCHEMA_MERGING_ENABLED = buildConf("spark.sql.parquet.mergeSchema")
@@ -925,7 +925,7 @@ object SQLConf {
925925
.internal()
926926
.doc("The number of bins when generating histograms.")
927927
.intConf
928-
.checkValue(num => num > 1, "The number of bins must be larger than 1.")
928+
.checkValue(num => num > 1, "The number of bins must be greater than 1.")
929929
.createWithDefault(254)
930930

931931
val PERCENTILE_ACCURACY =

0 commit comments

Comments (0)