Commit f01bff9
[SPARK-39007][SQL] Use double quotes for SQL configs in error messages
### What changes were proposed in this pull request?
Wrap SQL configs in error messages in double quotes. Added the `toSQLConf()` method to `QueryErrorsBase` to invoke it from `Query.*Errors`.

### Why are the changes needed?
1. To highlight SQL configs and make them more visible to users.
2. To be able to easily parse configs from error text.
3. To be consistent with other outputs such as identifiers and SQL statements, where Spark uses quotes or ticks.

### Does this PR introduce _any_ user-facing change?
Yes, it changes user-facing error messages.

### How was this patch tested?
By running the modified test suites:
```
$ build/sbt "testOnly *QueryCompilationErrorsSuite"
$ build/sbt "testOnly *QueryExecutionAnsiErrorsSuite"
$ build/sbt "testOnly *QueryExecutionErrorsSuite"
```

Closes #36335 from MaxGekk/output-conf-error-class.

Authored-by: Max Gekk <max.gekk@gmail.com>
Signed-off-by: Max Gekk <max.gekk@gmail.com>
1 parent 8c80016 commit f01bff9
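For a quick sense of the change, here is a minimal standalone sketch (the object name and `main` harness are illustrative, not Spark code; the actual helper is added to `QueryErrorsBase` in the diff below) of how a SQL config is now rendered inside an error message:

```scala
object SqlConfQuotingSketch {
  // Mirrors the helper this PR adds: wrap an element in double quotes.
  private def quoteByDefault(elem: String): String = "\"" + elem + "\""

  // Mirrors the new QueryErrorsBase.toSQLConf.
  def toSQLConf(conf: String): String = quoteByDefault(conf)

  def main(args: Array[String]): Unit = {
    val ansiConf = "spark.sql.ansi.enabled"
    // Before this PR: If necessary set spark.sql.ansi.enabled to false to bypass this error.
    // After this PR:  If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
    println(s"If necessary set ${toSQLConf(ansiConf)} to false to bypass this error.")
  }
}
```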

20 files changed (+93 / -74 lines)


core/src/main/resources/error/error-classes.json

Lines changed: 1 addition & 1 deletion
@@ -197,7 +197,7 @@
     "message" : [ "The operation is not supported: <operation>" ]
   },
   "UNTYPED_SCALA_UDF" : {
-    "message" : [ "You're using untyped Scala UDF, which does not have the input type information. Spark may blindly pass null to the Scala closure with primitive-type argument, and the closure will see the default value of the Java type for the null argument, e.g. `udf((x: Int) => x, IntegerType)`, the result is 0 for null input. To get rid of this error, you could:\n1. use typed Scala UDF APIs(without return type parameter), e.g. `udf((x: Int) => x)`\n2. use Java UDF APIs, e.g. `udf(new UDF1[String, Integer] { override def call(s: String): Integer = s.length() }, IntegerType)`, if input types are all non primitive\n3. set spark.sql.legacy.allowUntypedScalaUDF to true and use this API with caution" ]
+    "message" : [ "You're using untyped Scala UDF, which does not have the input type information. Spark may blindly pass null to the Scala closure with primitive-type argument, and the closure will see the default value of the Java type for the null argument, e.g. `udf((x: Int) => x, IntegerType)`, the result is 0 for null input. To get rid of this error, you could:\n1. use typed Scala UDF APIs(without return type parameter), e.g. `udf((x: Int) => x)`\n2. use Java UDF APIs, e.g. `udf(new UDF1[String, Integer] { override def call(s: String): Integer = s.length() }, IntegerType)`, if input types are all non primitive\n3. set \"spark.sql.legacy.allowUntypedScalaUDF\" to true and use this API with caution" ]
   },
   "WRITING_JOB_ABORTED" : {
     "message" : [ "Writing job aborted" ],

sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala

Lines changed: 10 additions & 2 deletions
@@ -48,9 +48,13 @@ trait QueryErrorsBase {
     litToErrorValue(Literal.create(v, t))
   }
 
+  private def quoteByDefault(elem: String): String = {
+    "\"" + elem + "\""
+  }
+
   // Quote sql statements in error messages.
   def toSQLStmt(text: String): String = {
-    "\"" + text.toUpperCase(Locale.ROOT) + "\""
+    quoteByDefault(text.toUpperCase(Locale.ROOT))
   }
 
   def toSQLId(parts: Seq[String]): String = {
@@ -62,6 +66,10 @@ trait QueryErrorsBase {
   }
 
   def toSQLType(t: DataType): String = {
-    "\"" + t.sql + "\""
+    quoteByDefault(t.sql)
+  }
+
+  def toSQLConf(conf: String): String = {
+    quoteByDefault(conf)
   }
 }
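`toSQLStmt`, `toSQLType`, and the new `toSQLConf` now share the same quoting helper. A quick illustrative run (a standalone sketch with a simplified `toSQLType` signature; the real method takes a Catalyst `DataType` and quotes `t.sql`):

```scala
import java.util.Locale

object QueryErrorsBaseSketch {
  private def quoteByDefault(elem: String): String = "\"" + elem + "\""

  def toSQLStmt(text: String): String = quoteByDefault(text.toUpperCase(Locale.ROOT))
  def toSQLType(sql: String): String = quoteByDefault(sql) // simplified: the real version takes a DataType
  def toSQLConf(conf: String): String = quoteByDefault(conf)

  def main(args: Array[String]): Unit = {
    println(toSQLStmt("alter table"))            // prints "ALTER TABLE"
    println(toSQLType("INT"))                    // prints "INT"
    println(toSQLConf("spark.sql.ansi.enabled")) // prints "spark.sql.ansi.enabled"
  }
}
```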

sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala

Lines changed: 24 additions & 15 deletions
@@ -91,17 +91,23 @@ object QueryExecutionErrors extends QueryErrorsBase {
 
   def castingCauseOverflowError(t: Any, dataType: DataType): ArithmeticException = {
     new SparkArithmeticException(errorClass = "CAST_CAUSES_OVERFLOW",
-      messageParameters = Array(toSQLValue(t), toSQLType(dataType), SQLConf.ANSI_ENABLED.key))
+      messageParameters = Array(
+        toSQLValue(t), toSQLType(dataType), toSQLConf(SQLConf.ANSI_ENABLED.key)))
   }
 
   def cannotChangeDecimalPrecisionError(
       value: Decimal,
       decimalPrecision: Int,
       decimalScale: Int,
       context: String): ArithmeticException = {
-    new SparkArithmeticException(errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION",
-      messageParameters = Array(value.toDebugString,
-        decimalPrecision.toString, decimalScale.toString, SQLConf.ANSI_ENABLED.key, context))
+    new SparkArithmeticException(
+      errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION",
+      messageParameters = Array(
+        value.toDebugString,
+        decimalPrecision.toString,
+        decimalScale.toString,
+        toSQLConf(SQLConf.ANSI_ENABLED.key),
+        context))
   }
 
   def invalidInputSyntaxForNumericError(
@@ -148,7 +154,8 @@ object QueryExecutionErrors extends QueryErrorsBase {
 
   def divideByZeroError(context: String): ArithmeticException = {
     new SparkArithmeticException(
-      errorClass = "DIVIDE_BY_ZERO", messageParameters = Array(SQLConf.ANSI_ENABLED.key, context))
+      errorClass = "DIVIDE_BY_ZERO",
+      messageParameters = Array(toSQLConf(SQLConf.ANSI_ENABLED.key), context))
   }
 
   def invalidArrayIndexError(index: Int, numElements: Int): ArrayIndexOutOfBoundsException = {
@@ -163,8 +170,9 @@ object QueryExecutionErrors extends QueryErrorsBase {
       index: Int,
       numElements: Int,
       key: String): ArrayIndexOutOfBoundsException = {
-    new SparkArrayIndexOutOfBoundsException(errorClass = "INVALID_ARRAY_INDEX",
-      messageParameters = Array(toSQLValue(index), toSQLValue(numElements), key))
+    new SparkArrayIndexOutOfBoundsException(
+      errorClass = "INVALID_ARRAY_INDEX",
+      messageParameters = Array(toSQLValue(index), toSQLValue(numElements), toSQLConf(key)))
   }
 
   def invalidElementAtIndexError(
@@ -173,7 +181,7 @@ object QueryExecutionErrors extends QueryErrorsBase {
     new SparkArrayIndexOutOfBoundsException(
       errorClass = "INVALID_ARRAY_INDEX_IN_ELEMENT_AT",
       messageParameters =
-        Array(toSQLValue(index), toSQLValue(numElements), SQLConf.ANSI_ENABLED.key))
+        Array(toSQLValue(index), toSQLValue(numElements), toSQLConf(SQLConf.ANSI_ENABLED.key)))
   }
 
   def mapKeyNotExistError(key: Any, context: String): NoSuchElementException = {
@@ -182,8 +190,9 @@ object QueryExecutionErrors extends QueryErrorsBase {
   }
 
   def invalidFractionOfSecondError(): DateTimeException = {
-    new SparkDateTimeException(errorClass = "INVALID_FRACTION_OF_SECOND",
-      Array(SQLConf.ANSI_ENABLED.key))
+    new SparkDateTimeException(
+      errorClass = "INVALID_FRACTION_OF_SECOND",
+      Array(toSQLConf(SQLConf.ANSI_ENABLED.key)))
   }
 
   def ansiDateTimeParseError(e: DateTimeParseException): DateTimeParseException = {
@@ -521,10 +530,10 @@ object QueryExecutionErrors extends QueryErrorsBase {
           |from $format files can be ambiguous, as the files may be written by
           |Spark 2.x or legacy versions of Hive, which uses a legacy hybrid calendar
           |that is different from Spark 3.0+'s Proleptic Gregorian calendar.
-          |See more details in SPARK-31404. You can set the SQL config '$config' or
+          |See more details in SPARK-31404. You can set the SQL config ${toSQLConf(config)} or
           |the datasource option '$option' to 'LEGACY' to rebase the datetime values
           |w.r.t. the calendar difference during reading. To read the datetime values
-          |as it is, set the SQL config '$config' or the datasource option '$option'
+          |as it is, set the SQL config ${toSQLConf(config)} or the datasource option '$option'
          |to 'CORRECTED'.
          |""".stripMargin),
       cause = null
@@ -541,10 +550,10 @@ object QueryExecutionErrors extends QueryErrorsBase {
           |into $format files can be dangerous, as the files may be read by Spark 2.x
           |or legacy versions of Hive later, which uses a legacy hybrid calendar that
           |is different from Spark 3.0+'s Proleptic Gregorian calendar. See more
-          |details in SPARK-31404. You can set $config to 'LEGACY' to rebase the
+          |details in SPARK-31404. You can set ${toSQLConf(config)} to 'LEGACY' to rebase the
           |datetime values w.r.t. the calendar difference during writing, to get maximum
-          |interoperability. Or set $config to 'CORRECTED' to write the datetime values
-          |as it is, if you are 100% sure that the written files will only be read by
+          |interoperability. Or set ${toSQLConf(config)} to 'CORRECTED' to write the datetime
+          |values as it is, if you are 100% sure that the written files will only be read by
          |Spark 3.0+ or other systems that use Proleptic Gregorian calendar.
          |""".stripMargin),
       cause = null

sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out

Lines changed: 12 additions & 12 deletions
@@ -168,7 +168,7 @@ select element_at(array(1, 2, 3), 5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -177,7 +177,7 @@ select element_at(array(1, 2, 3), -5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -195,7 +195,7 @@ select elt(4, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -204,7 +204,7 @@ select elt(0, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -213,7 +213,7 @@ select elt(-1, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -254,7 +254,7 @@ select array(1, 2, 3)[5]
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 5, numElements: 3. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: 5, numElements: 3. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -263,7 +263,7 @@ select array(1, 2, 3)[-1]
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 3. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 3. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -337,7 +337,7 @@ select element_at(array(1, 2, 3), 5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -346,7 +346,7 @@ select element_at(array(1, 2, 3), -5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -364,7 +364,7 @@ select elt(4, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -373,7 +373,7 @@ select elt(0, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -382,4 +382,4 @@ select elt(-1, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.

sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out

Lines changed: 1 addition & 1 deletion
@@ -666,7 +666,7 @@ select cast('123.45' as decimal(4, 2))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,123.45,5,2}) cannot be represented as Decimal(4, 2). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,123.45,5,2}) cannot be represented as Decimal(4, 2). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select cast('123.45' as decimal(4, 2))
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out

Lines changed: 4 additions & 4 deletions
@@ -76,7 +76,7 @@ select (5e36BD + 0.1) + 5e36BD
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,10000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,10000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select (5e36BD + 0.1) + 5e36BD
 ^^^^^^^^^^^^^^^^^^^^^^^
@@ -88,7 +88,7 @@ select (-4e36BD - 0.1) - 7e36BD
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,-11000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,-11000000000000000000000000000000000000.1,39,1}) cannot be represented as Decimal(38, 1). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select (-4e36BD - 0.1) - 7e36BD
 ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -100,7 +100,7 @@ select 12345678901234567890.0 * 12345678901234567890.0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,152415787532388367501905199875019052100,39,0}) cannot be represented as Decimal(38, 2). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,152415787532388367501905199875019052100,39,0}) cannot be represented as Decimal(38, 2). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select 12345678901234567890.0 * 12345678901234567890.0
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -112,7 +112,7 @@ select 1e35BD / 0.1
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,1000000000000000000000000000000000000,37,0}) cannot be represented as Decimal(38, 6). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,1000000000000000000000000000000000000,37,0}) cannot be represented as Decimal(38, 6). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select 1e35BD / 0.1
 ^^^^^^^^^^^^

sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out

Lines changed: 3 additions & 3 deletions
@@ -228,7 +228,7 @@ select interval '2 seconds' / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2 seconds' / 0
 ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -264,7 +264,7 @@ select interval '2' year / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2' year / 0
 ^^^^^^^^^^^^^^^^^^^^^
@@ -664,7 +664,7 @@ select make_interval(0, 0, 0, 0, 0, 0, 1234567890123456789)
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,1234567890123456789,20,0}) cannot be represented as Decimal(18, 6). If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,1234567890123456789,20,0}) cannot be represented as Decimal(18, 6). If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select make_interval(0, 0, 0, 0, 0, 0, 1234567890123456789)
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ SELECT make_timestamp(2021, 07, 11, 6, 30, 60.007)
 struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
-[INVALID_FRACTION_OF_SECOND] The fraction of sec must be zero. Valid range is [0, 60]. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_FRACTION_OF_SECOND] The fraction of sec must be zero. Valid range is [0, 60]. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query

sql/core/src/test/resources/sql-tests/results/interval.sql.out

Lines changed: 2 additions & 2 deletions
@@ -204,7 +204,7 @@ select interval '2 seconds' / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2 seconds' / 0
 ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -240,7 +240,7 @@ select interval '2' year / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2' year / 0
 ^^^^^^^^^^^^^^^^^^^^^
