Skip to content

Commit 07e0bf8

Browse files
committed
Added withClue
1 parent 363482e commit 07e0bf8

File tree

1 file changed

+36
-33
lines changed

1 file changed

+36
-33
lines changed

sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala

Lines changed: 36 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -143,40 +143,43 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
143143
withTempPath { file =>
144144
val path = file.getCanonicalPath
145145

146-
val dataGenerator = RandomDataGenerator.forType(
147-
dataType = dataType,
148-
nullable = true,
149-
new Random(System.nanoTime())
150-
).getOrElse {
151-
fail(s"Failed to create data generator for schema $dataType")
146+
val seed = System.nanoTime()
147+
withClue(s"Random data generated with the seed: ${seed}") {
148+
val dataGenerator = RandomDataGenerator.forType(
149+
dataType = dataType,
150+
nullable = true,
151+
new Random(seed)
152+
).getOrElse {
153+
fail(s"Failed to create data generator for schema $dataType")
154+
}
155+
156+
// Create a DF for the schema with random data. The index field is used to sort the
157+
// DataFrame. This is a workaround for SPARK-10591.
158+
val schema = new StructType()
159+
.add("index", IntegerType, nullable = false)
160+
.add("col", dataType, nullable = true)
161+
val rdd =
162+
spark.sparkContext.parallelize((1 to 10).map(i => Row(i, dataGenerator())))
163+
val df = spark.createDataFrame(rdd, schema).orderBy("index").coalesce(1)
164+
165+
df.write
166+
.mode("overwrite")
167+
.format(dataSourceName)
168+
.option("dataSchema", df.schema.json)
169+
.options(extraOptions)
170+
.save(path)
171+
172+
val loadedDF = spark
173+
.read
174+
.format(dataSourceName)
175+
.option("dataSchema", df.schema.json)
176+
.schema(df.schema)
177+
.options(extraOptions)
178+
.load(path)
179+
.orderBy("index")
180+
181+
checkAnswer(loadedDF, df)
152182
}
153-
154-
// Create a DF for the schema with random data. The index field is used to sort the
155-
// DataFrame. This is a workaround for SPARK-10591.
156-
val schema = new StructType()
157-
.add("index", IntegerType, nullable = false)
158-
.add("col", dataType, nullable = true)
159-
val rdd =
160-
spark.sparkContext.parallelize((1 to 10).map(i => Row(i, dataGenerator())))
161-
val df = spark.createDataFrame(rdd, schema).orderBy("index").coalesce(1)
162-
163-
df.write
164-
.mode("overwrite")
165-
.format(dataSourceName)
166-
.option("dataSchema", df.schema.json)
167-
.options(extraOptions)
168-
.save(path)
169-
170-
val loadedDF = spark
171-
.read
172-
.format(dataSourceName)
173-
.option("dataSchema", df.schema.json)
174-
.schema(df.schema)
175-
.options(extraOptions)
176-
.load(path)
177-
.orderBy("index")
178-
179-
checkAnswer(loadedDF, df)
180183
}
181184
}
182185
}

0 commit comments

Comments (0)