@@ -30,7 +30,7 @@ import org.apache.spark.sql.catalyst.plans.logical.{AppendData, InsertIntoTable,
 import org.apache.spark.sql.execution.SQLExecution
 import org.apache.spark.sql.execution.command.DDLUtils
 import org.apache.spark.sql.execution.datasources.{CreateTable, DataSource, LogicalRelation}
-import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, DataSourceV2Utils, FileDataSourceV2, WriteToDataSourceV2}
+import org.apache.spark.sql.execution.datasources.v2._
 import org.apache.spark.sql.sources.BaseRelation
 import org.apache.spark.sql.sources.v2._
 import org.apache.spark.sql.sources.v2.writer.SupportsSaveMode
@@ -265,38 +265,23 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
       val dsOptions = new DataSourceOptions(options.asJava)
       provider.getTable(dsOptions) match {
         case table: SupportsBatchWrite =>
-          lazy val relation = DataSourceV2Relation.create(table, options)
-          mode match {
-            case SaveMode.Append =>
-              runCommand(df.sparkSession, "save") {
-                AppendData.byName(relation, df.logicalPlan)
-              }
-
-            case SaveMode.Overwrite =>
-              // truncate the table
-              runCommand(df.sparkSession, "save") {
-                OverwriteByExpression.byName(relation, df.logicalPlan, Literal(true))
+          table.newWriteBuilder(dsOptions) match {
+            case writeBuilder: SupportsSaveMode =>
+              val write = writeBuilder.mode(mode)
+                .withQueryId(UUID.randomUUID().toString)
+                .withInputDataSchema(df.logicalPlan.schema)
+                .buildForBatch()
+              // It can only return null with `SupportsSaveMode`. We can clean it up after
+              // removing `SupportsSaveMode`.
+              if (write != null) {
+                runCommand(df.sparkSession, "save") {
+                  WriteToDataSourceV2(write, df.logicalPlan)
+                }
               }

             case _ =>
-              table.newWriteBuilder(dsOptions) match {
-                case writeBuilder: SupportsSaveMode =>
-                  val write = writeBuilder.mode(mode)
-                    .withQueryId(UUID.randomUUID().toString)
-                    .withInputDataSchema(df.logicalPlan.schema)
-                    .buildForBatch()
-                  // It can only return null with `SupportsSaveMode`. We can clean it up after
-                  // removing `SupportsSaveMode`.
-                  if (write != null) {
-                    runCommand(df.sparkSession, "save") {
-                      WriteToDataSourceV2(write, df.logicalPlan)
-                    }
-                  }
-
-                case _ =>
-                  throw new AnalysisException(
-                    s"data source ${table.name} does not support SaveMode $mode")
-              }
+              throw new AnalysisException(
+                s"data source ${table.name} does not support SaveMode $mode")
           }

       // Streaming also uses the data source V2 API. So it may be that the data source implements
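
For reference, a minimal sketch of the builder side that the retained branch expects. `ExampleWriteBuilder` is hypothetical (not part of this patch) and assumes only the `sources.v2.writer` interfaces chained above: `mode`, `withQueryId`, `withInputDataSchema`, and `buildForBatch`.

import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.sources.v2.writer.{BatchWrite, SupportsSaveMode, WriteBuilder}
import org.apache.spark.sql.types.StructType

// Hypothetical builder; mixing in SupportsSaveMode is what the
// `case writeBuilder: SupportsSaveMode` branch above matches on.
class ExampleWriteBuilder extends WriteBuilder with SupportsSaveMode {
  private var saveMode: SaveMode = SaveMode.ErrorIfExists
  private var queryId: String = _
  private var schema: StructType = _

  // Called by DataFrameWriter.save via writeBuilder.mode(mode).
  override def mode(mode: SaveMode): WriteBuilder = {
    saveMode = mode
    this
  }

  override def withQueryId(queryId: String): WriteBuilder = {
    this.queryId = queryId
    this
  }

  override def withInputDataSchema(schema: StructType): WriteBuilder = {
    this.schema = schema
    this
  }

  override def buildForBatch(): BatchWrite = {
    // A real source would return its BatchWrite here. Returning null is only
    // tolerated for SupportsSaveMode builders, which is why save() guards with
    // `if (write != null)` instead of failing.
    null
  }
}

With a provider whose table implements SupportsBatchWrite and returns this builder from newWriteBuilder, every SaveMode now flows through the single WriteToDataSourceV2 path above rather than branching into AppendData / OverwriteByExpression plans.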