Commit e5c76d1

Make a serializable jobTrackerId instead of a non-serializable JobID in FileWriterFactory
1 parent ba8abdd commit e5c76d1

File tree

1 file changed (+3, -2 lines)


sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileWriterFactory.scala

Lines changed: 3 additions & 2 deletions
```diff
@@ -18,7 +18,7 @@ package org.apache.spark.sql.execution.datasources.v2
 
 import java.util.Date
 
-import org.apache.hadoop.mapreduce.{TaskAttemptID, TaskID, TaskType}
+import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskID, TaskType}
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
 
 import org.apache.spark.internal.io.{FileCommitProtocol, SparkHadoopWriterUtils}
@@ -30,7 +30,8 @@ case class FileWriterFactory (
     description: WriteJobDescription,
     committer: FileCommitProtocol) extends DataWriterFactory {
 
-  private val jobId = SparkHadoopWriterUtils.createJobID(new Date, 0)
+  private[this] val jobTrackerID = SparkHadoopWriterUtils.createJobTrackerID(new Date)
+  @transient private lazy val jobId = new JobID(jobTrackerID, 0)
 
   override def createWriter(partitionId: Int, realTaskId: Long): DataWriter[InternalRow] = {
     val taskAttemptContext = createTaskAttemptContext(partitionId)
```
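Context for the change: a `DataWriterFactory` is constructed on the driver and serialized to executors, so a strict `val` holding Hadoop's non-serializable `JobID` breaks task serialization. The commit keeps only the job-tracker ID `String` in the serialized state and rebuilds the `JobID` in a `@transient lazy val`. Below is a minimal, self-contained sketch of that pattern; `NonSerializableId`, `BrokenFactory`, and `FixedFactory` are hypothetical stand-ins for `JobID` and `FileWriterFactory`, not the actual Spark classes:

```scala
import java.io.{ByteArrayOutputStream, NotSerializableException, ObjectOutputStream}

// Stand-in for a type that is not java.io.Serializable, like Hadoop's
// org.apache.hadoop.mapreduce.JobID (hypothetical class, for illustration).
class NonSerializableId(val trackerId: String, val id: Int)

// Old shape: a strict val of the non-serializable type. The field is
// always written out with the object, so serialization throws.
case class BrokenFactory(jobTrackerId: String) {
  private val jobId = new NonSerializableId(jobTrackerId, 0)
}

// New shape, mirroring the commit: keep only the serializable String and
// rebuild the heavyweight object lazily. `@transient` keeps the field out
// of the serialized form; `lazy val` recomputes it on first access in each
// deserialized copy, deterministically, since it derives from the String.
case class FixedFactory(jobTrackerId: String) {
  @transient private lazy val jobId = new NonSerializableId(jobTrackerId, 0)
  def describe(): String = s"${jobId.trackerId}_${jobId.id}"
}

object TransientLazyValDemo {
  private def serialize(obj: AnyRef): Unit = {
    val out = new ObjectOutputStream(new ByteArrayOutputStream())
    try out.writeObject(obj) finally out.close()
  }

  def main(args: Array[String]): Unit = {
    // Old shape fails with java.io.NotSerializableException.
    try serialize(BrokenFactory("20230101120000"))
    catch { case e: NotSerializableException => println(s"old shape fails: $e") }

    // New shape serializes: only the String field is written.
    serialize(FixedFactory("20230101120000"))
    println(FixedFactory("20230101120000").describe()) // 20230101120000_0
  }
}
```

Because the lazy val is derived deterministically from the tracker string, each executor reconstructs an equal `JobID` on first use, so callers such as `createTaskAttemptContext(partitionId)` should see the same identifier the driver would have produced.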
