@@ -196,7 +196,7 @@ private[spark] class SpillableTaskResultGetter(sparkEnv: SparkEnv, scheduler: Ta
       taskSetManager: TaskSetManager,
       taskDataSeq: Seq[(Long, ByteBuffer)]): Unit = {
     val tids = taskDataSeq.map(_._1)
-    val taskId2TaskIdx = scheduler.removeMultiRunningTasks(taskSetManager, tids)
+    val taskId2TaskPartitionId = scheduler.removeMultiRunningTasks(taskSetManager, tids)

     // Killed tasks due to result size exceeds
     val killTaskIds = new ArrayBuffer[Long]
@@ -212,7 +212,7 @@ private[spark] class SpillableTaskResultGetter(sparkEnv: SparkEnv, scheduler: Ta
       val tid = t._1
       val serializedData = t._2
       try {
-        val taskIdx = taskId2TaskIdx.get(tid).get
+        val taskPartitionId = taskId2TaskPartitionId.get(tid).get
         serializer.get().deserialize[TaskResult[_]](serializedData) match {
           case directResult: DirectTaskResult[_] =>
             val start = System.currentTimeMillis()
@@ -244,7 +244,7 @@ private[spark] class SpillableTaskResultGetter(sparkEnv: SparkEnv, scheduler: Ta
             if (resultValue == null) {
               logWarning(s"TID ${tid} deserializeDirectResult is null")
               // There is possible lock contention to the TaskSetResultStore
-              store.save(taskIdx, null, 0, taskSetManager.taskSet.id)
+              store.save(taskPartitionId, null, 0, taskSetManager.taskSet.id)
               if (store.isFinished) {
                 resultStoreMap.remove(taskSetManager.taskSet.stageId.toString)
               }
@@ -257,7 +257,7 @@ private[spark] class SpillableTaskResultGetter(sparkEnv: SparkEnv, scheduler: Ta
               getLargeResultExecutor.execute(
                 new SpillDirectResultTask(store,
                   tid,
-                  taskIdx,
+                  taskPartitionId,
                   resultValue,
                   resultSize,
                   directResult,
@@ -266,7 +266,7 @@ private[spark] class SpillableTaskResultGetter(sparkEnv: SparkEnv, scheduler: Ta
             } else {
               // There is possible lock contention to the TaskSetResultStore
               val (returnResult, spilledSize) = store.save(
-                taskIdx, resultValue, resultSize, taskSetManager.taskSet.id)
+                taskPartitionId, resultValue, resultSize, taskSetManager.taskSet.id)
               if (spilledSize > 0) {
                 taskSetManager.totalResultInMemorySize.addAndGet(-spilledSize)
               }
@@ -314,7 +314,8 @@ private[spark] class SpillableTaskResultGetter(sparkEnv: SparkEnv, scheduler: Ta
           // There is possible lock contention to the TaskSetResultStore
           if (store.maybeSpill(size)) {
             getLargeResultExecutor.execute(
-              new FetchLargeResultTask(tid, taskIdx, blockId, size, taskSetManager, store))
+              new FetchLargeResultTask(tid, taskPartitionId, blockId, size,
+                taskSetManager, store))
           } else {
             successInDirectTaskIds += tid
             val result =
@@ -324,7 +325,7 @@ private[spark] class SpillableTaskResultGetter(sparkEnv: SparkEnv, scheduler: Ta
               failureTaskIds += tid
             } else {
               val (returnResult, spilledSize) = store.save(
-                taskIdx, result.value(), size, taskSetManager.taskSet.id)
+                taskPartitionId, result.value(), size, taskSetManager.taskSet.id)
               if (spilledSize > 0) {
                 taskSetManager.totalResultInMemorySize.addAndGet(-spilledSize)
               }
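Every hunk above follows the same pattern: the map returned by `scheduler.removeMultiRunningTasks` is looked up by task ID, and the deserialized result is saved into the `TaskSetResultStore` under that value, so the rename makes explicit that the key is the task's partition ID rather than an arbitrary index. Below is a minimal, self-contained sketch of that pattern; it assumes `removeMultiRunningTasks` yields a `Map[Long, Int]` from task ID to partition ID and that the store is keyed by partition ID. The class and signatures are illustrative stand-ins, not the actual Spark internals.

```scala
import scala.collection.mutable

object TaskResultNamingSketch {
  // Hypothetical stand-in for TaskSetResultStore: results are keyed by the
  // task's partition ID, not by an opaque "index".
  final class ResultStore {
    private val results = mutable.Map.empty[Int, Any]
    def save(taskPartitionId: Int, value: Any): Unit = results(taskPartitionId) = value
    def get(taskPartitionId: Int): Option[Any] = results.get(taskPartitionId)
  }

  def handleResults(
      taskId2TaskPartitionId: Map[Long, Int], // assumed shape of removeMultiRunningTasks' result
      taskDataSeq: Seq[(Long, Any)],
      store: ResultStore): Unit = {
    taskDataSeq.foreach { case (tid, value) =>
      // Same lookup as in the diff: resolve the partition ID for this task ID,
      // then store the (already deserialized) result under it.
      val taskPartitionId = taskId2TaskPartitionId(tid)
      store.save(taskPartitionId, value)
    }
  }

  def main(args: Array[String]): Unit = {
    val store = new ResultStore
    handleResults(Map(100L -> 0, 101L -> 1), Seq(100L -> "a", 101L -> "b"), store)
    println(store.get(0)) // Some(a)
  }
}
```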