@@ -913,7 +913,7 @@ class DAGScheduler(
           partitionsToCompute.map { id =>
             val locs = getPreferredLocs(stage.rdd, id)
             val part = stage.rdd.partitions(id)
-            new ShuffleMapTask(stage.id, stage.attemptId, taskBinary, part, locs)
+            new ShuffleMapTask(stage.id, stage.latestInfo.attemptId, taskBinary, part, locs)
           }
 
         case stage: ResultStage =>
@@ -922,7 +922,7 @@ class DAGScheduler(
             val p: Int = job.partitions(id)
             val part = stage.rdd.partitions(p)
             val locs = getPreferredLocs(stage.rdd, p)
-            new ResultTask(stage.id, stage.attemptId, taskBinary, part, locs, id)
+            new ResultTask(stage.id, stage.latestInfo.attemptId, taskBinary, part, locs, id)
           }
       }
     } catch {
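
Both hunks above make the same substitution: when ShuffleMapTask and ResultTask instances are built, they are stamped with stage.latestInfo.attemptId instead of stage.attemptId. Per the comment removed in the next hunk, the stage's own attemptId counter can already be on the next attempt, while latestInfo describes the attempt that was actually launched. Below is a minimal sketch of that distinction, not Spark's real implementation; SketchStage, StageAttemptInfo and SketchTask are hypothetical names invented for illustration.

    // Hypothetical, simplified model: the stage keeps a counter that is bumped when a
    // new attempt is prepared, and separately records info about the attempt that was
    // most recently launched. Tasks copy the launched attempt's id.
    case class StageAttemptInfo(attemptId: Int)

    class SketchStage(val id: Int) {
      private var nextAttemptId = 0
      private var info: StageAttemptInfo = StageAttemptInfo(attemptId = -1)

      // Called each time the stage is (re)submitted: record the attempt being launched,
      // then advance the counter to the value the next attempt would get.
      def makeNewStageAttempt(): Unit = {
        info = StageAttemptInfo(nextAttemptId)
        nextAttemptId += 1
      }

      // After makeNewStageAttempt(), this counter already refers to a future attempt.
      def attemptId: Int = nextAttemptId

      // Info about the attempt that is actually running.
      def latestInfo: StageAttemptInfo = info
    }

    case class SketchTask(stageId: Int, stageAttemptId: Int, partitionId: Int)

    object TaskCreationSketch {
      // Tasks are tagged with the running attempt's id, mirroring the change above.
      def makeTasks(stage: SketchStage, partitions: Seq[Int]): Seq[SketchTask] =
        partitions.map(p => SketchTask(stage.id, stage.latestInfo.attemptId, p))
    }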
@@ -1128,8 +1128,6 @@ class DAGScheduler(
           val failedStage = stageIdToStage(task.stageId)
           val mapStage = shuffleToMapStage(shuffleId)
 
-          // failedStage.attemptId is already on the next attempt, so we have to use
-          // failedStage.latestInfo.attemptId
           if (failedStage.latestInfo.attemptId != task.stageAttemptId) {
             logInfo(s"Ignoring fetch failure from $task as it's from $failedStage attempt " +
               s"${task.stageAttemptId}, which has already failed")
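
The guard kept in this hunk is what makes the attempt id carried by each task useful: a fetch failure reported by a task from an earlier attempt of the stage is logged and ignored, because that attempt has already been marked as failed. Here is a minimal sketch of the same check, reusing the hypothetical SketchStage and SketchTask types from the sketch above; handleFetchFailure is an invented name, not Spark's actual handler.

    object FetchFailureSketch {
      // Only react to fetch failures coming from the stage's latest attempt; anything
      // reported by an older attempt was already dealt with when that attempt failed.
      def handleFetchFailure(failedStage: SketchStage, task: SketchTask): Unit = {
        if (failedStage.latestInfo.attemptId != task.stageAttemptId) {
          println(s"Ignoring fetch failure from $task as it's from a stale attempt " +
            s"${task.stageAttemptId}")
        } else {
          // ... handle the failure (e.g. resubmit the affected stages) ...
        }
      }
    }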