@@ -18,9 +18,9 @@
 package org.apache.spark.scheduler

 import java.nio.ByteBuffer
-import java.util.HashSet

 import scala.collection.mutable.HashMap
+import scala.collection.mutable.Set
 import scala.concurrent.duration._

 import org.mockito.Matchers.{anyInt, anyObject, anyString, eq => meq}
@@ -40,7 +40,7 @@ class FakeSchedulerBackend extends SchedulerBackend {
   def reviveOffers() {}
   def defaultParallelism(): Int = 1
   def maxNumConcurrentTasks(): Int = 0
-  val killedTaskIds: HashSet[Long] = new HashSet[Long]()
+  val killedTaskIds: Set[Long] = Set[Long]()
   override def killTask(
       taskId: Long,
       executorId: String,
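
The first hunk swaps java.util.HashSet for scala.collection.mutable.Set, so the
fake backend can record killed task ids with Scala's collection operators rather
than Java's add/contains API. A minimal sketch of the resulting usage (the
KilledTaskIdsSketch object and main wrapper are illustrative, not part of the
patch):

import scala.collection.mutable

object KilledTaskIdsSketch {
  // Same shape as the FakeSchedulerBackend field above: a mutable Scala Set
  // supports += and contains directly.
  val killedTaskIds: mutable.Set[Long] = mutable.Set[Long]()

  def main(args: Array[String]): Unit = {
    killedTaskIds += 7L                 // killTask would record the id here
    assert(killedTaskIds.contains(7L))  // assertions then check membership
  }
}
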
@@ -1328,22 +1328,30 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B
     tsm.handleFailedTask(tsm.taskAttempts.head.head.taskId, TaskState.FAILED, TaskKilled("test"))
     assert(tsm.isZombie)
   }
+
   test("SPARK-25250 On successful completion of a task attempt on a partition id, kill other" +
     " running task attempts on that same partition") {
     val taskScheduler = setupSchedulerWithMockTaskSetBlacklist()
+
     val firstAttempt = FakeTask.createTaskSet(10, stageAttemptId = 0)
     taskScheduler.submitTasks(firstAttempt)
+
     val offersFirstAttempt = (0 until 10).map { idx => WorkerOffer(s"exec-$idx", s"host-$idx", 1) }
     taskScheduler.resourceOffers(offersFirstAttempt)
+
     val tsm0 = taskScheduler.taskSetManagerForAttempt(0, 0).get
     val matchingTaskInfoFirstAttempt = tsm0.taskAttempts(0).head
     tsm0.handleFailedTask(matchingTaskInfoFirstAttempt.taskId, TaskState.FAILED,
       FetchFailed(null, 0, 0, 0, "fetch failed"))
+
     val secondAttempt = FakeTask.createTaskSet(10, stageAttemptId = 1)
     taskScheduler.submitTasks(secondAttempt)
+
     val offersSecondAttempt = (0 until 10).map { idx => WorkerOffer(s"exec-$idx", s"host-$idx", 1) }
     taskScheduler.resourceOffers(offersSecondAttempt)
+
     taskScheduler.markPartitionIdAsCompletedAndKillCorrespondingTaskAttempts(2, 0)
+
     val tsm1 = taskScheduler.taskSetManagerForAttempt(0, 1).get
     val indexInTsm = tsm1.partitionToIndex(2)
     val matchingTaskInfoSecondAttempt = tsm1.taskAttempts.flatten.filter(_.index == indexInTsm).head
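
The new test runs two stage attempts over the same ten partitions, fails one
task in attempt 0 with a FetchFailed, then calls
markPartitionIdAsCompletedAndKillCorrespondingTaskAttempts(2, 0) and looks up
the matching attempt-1 task. A toy sketch of the behavior under test, assuming
that completing a partition kills still-running attempts for it in other task
sets (ToySetManager, TaskAttempt, and KillOtherAttemptsSketch are illustrative
names, not Spark's implementation):

import scala.collection.mutable

// Toy stand-ins for TaskSetManager bookkeeping; `running` is mutable state.
case class TaskAttempt(taskId: Long, partitionId: Int, var running: Boolean)

class ToySetManager(val stageAttemptId: Int) {
  val attempts = mutable.Buffer[TaskAttempt]()

  // Kill every still-running attempt for partitionId, reporting each task id.
  def killRunningAttemptsFor(partitionId: Int, kill: Long => Unit): Unit =
    attempts.filter(a => a.partitionId == partitionId && a.running).foreach { a =>
      a.running = false
      kill(a.taskId)
    }
}

object KillOtherAttemptsSketch {
  def main(args: Array[String]): Unit = {
    val killed = mutable.Set[Long]()
    val tsm0 = new ToySetManager(0)  // first attempt: partition 2 already finished
    val tsm1 = new ToySetManager(1)  // second attempt: partition 2 still running
    tsm1.attempts += TaskAttempt(taskId = 101L, partitionId = 2, running = true)

    // Partition 2 completes in attempt 0, so the attempt-1 task gets killed.
    Seq(tsm0, tsm1).foreach(_.killRunningAttemptsFor(2, killed += _))
    assert(killed.contains(101L) && !tsm1.attempts.head.running)
  }
}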