
Commit 02b51bf

fix build
1 parent b6f7a12 commit 02b51bf

File tree

2 files changed: +23 -15 lines changed

core/src/main/scala/org/apache/spark/scheduler/MapStatusLocationFactory.scala

Lines changed: 1 addition & 1 deletion

@@ -19,7 +19,7 @@ package org.apache.spark.scheduler
 
 import java.io.ObjectInput
 
-import org.apache.curator.shaded.com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
+import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
 
 import org.apache.spark.{SparkConf, SparkException}
 import org.apache.spark.internal.config
core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala

Lines changed: 22 additions & 14 deletions

@@ -538,14 +538,14 @@ class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with Ti
     val initialMapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
     // val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
     assert(initialMapStatus1.count(_ != null) === 3)
-    assert(initialMapStatus1.map{_.location.executorId}.toSet ===
+    assert(initialMapStatus1.map{_.location.asInstanceOf[BlockManagerId].executorId}.toSet ===
       Set("hostA-exec1", "hostA-exec2", "hostB-exec"))
     assert(initialMapStatus1.map{_.mapId}.toSet === Set(5, 6, 7))
 
     val initialMapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
     // val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
     assert(initialMapStatus2.count(_ != null) === 3)
-    assert(initialMapStatus2.map{_.location.executorId}.toSet ===
+    assert(initialMapStatus2.map{_.location.asInstanceOf[BlockManagerId].executorId}.toSet ===
       Set("hostA-exec1", "hostA-exec2", "hostB-exec"))
     assert(initialMapStatus2.map{_.mapId}.toSet === Set(8, 9, 10))
 
@@ -561,13 +561,13 @@ class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with Ti
 
     val mapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
     assert(mapStatus1.count(_ != null) === 1)
-    assert(mapStatus1(2).location.executorId === "hostB-exec")
-    assert(mapStatus1(2).location.host === "hostB")
+    assert(mapStatus1(2).location.asInstanceOf[BlockManagerId].executorId === "hostB-exec")
+    assert(mapStatus1(2).location.asInstanceOf[BlockManagerId].host === "hostB")
 
     val mapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
     assert(mapStatus2.count(_ != null) === 1)
-    assert(mapStatus2(2).location.executorId === "hostB-exec")
-    assert(mapStatus2(2).location.host === "hostB")
+    assert(mapStatus2(2).location.asInstanceOf[BlockManagerId].executorId === "hostB-exec")
+    assert(mapStatus2(2).location.asInstanceOf[BlockManagerId].host === "hostB")
   }
 
   test("SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure") {
@@ -591,8 +591,10 @@ class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with Ti
     // The MapOutputTracker has all the shuffle files
     val mapStatuses = mapOutputTracker.shuffleStatuses(shuffleId).mapStatuses
     assert(mapStatuses.count(_ != null) === 3)
-    assert(mapStatuses.count(s => s != null && s.location.executorId == "hostA-exec") === 2)
-    assert(mapStatuses.count(s => s != null && s.location.executorId == "hostB-exec") === 1)
+    assert(mapStatuses.count(s => s != null &&
+      s.location.asInstanceOf[BlockManagerId].executorId == "hostA-exec") === 2)
+    assert(mapStatuses.count(s => s != null &&
+      s.location.asInstanceOf[BlockManagerId].executorId == "hostB-exec") === 1)
 
     // Now a fetch failure from the lost executor occurs
     complete(taskSets(1), Seq(
@@ -605,8 +607,10 @@ class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with Ti
 
     // Shuffle files for hostA-exec should be lost
     assert(mapStatuses.count(_ != null) === 1)
-    assert(mapStatuses.count(s => s != null && s.location.executorId == "hostA-exec") === 0)
-    assert(mapStatuses.count(s => s != null && s.location.executorId == "hostB-exec") === 1)
+    assert(mapStatuses.count(s => s != null &&
+      s.location.asInstanceOf[BlockManagerId].executorId == "hostA-exec") === 0)
+    assert(mapStatuses.count(s => s != null &&
+      s.location.asInstanceOf[BlockManagerId].executorId == "hostB-exec") === 1)
 
     // Additional fetch failure from the executor does not result in further call to
     // mapOutputTracker.removeOutputsOnExecutor
@@ -843,7 +847,8 @@ class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with Ti
     // have the 2nd attempt pass
     complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
     // we can see both result blocks now
-    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
+    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0)
+      .map(_._1.asInstanceOf[BlockManagerId].host).toSet ===
       HashSet("hostA", "hostB"))
     completeAndCheckAnswer(taskSets(3), Seq((Success, 43)), Map(0 -> 42, 1 -> 43))
     assertDataStructuresEmpty()
@@ -1228,7 +1233,8 @@ class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with Ti
     submit(reduceRdd, Array(0, 1))
     completeShuffleMapStageSuccessfully(0, 0, reduceRdd.partitions.length)
     // The MapOutputTracker should know about both map output locations.
-    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
+    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0)
+      .map(_._1.asInstanceOf[BlockManagerId].host).toSet ===
       HashSet("hostA", "hostB"))
 
     // The first result task fails, with a fetch failure for the output from the first mapper.
@@ -1349,9 +1355,11 @@ class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with Ti
 
     completeShuffleMapStageSuccessfully(0, 0, 2)
     // The MapOutputTracker should know about both map output locations.
-    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
+    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0)
+      .map(_._1.asInstanceOf[BlockManagerId].host).toSet ===
       HashSet("hostA", "hostB"))
-    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 1).map(_._1.host).toSet ===
+    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 1)
+      .map(_._1.asInstanceOf[BlockManagerId].host).toSet ===
       HashSet("hostA", "hostB"))
 
     // The first result task fails, with a fetch failure for the output from the first mapper.
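Every test change above applies the same pattern: the value exposed through MapStatus.location (and the first element of the pairs returned by getMapSizesByExecutorId) is apparently no longer statically typed as BlockManagerId on this branch, so the assertions now downcast with asInstanceOf[BlockManagerId] before reading executorId and host, and the longer expressions are rewrapped, presumably to stay within Spark's 100-character style limit. A minimal, self-contained sketch of that pattern, using hypothetical stand-in types rather than Spark's real classes:

// Hypothetical stand-ins, not Spark's classes; they only mirror the shape needed
// to show why the tests cast before reading executorId and host.
trait Location
case class BlockManagerId(executorId: String, host: String) extends Location
case class MapStatus(location: Location, mapId: Long)

object DowncastSketch extends App {
  val statuses = Array(
    MapStatus(BlockManagerId("hostA-exec1", "hostA"), 5L),
    MapStatus(BlockManagerId("hostB-exec", "hostB"), 7L))

  // location is statically a Location, so the BlockManagerId fields are only reachable
  // after a cast, which is exactly the change applied throughout the suite above.
  val executors = statuses.map(_.location.asInstanceOf[BlockManagerId].executorId).toSet
  assert(executors == Set("hostA-exec1", "hostB-exec"))
}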
