File tree (2 files changed, +1 -5 lines)

  main/scala/org/apache/spark/storage
  test/scala/org/apache/spark/storage
main/scala/org/apache/spark/storage

@@ -48,7 +48,6 @@ private[storage] class BlockManagerDecommissioner(
   @volatile private var rddBlocksLeft: Boolean = true
   @volatile private var shuffleBlocksLeft: Boolean = true
 
-
   /**
    * This runnable consumes any shuffle blocks in the queue for migration. This part of a
    * producer/consumer where the main migration loop updates the queue of blocks to be migrated
@@ -129,8 +128,6 @@ private[storage] class BlockManagerDecommissioner(
   // if a new shuffle file is created by a running task.
   private val numMigratedShuffles = new AtomicInteger(0)
 
-
-
   // Shuffles which are queued for migration & number of retries so far.
   // Visible in storage for testing.
   private[storage] val shufflesToMigrate =
@@ -369,7 +366,7 @@ private[storage] class BlockManagerDecommissioner(
     if (stopped || (stoppedRDD && stoppedShuffle)) {
       (System.nanoTime(), true)
     } else {
-
+      // Chose the min of the running times.
       val lastMigrationTime = if (
         conf.get(config.STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED) &&
         conf.get(config.STORAGE_DECOMMISSION_RDD_BLOCKS_ENABLED)) {
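The comment added in the last hunk ("Chose the min of the running times.") summarizes the branch that follows: when both shuffle-block and RDD-block migration are enabled, the decommissioner should report the older (minimum) of the two last-migration timestamps, since it is only caught up to the slower of the two. A minimal sketch of that selection, using hypothetical parameter names rather than the decommissioner's actual fields:

// Minimal sketch, not the Spark implementation: which timestamp to report as
// the last migration time. Parameter names are hypothetical stand-ins.
object LastMigrationTimeSketch {
  def lastMigrationTime(
      shuffleEnabled: Boolean,   // e.g. STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED
      rddEnabled: Boolean,       // e.g. STORAGE_DECOMMISSION_RDD_BLOCKS_ENABLED
      lastShuffleMigrationTime: Long,
      lastRDDMigrationTime: Long): Long = {
    if (shuffleEnabled && rddEnabled) {
      // Both block types migrate independently, so we are only caught up to
      // the slower of the two: report the minimum (older) timestamp.
      math.min(lastShuffleMigrationTime, lastRDDMigrationTime)
    } else if (shuffleEnabled) {
      lastShuffleMigrationTime
    } else {
      lastRDDMigrationTime
    }
  }
}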
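The doc comment touched in the first hunk describes a producer/consumer arrangement: the main migration loop feeds a queue of shuffle blocks while a dedicated runnable consumes them, with @volatile flags and the AtomicInteger numMigratedShuffles tracking progress. A rough, self-contained sketch of that shape, with simplified types and hypothetical names (it is not the BlockManagerDecommissioner internals):

import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicInteger

// Rough sketch of the producer/consumer shape only; block ids are plain
// strings and the actual migration work is elided.
class ShuffleMigrationSketch {
  @volatile private var stopped: Boolean = false
  private val numMigrated = new AtomicInteger(0)
  private val shufflesToMigrate = new ConcurrentLinkedQueue[String]()

  // Producer side: the main migration loop enqueues newly discovered shuffle blocks.
  def offer(blockId: String): Unit = shufflesToMigrate.offer(blockId)

  // Consumer side: a runnable drains the queue until stop() is called.
  val migrationRunnable: Runnable = new Runnable {
    override def run(): Unit = {
      while (!stopped) {
        val block = shufflesToMigrate.poll()
        if (block != null) {
          // ... replicate the block to a peer executor here ...
          numMigrated.incrementAndGet()
        } else {
          Thread.sleep(10) // nothing queued yet; back off briefly
        }
      }
    }
  }

  def migratedSoFar: Int = numMigrated.get()
  def stop(): Unit = stopped = true
}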
test/scala/org/apache/spark/storage

@@ -92,7 +92,6 @@ class BlockManagerDecommissionUnitSuite extends SparkFunSuite with Matchers {
     } finally {
       bmDecomManager.stop()
     }
-
     bmDecomManager.stop()
   }
 }
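The test hunk calls bmDecomManager.stop() inside the finally block and again right after it, which relies on stop() being safe to invoke more than once. A hypothetical illustration of one way such a call can be made idempotent (not the suite's or decommissioner's actual code):

import java.util.concurrent.atomic.AtomicBoolean

// Hypothetical sketch: guard shutdown work so repeated stop() calls are no-ops.
class StoppableSketch {
  private val stopped = new AtomicBoolean(false)

  def stop(): Unit = {
    // compareAndSet succeeds only for the first caller, so the shutdown work
    // runs at most once even if stop() is invoked twice.
    if (stopped.compareAndSet(false, true)) {
      // ... interrupt and join migration threads here ...
    }
  }
}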