Skip to content

Commit e81aa5a

Browse files
committed
Use the remoteBlockSize param in the tests instead of conditioning on if we're testing shuffles or not
1 parent 4cfeb8e commit e81aa5a

File tree

1 file changed

+2
-4
lines changed

1 file changed

+2
-4
lines changed

core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionSuite.scala

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -66,10 +66,8 @@ class BlockManagerDecommissionSuite extends SparkFunSuite with LocalSparkContext
66 66
// workload we need to worry about.
67 67
.set(config.STORAGE_DECOMMISSION_REPLICATION_REATTEMPT_INTERVAL, 1L)
68 68

69-
// Force fetching to local disk
70-
if (shuffle) {
71-
conf.set("spark.network.maxRemoteBlockSizeFetchToMem", "1")
72-
}
69+
// Allow force fetching to local disk
70+
conf.set("spark.network.maxRemoteBlockSizeFetchToMem", remoteBlockSize)
73 71

74 72
sc = new SparkContext(master, "test", conf)
7573

0 commit comments

Comments (0)