Skip to content

Commit 8ee8949

Browse files
committed
Style fixes
1 parent 4126c1b commit 8ee8949

File tree

2 files changed

+10
-14
lines changed

2 files changed

+10
-14
lines changed

core/src/main/scala/org/apache/spark/storage/BlockManager.scala

Lines changed: 9 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,9 @@ import java.nio.channels.Channels
2424
import java.util.Collections
2525
import java.util.concurrent.{CompletableFuture, ConcurrentHashMap, TimeUnit}
2626

27+
import scala.collection.JavaConverters._
2728
import scala.collection.mutable
2829
import scala.collection.mutable.HashMap
29-
import scala.collection.JavaConverters._
3030
import scala.concurrent.{ExecutionContext, Future}
3131
import scala.concurrent.duration._
3232
import scala.reflect.ClassTag
@@ -1819,6 +1819,12 @@ private[spark] class BlockManager(
18191819
@volatile var running = true
18201820
override def run(): Unit = {
18211821
var migrating: Option[(Int, Long)] = None
1822+
val storageLevel = StorageLevel(
1823+
useDisk = true,
1824+
useMemory = false,
1825+
useOffHeap = false,
1826+
deserialized = false,
1827+
replication = 1)
18221828
// Once a block fails to transfer to an executor stop trying to transfer more blocks
18231829
try {
18241830
while (running) {
@@ -1839,25 +1845,15 @@ private[spark] class BlockManager(
18391845
peer.executorId,
18401846
indexBlockId,
18411847
indexBuffer,
1842-
StorageLevel(
1843-
useDisk=true,
1844-
useMemory=false,
1845-
useOffHeap=false,
1846-
deserialized=false,
1847-
replication=1),
1848+
storageLevel,
18481849
null)// class tag, we don't need for shuffle
18491850
blockTransferService.uploadBlockSync(
18501851
peer.host,
18511852
peer.port,
18521853
peer.executorId,
18531854
dataBlockId,
18541855
dataBuffer,
1855-
StorageLevel(
1856-
useDisk=true,
1857-
useMemory=false,
1858-
useOffHeap=false,
1859-
deserialized=false,
1860-
replication=1),
1856+
storageLevel,
18611857
null)// class tag, we don't need for shuffle
18621858
}
18631859
}

resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/DecommissionSuite.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ private[spark] trait DecommissionSuite { k8sSuite: KubernetesSuite =>
3131
.set("spark.storage.decommission.enabled", "true")
3232
.set("spark.storage.decommission.shuffle_blocks", "true")
3333
.set("spark.storage.decommission.shuffle_blocks", "true")
34-
//Ensure we have somewhere to migrate our data too
34+
// Ensure we have somewhere to migrate our data to
3535
.set("spark.executor.instances", "3")
3636
// The default of 30 seconds is fine, but for testing we just want to get this done fast.
3737
.set("spark.storage.decommission.replicationReattemptInterval", "1s")

0 commit comments

Comments
 (0)