
Commit 277043e

Fix for SparkContext stop behavior
1 parent 446b0a4 commit 277043e

1 file changed: +1 -6 lines changed

core/src/main/scala/org/apache/spark/SparkEnv.scala

Lines changed: 1 addition & 6 deletions
@@ -22,7 +22,6 @@ import java.net.Socket
 
 import akka.actor.ActorSystem
 
-import scala.collection.JavaConversions._
 import scala.collection.mutable
 import scala.util.Properties
 
@@ -45,8 +44,6 @@ import org.apache.spark.storage._
 import org.apache.spark.unsafe.memory.{ExecutorMemoryManager, MemoryAllocator}
 import org.apache.spark.util.{RpcUtils, Utils}
 
-import scala.util.control.NonFatal
-
 /**
  * :: DeveloperApi ::
 * Holds all the runtime environment objects for a running Spark instance (either master or worker),
@@ -95,8 +92,7 @@ class SparkEnv (
 
     if (!isStopped) {
       isStopped = true
-
-      pythonWorkers.foreach { case (key, worker) => worker.stop()}
+      pythonWorkers.values.foreach(_.stop())
       Option(httpFileServer).foreach(_.stop())
       mapOutputTracker.stop()
       shuffleManager.stop()
@@ -107,7 +103,6 @@ class SparkEnv (
       outputCommitCoordinator.stop()
       rpcEnv.shutdown()
 
-
      // Unfortunately Akka's awaitTermination doesn't actually wait for the Netty server to shut
      // down, but let's call it anyway in case it gets fixed in a later release
      // UPDATE: In Akka 2.1.x, this hangs if there are remote actors, so we can't call it.
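The substantive change is confined to SparkEnv.stop(): the old code pattern-matched each (key, worker) pair only to discard the key, while the new code iterates the map's values directly; the two forms shut down the same workers. The remaining hunks only drop imports and blank lines. Below is a minimal sketch of the idiom in isolation; the Worker class and workers map are hypothetical stand-ins, not the actual SparkEnv fields.

import scala.collection.mutable

// Hypothetical stand-in for the Python worker factories tracked by SparkEnv.
class Worker(val name: String) {
  def stop(): Unit = println(s"stopping worker $name")
}

object StopIdiom {
  def main(args: Array[String]): Unit = {
    val workers = mutable.HashMap("a" -> new Worker("a"), "b" -> new Worker("b"))

    // Old style: destructure each (key, worker) pair and ignore the key.
    workers.foreach { case (_, worker) => worker.stop() }

    // New style (as in this commit): iterate the values directly; same effect, less noise.
    workers.values.foreach(_.stop())
  }
}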
