@@ -21,7 +21,7 @@ import java.io._
 import java.lang.management.ManagementFactory
 import java.net._
 import java.nio.ByteBuffer
-import java.util.{Properties, Locale, Random, UUID}
+import java.util.{PriorityQueue, Properties, Locale, Random, UUID}
 import java.util.concurrent._
 import javax.net.ssl.HttpsURLConnection
@@ -30,7 +30,7 @@ import scala.collection.Map
 import scala.collection.mutable.ArrayBuffer
 import scala.io.Source
 import scala.reflect.ClassTag
-import scala.util.Try
+import scala.util.{Failure, Success, Try}
 import scala.util.control.{ControlThrowable, NonFatal}

 import com.google.common.io.{ByteStreams, Files}
@@ -64,9 +64,15 @@ private[spark] object CallSite {
 private[spark] object Utils extends Logging {
   val random = new Random()

+  val DEFAULT_SHUTDOWN_PRIORITY = 100
+
   private val MAX_DIR_CREATION_ATTEMPTS: Int = 10
   @volatile private var localRootDirs: Array[String] = null

+
+  private val shutdownHooks = new SparkShutdownHookManager()
+  shutdownHooks.install()
+
   /** Serialize an object using Java serialization */
   def serialize[T](o: T): Array[Byte] = {
     val bos = new ByteArrayOutputStream()
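Editor's aside, not part of the diff: once the manager is installed during `Utils` initialization, Spark-internal callers are expected to register hooks through `Utils.addShutdownHook` (defined later in this patch) rather than `Runtime.getRuntime.addShutdownHook`. A minimal sketch of the intended call pattern, with an invented hook body, assuming a caller inside the `org.apache.spark` package:

// Illustrative caller code only; the hook body and variable names are made up.
val hookRef = Utils.addShutdownHook { () =>
  // Runs at JVM shutdown, at DEFAULT_SHUTDOWN_PRIORITY (100).
  println("flushing state before exit")
}

// The returned handle can unregister the hook later; returns true if it was still pending.
Utils.removeShutdownHook(hookRef)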
@@ -176,18 +182,16 @@ private[spark] object Utils extends Logging {
   private val shutdownDeleteTachyonPaths = new scala.collection.mutable.HashSet[String]()

   // Add a shutdown hook to delete the temp dirs when the JVM exits
-  Runtime.getRuntime.addShutdownHook(new Thread("delete Spark temp dirs") {
-    override def run(): Unit = Utils.logUncaughtExceptions {
-      logDebug("Shutdown hook called")
-      shutdownDeletePaths.foreach { dirPath =>
-        try {
-          Utils.deleteRecursively(new File(dirPath))
-        } catch {
-          case e: Exception => logError(s"Exception while deleting Spark temp dir: $dirPath", e)
-        }
+  addShutdownHook { () =>
+    logDebug("Shutdown hook called")
+    shutdownDeletePaths.foreach { dirPath =>
+      try {
+        Utils.deleteRecursively(new File(dirPath))
+      } catch {
+        case e: Exception => logError(s"Exception while deleting Spark temp dir: $dirPath", e)
       }
     }
-  })
+  }

   // Register the path to be deleted via shutdown hook
   def registerShutdownDeleteDir(file: File) {
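For comparison (again not part of the patch), the priority overload added later in this diff lets related cleanup steps be ordered explicitly; given the `compareTo` shown further down, hooks with higher priority values run first. A hypothetical sketch:

// Hypothetical ordering example: the priority-150 hook is polled before the
// default-priority (100) hook when the JVM shuts down.
Utils.addShutdownHook(150, () => println("stop accepting work"))
Utils.addShutdownHook(() => println("delete temp dirs"))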
@@ -613,7 +617,7 @@ private[spark] object Utils extends Logging {
     }
     Utils.setupSecureURLConnection(uc, securityMgr)

-    val timeoutMs = 
+    val timeoutMs =
       conf.getTimeAsSeconds("spark.files.fetchTimeout", "60s").toInt * 1000
     uc.setConnectTimeout(timeoutMs)
     uc.setReadTimeout(timeoutMs)
@@ -1172,7 +1176,7 @@ private[spark] object Utils extends Logging {
   /**
    * Execute a block of code that evaluates to Unit, forwarding any uncaught exceptions to the
    * default UncaughtExceptionHandler
-   * 
+   *
    * NOTE: This method is to be called by the spark-started JVM process.
    */
   def tryOrExit(block: => Unit) {
@@ -1185,11 +1189,11 @@ private[spark] object Utils extends Logging {
   }

   /**
-   * Execute a block of code that evaluates to Unit, stop SparkContext is there is any uncaught 
+   * Execute a block of code that evaluates to Unit, stop SparkContext is there is any uncaught
    * exception
-   * 
-   * NOTE: This method is to be called by the driver-side components to avoid stopping the 
-   * user-started JVM process completely; in contrast, tryOrExit is to be called in the 
+   *
+   * NOTE: This method is to be called by the driver-side components to avoid stopping the
+   * user-started JVM process completely; in contrast, tryOrExit is to be called in the
    * spark-started JVM process.
    */
   def tryOrStopSparkContext(sc: SparkContext)(block: => Unit) {
@@ -2132,6 +2136,93 @@ private[spark] object Utils extends Logging {
       .getOrElse(UserGroupInformation.getCurrentUser().getShortUserName())
   }

+  /**
+   * Adds a shutdown hook with default priority.
+   */
+  def addShutdownHook(hook: () => Unit): AnyRef = {
+    addShutdownHook(DEFAULT_SHUTDOWN_PRIORITY, hook)
+  }
+
+  /**
+   * Adds a shutdown hook with the given priority. Hooks with higher priority values run
+   * first.
+   */
+  def addShutdownHook(priority: Int, hook: () => Unit): AnyRef = {
+    shutdownHooks.add(priority, hook)
+  }
+
+  /**
+   * Remove a previously installed shutdown hook.
+   */
+  def removeShutdownHook(ref: AnyRef): Boolean = {
+    shutdownHooks.remove(ref)
+  }
+
+}
+
+private[util] class SparkShutdownHookManager {
+
+  private val hooks = new PriorityQueue[SparkShutdownHook]()
+  private var shuttingDown = false
+
+  /**
+   * Install a hook to run at shutdown and run all registered hooks in order. Hadoop 1.x does not
+   * have `ShutdownHookManager`, so in that case we just use the JVM's `Runtime` object and hope
+   * for the best.
+   */
+  def install(): Unit = {
+    val hookTask = new Runnable() {
+      override def run(): Unit = runAll()
+    }
+    Try(Class.forName("org.apache.hadoop.util.ShutdownHookManager")) match {
+      case Success(shmClass) =>
+        val fsPriority = classOf[FileSystem].getField("SHUTDOWN_HOOK_PRIORITY").get()
+          .asInstanceOf[Int]
+        val shm = shmClass.getMethod("get").invoke(null)
+        shm.getClass().getMethod("addShutdownHook", classOf[Runnable], classOf[Int])
+          .invoke(shm, hookTask, Integer.valueOf(fsPriority + 30))
+
+      case Failure(_) =>
+        Runtime.getRuntime.addShutdownHook(new Thread(hookTask, "Spark Shutdown Hook"))
+    }
+  }
+
+  def runAll(): Unit = synchronized {
+    shuttingDown = true
+    while (!hooks.isEmpty()) {
+      Utils.logUncaughtExceptions(hooks.poll().run())
+    }
+  }
+
+  def add(priority: Int, hook: () => Unit): AnyRef = synchronized {
+    checkState()
+    val hookRef = new SparkShutdownHook(priority, hook)
+    hooks.add(hookRef)
+    hookRef
+  }
+
+  def remove(ref: AnyRef): Boolean = synchronized {
+    checkState()
+    hooks.remove(ref)
+  }
+
+  private def checkState(): Unit = {
+    if (shuttingDown) {
+      throw new IllegalStateException("Shutdown hooks cannot be modified during shutdown.")
+    }
+  }
+
+}
+
+private class SparkShutdownHook(private val priority: Int, hook: () => Unit)
+  extends Comparable[SparkShutdownHook] {
+
+  override def compareTo(other: SparkShutdownHook): Int = {
+    other.priority - priority
+  }
+
+  def run(): Unit = hook()
+
 }

 /**
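A standalone sketch (not in the patch) of why the `compareTo` above yields highest-priority-first ordering: `java.util.PriorityQueue` polls its smallest element, and subtracting `priority` from `other.priority` makes the hook with the larger value compare as smaller. The class and priority values below are invented for illustration:

import java.util.PriorityQueue

object HookOrderDemo extends App {
  // Throwaway class mirroring SparkShutdownHook's ordering.
  class DemoHook(val priority: Int, val name: String) extends Comparable[DemoHook] {
    override def compareTo(other: DemoHook): Int = other.priority - priority
  }

  val queue = new PriorityQueue[DemoHook]()
  queue.add(new DemoHook(100, "default"))
  queue.add(new DemoHook(25, "low"))
  queue.add(new DemoHook(150, "high"))

  while (!queue.isEmpty()) {
    println(queue.poll().name) // prints: high, default, low
  }
}

Note that the subtraction-based comparison can overflow for priorities near Int.MaxValue or Int.MinValue; Integer.compare(other.priority, priority) would avoid that, although the priorities used here are small.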