@@ -50,7 +50,7 @@ private[spark] class Executor(
   logInfo(s"Starting executor ID $executorId on host $executorHostname")
 
   // Application dependencies (added through SparkContext) that we've fetched so far on this node.
-  // Each map holds the master's timestamp for the version of that file, JAR, or directory we got.
+  // Each map holds the master's timestamp for the version of that file or JAR we got.
   private val currentFiles: HashMap[String, Long] = new HashMap[String, Long]()
   private val currentJars: HashMap[String, Long] = new HashMap[String, Long]()
 
@@ -171,8 +171,7 @@ private[spark] class Executor(
     startGCTime = gcTime
 
     try {
-      val (taskFiles, taskJars, taskBytes) =
-        Task.deserializeWithDependencies(serializedTask)
+      val (taskFiles, taskJars, taskBytes) = Task.deserializeWithDependencies(serializedTask)
       updateDependencies(taskFiles, taskJars)
       task = ser.deserialize[Task[Any]](taskBytes, Thread.currentThread.getContextClassLoader)
 
@@ -334,9 +333,7 @@ private[spark] class Executor(
   * Download any missing dependencies if we receive a new set of files and JARs from the
   * SparkContext. Also adds any new JARs we fetched to the class loader.
   */
-  private def updateDependencies(
-      newFiles: HashMap[String, Long],
-      newJars: HashMap[String, Long]) {
+  private def updateDependencies(newFiles: HashMap[String, Long], newJars: HashMap[String, Long]) {
    lazy val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
    synchronized {
      // Fetch missing dependencies
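
Note on the last hunk: per the comments preserved in this diff, updateDependencies compares the timestamps the driver sends for each file and JAR against the executor's currentFiles/currentJars maps and fetches anything that is missing or stale. The following is a minimal, self-contained sketch of that timestamp check only; the object and method names are hypothetical and this is not the actual Spark implementation, which also downloads the artifacts and updates the class loader.

    import scala.collection.mutable.HashMap

    // Hypothetical sketch: decide which dependencies an executor would need to (re)fetch,
    // given the timestamps it already has versus the timestamps the driver reports.
    object DependencySyncSketch {
      // Return the entries in `incoming` that are absent locally or carry a newer timestamp.
      def missingOrStale(
          current: HashMap[String, Long],
          incoming: HashMap[String, Long]): Seq[(String, Long)] = {
        incoming.toSeq.filter { case (name, timestamp) =>
          current.get(name).forall(_ < timestamp)
        }
      }

      def main(args: Array[String]): Unit = {
        val currentJars = HashMap("app.jar" -> 100L)
        val newJars = HashMap("app.jar" -> 200L, "util.jar" -> 50L)
        // app.jar is stale (100 < 200) and util.jar has never been fetched, so both qualify.
        missingOrStale(currentJars, newJars).foreach { case (name, timestamp) =>
          // A real executor would download the artifact here before recording the timestamp.
          currentJars(name) = timestamp
          println(s"fetched $name at timestamp $timestamp")
        }
      }
    }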