@@ -209,16 +209,15 @@ private[spark] class TaskSchedulerImpl(
   def resourceOffers(offers: Seq[WorkerOffer]): Seq[Seq[TaskDescription]] = synchronized {
     SparkEnv.set(sc.env)
 
-    val sortedTaskSets = rootPool.getSortedTaskSetQueue
     // Mark each slave as alive and remember its hostname
+    // Also track if new executor is added
+    var newExecAvail = false
     for (o <- offers) {
       executorIdToHost(o.executorId) = o.host
       if (!executorsByHost.contains(o.host)) {
         executorsByHost(o.host) = new HashSet[String]()
         executorAdded(o.executorId, o.host)
-        for (taskSet <- sortedTaskSets) {
-          taskSet.executorAdded(o.executorId, o.host)
-        }
+        newExecAvail = true
       }
     }
 
@@ -227,9 +226,13 @@ private[spark] class TaskSchedulerImpl(
     // Build a list of tasks to assign to each worker.
     val tasks = shuffledOffers.map(o => new ArrayBuffer[TaskDescription](o.cores))
     val availableCpus = shuffledOffers.map(o => o.cores).toArray
+    val sortedTaskSets = rootPool.getSortedTaskSetQueue
     for (taskSet <- sortedTaskSets) {
       logDebug("parentName: %s, name: %s, runningTasks: %s".format(
         taskSet.parent.name, taskSet.name, taskSet.runningTasks))
+      if (newExecAvail) {
+        taskSet.executorAdded()
+      }
     }
 
     // Take each TaskSet in our scheduling order, and then offer it each node in increasing order
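
For context: the change records `newExecAvail` during the registration pass and only then builds `sortedTaskSets`, so each task set is notified at most once per `resourceOffers` call, and the sorted queue reflects everything registered in that call. The new no-argument `taskSet.executorAdded()` suggests the task set re-derives its own state rather than being told about a specific executor; a minimal, self-contained Scala sketch of such a hook (class and helper names hypothetical, not taken from this commit) might look like:

    // Hypothetical sketch of the task-set side of this change: the
    // no-argument executorAdded() recomputes which locality levels
    // (e.g. PROCESS_LOCAL, NODE_LOCAL, ANY) are now achievable.
    class TaskSetManagerSketch {
      private var myLocalityLevels: Seq[String] = Seq("ANY")

      // Placeholder: a real scheduler would inspect pending tasks and
      // the executor/host maps to decide which levels are valid.
      private def computeValidLocalityLevels(): Seq[String] =
        Seq("PROCESS_LOCAL", "NODE_LOCAL", "ANY")

      def executorAdded(): Unit = {
        myLocalityLevels = computeValidLocalityLevels()
      }
    }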