
Commit 484f8e2

Revert "rename WorkerDecommission message to DecommissionWorker. (CR feedback)"
This reverts commit 50b9cb2.
1 parent 50b9cb2 commit 484f8e2

3 files changed: +6 -6 lines changed

core/src/main/scala/org/apache/spark/deploy/DeployMessage.scala

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ private[deploy] object DeployMessages {
    * @param id the worker id
    * @param worker the worker endpoint ref
    */
-  case class DecommissionWorker(
+  case class WorkerDecommission(
       id: String,
       worker: RpcEndpointRef)
     extends DeployMessage
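
For context, the name this revert restores follows the usual DeployMessages pattern: the decommission notification is just a serializable case class carried over Spark's RPC layer. Below is a minimal, self-contained sketch of that pattern; the WorkerRef trait is a stand-in for org.apache.spark.rpc.RpcEndpointRef added only so the sketch compiles outside Spark.

// Stand-in for org.apache.spark.rpc.RpcEndpointRef; only send() matters for this sketch.
trait WorkerRef {
  def send(message: Any): Unit
}

// Modelled on the DeployMessage hierarchy: messages exchanged between the deploy components.
sealed trait DeployMessage extends Serializable

// The revert restores this name (WorkerDecommission) in place of DecommissionWorker.
case class WorkerDecommission(
    id: String,
    worker: WorkerRef)
  extends DeployMessage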

core/src/main/scala/org/apache/spark/deploy/master/Master.scala

Lines changed: 3 additions & 3 deletions
@@ -245,7 +245,7 @@ private[deploy] class Master(
       logError("Leadership has been revoked -- master shutting down.")
       System.exit(0)

-    case DecommissionWorker(id, workerRef) =>
+    case WorkerDecommission(id, workerRef) =>
       logInfo("Recording worker %s decommissioning".format(id))
       if (state == RecoveryState.STANDBY) {
         workerRef.send(MasterInStandby)

@@ -874,7 +874,7 @@ private[deploy] class Master(

   /**
    * Decommission all workers that are active on any of the given hostnames. The decommissioning is
-   * asynchronously done by enqueueing DecommissionWorker messages to self. No checks are done about
+   * asynchronously done by enqueueing WorkerDecommission messages to self. No checks are done about
    * the prior state of the worker. So an already decommissioned worker will match as well.
    *
    * @param hostnames: A list of hostnames without the ports. Like "localhost", "foo.bar.com" etc

@@ -893,7 +893,7 @@ private[deploy] class Master(
     // The workers are removed async to avoid blocking the receive loop for the entire batch
     workersToRemove.foreach(wi => {
       logInfo(s"Sending the worker decommission to ${wi.id} and ${wi.endpoint}")
-      self.send(DecommissionWorker(wi.id, wi.endpoint))
+      self.send(WorkerDecommission(wi.id, wi.endpoint))
     })

     // Return the count of workers actually removed
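
The Master hunks show both halves of the flow: the receive loop handles WorkerDecommission directly, while the batch path enqueues one such message per matched worker via self.send so the receive loop is not blocked on the whole batch. A simplified sketch of that shape, reusing the WorkerRef and WorkerDecommission types from the sketch above (MasterSketch, WorkerInfo and the queue-based inbox are illustrative assumptions, not Spark's actual classes):

object MasterSketch {
  final case class WorkerInfo(id: String, endpoint: WorkerRef, host: String)

  // In Spark, self.send re-enqueues the message into the master's own RPC inbox;
  // a plain queue stands in for that here.
  private val inbox = scala.collection.mutable.Queue.empty[Any]
  private def selfSend(message: Any): Unit = inbox.enqueue(message)

  // Mirrors the receive case restored by the revert.
  def receive(message: Any): Unit = message match {
    case WorkerDecommission(id, _) =>
      println(s"Recording worker $id decommissioning")
    case _ => // other deploy messages
  }

  // Mirrors the batch path: one WorkerDecommission per worker on the given hosts,
  // enqueued to self rather than handled inline; returns the number of workers matched.
  def decommissionWorkersOnHosts(hostnames: Seq[String], workers: Seq[WorkerInfo]): Int = {
    val hosts = hostnames.map(_.toLowerCase).toSet
    val workersToRemove = workers.filter(w => hosts.contains(w.host.toLowerCase))
    workersToRemove.foreach(wi => selfSend(WorkerDecommission(wi.id, wi.endpoint)))
    workersToRemove.size
  }
}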

core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala

Lines changed: 2 additions & 2 deletions
@@ -668,7 +668,7 @@ private[deploy] class Worker(
       finishedApps += id
       maybeCleanupApplication(id)

-    case DecommissionWorker(_, _) =>
+    case WorkerDecommission(_, _) =>
       decommissionSelf()
   }

@@ -772,7 +772,7 @@ private[deploy] class Worker(
     if (conf.get(WORKER_DECOMMISSION_ENABLED)) {
       logDebug("Decommissioning self")
       decommissioned = true
-      sendToMaster(DecommissionWorker(workerId, self))
+      sendToMaster(WorkerDecommission(workerId, self))
     } else {
       logWarning("Asked to decommission self, but decommissioning not enabled")
     }
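
On the worker side, decommissionSelf() is gated on the decommissioning flag, marks the worker as decommissioned, and notifies the master with the same message. A toy version of that shape, again reusing the types from the first sketch (WorkerSketch and its constructor parameters are illustrative, not Spark's Worker API):

class WorkerSketch(workerId: String, master: WorkerRef, decommissionEnabled: Boolean) {
  // Stand-in for the worker's own RpcEndpointRef (`self` in the real code).
  private val selfRef: WorkerRef = new WorkerRef {
    def send(message: Any): Unit = () // no-op for the sketch
  }

  @volatile private var decommissioned = false

  // Mirrors the flag-gated pattern in the second hunk: set the flag, then tell the master.
  def decommissionSelf(): Unit = {
    if (decommissionEnabled) {
      decommissioned = true
      master.send(WorkerDecommission(workerId, selfRef))
    } else {
      println("Asked to decommission self, but decommissioning not enabled")
    }
  }
}

Given some masterRef: WorkerRef, calling new WorkerSketch("worker-1", masterRef, decommissionEnabled = true).decommissionSelf() sends a WorkerDecommission to the master, matching the flow in the diff.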
