Skip to content

Commit 2791158

Browse files
committed
Review feedback
1 parent c23507b commit 2791158

File tree

2 files changed

+9
-12
lines changed

2 files changed

+9
-12
lines changed

yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -17,15 +17,15 @@
1717

1818
package org.apache.spark.deploy.yarn
1919

20-
import com.google.common.util.concurrent.ThreadFactoryBuilder
21-
2220
import java.util.concurrent._
2321
import java.util.concurrent.atomic.AtomicInteger
2422
import java.util.regex.Pattern
2523

2624
import scala.collection.JavaConversions._
2725
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
2826

27+
import com.google.common.util.concurrent.ThreadFactoryBuilder
28+
2929
import org.apache.hadoop.conf.Configuration
3030
import org.apache.hadoop.yarn.api.records._
3131
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse
@@ -53,7 +53,6 @@ object AllocationType extends Enumeration {
5353
// Refer to http://developer.yahoo.com/blogs/hadoop/posts/2011/03/mapreduce-nextgen-scheduler/ for
5454
// more info on how we are requesting for containers.
5555

56-
5756
/**
5857
* Acquires resources for executors from a ResourceManager and launches executors in new containers.
5958
*/
@@ -505,8 +504,7 @@ private[yarn] class YarnAllocator(
505504
amClient.allocate(progressIndicator)
506505
}
507506

508-
private def createRackResourceRequests(
509-
hostContainers: ArrayBuffer[ContainerRequest])
507+
private def createRackResourceRequests(hostContainers: ArrayBuffer[ContainerRequest])
510508
: ArrayBuffer[ContainerRequest] = {
511509
// Generate modified racks and new set of hosts under it before issuing requests.
512510
val rackToCounts = new HashMap[String, Int]()
@@ -602,8 +600,7 @@ private[yarn] class YarnAllocator(
602600
requestType: AllocationType.AllocationType,
603601
resource: String,
604602
numExecutors: Int,
605-
priority: Int)
606-
: ArrayBuffer[ContainerRequest] = {
603+
priority: Int): ArrayBuffer[ContainerRequest] = {
607604
// If hostname is specified, then we need at least two requests - node local and rack local.
608605
// There must be a third request, which is ANY. That will be specially handled.
609606
requestType match {

yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClient.scala

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,10 @@ package org.apache.spark.deploy.yarn
1919

2020
import java.util.{List => JList}
2121

22+
import scala.collection.JavaConversions._
23+
import scala.collection.{Map, Set}
24+
import scala.util.Try
25+
2226
import org.apache.hadoop.conf.Configuration
2327
import org.apache.hadoop.yarn.api.ApplicationConstants
2428
import org.apache.hadoop.yarn.api.records._
@@ -32,10 +36,6 @@ import org.apache.spark.{Logging, SecurityManager, SparkConf}
3236
import org.apache.spark.scheduler.SplitInfo
3337
import org.apache.spark.util.Utils
3438

35-
import scala.collection.JavaConversions._
36-
import scala.collection.{Map, Set}
37-
import scala.util.Try
38-
3939
/**
4040
* Handles registering and unregistering the application with the YARN ResourceManager.
4141
*/
@@ -120,7 +120,7 @@ private[spark] class YarnRMClient(args: ApplicationMasterArguments) extends Logg
120120
}
121121

122122
/** Returns the maximum number of attempts to register the AM. */
123-
def getMaxRegAttempts(conf: YarnConfiguration) =
123+
def getMaxRegAttempts(conf: YarnConfiguration): Int =
124124
conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS)
125125

126126
}

0 commit comments

Comments (0)