@@ -17,15 +17,15 @@
 
 package org.apache.spark.deploy.yarn
 
-import com.google.common.util.concurrent.ThreadFactoryBuilder
-
 import java.util.concurrent._
 import java.util.concurrent.atomic.AtomicInteger
 import java.util.regex.Pattern
 
 import scala.collection.JavaConversions._
 import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
 
+import com.google.common.util.concurrent.ThreadFactoryBuilder
+
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.yarn.api.records._
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse
@@ -53,7 +53,6 @@ object AllocationType extends Enumeration {
 // Refer to http://developer.yahoo.com/blogs/hadoop/posts/2011/03/mapreduce-nextgen-scheduler/ for
 // more info on how we are requesting for containers.
 
-
 /**
  * Acquires resources for executors from a ResourceManager and launches executors in new containers.
  */
@@ -505,8 +504,7 @@ private[yarn] class YarnAllocator(
     amClient.allocate(progressIndicator)
   }
 
-  private def createRackResourceRequests(
-      hostContainers: ArrayBuffer[ContainerRequest])
+  private def createRackResourceRequests(hostContainers: ArrayBuffer[ContainerRequest])
     : ArrayBuffer[ContainerRequest] = {
     // Generate modified racks and new set of hosts under it before issuing requests.
     val rackToCounts = new HashMap[String, Int]()
@@ -602,8 +600,7 @@ private[yarn] class YarnAllocator(
       requestType: AllocationType.AllocationType,
      resource: String,
       numExecutors: Int,
-      priority: Int)
-    : ArrayBuffer[ContainerRequest] = {
+      priority: Int): ArrayBuffer[ContainerRequest] = {
     // If hostname is specified, then we need at least two requests - node local and rack local.
     // There must be a third request, which is ANY. That will be specially handled.
     requestType match {
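The import move in the first hunk lines the file up with the import grouping generally used in the Spark codebase: java/javax first, then scala, then other libraries (Guava, Hadoop/YARN, ...), then org.apache.spark, with blank lines between groups. Below is a minimal sketch of a file header laid out that way, assuming the Spark YARN module's usual dependencies are on the classpath; the Logging import and the ImportOrderSketch object are illustrative only and are not part of this change.

```scala
package org.apache.spark.deploy.yarn

// Group 1: java / javax
import java.util.concurrent.atomic.AtomicInteger

// Group 2: scala
import scala.collection.mutable.ArrayBuffer

// Group 3: other libraries (Guava, Hadoop/YARN, ...)
import com.google.common.util.concurrent.ThreadFactoryBuilder

import org.apache.hadoop.conf.Configuration

// Group 4: spark
import org.apache.spark.Logging  // illustrative only; not touched by this diff

// Tiny object so the sketch compiles on its own; unrelated to YarnAllocator itself.
private[yarn] object ImportOrderSketch {
  val launchedContainers = new AtomicInteger(0)
  val pendingHosts = ArrayBuffer.empty[String]
}
```

Treating Guava as a third-party library rather than a core import is what moves ThreadFactoryBuilder from the top of the file down next to the Hadoop/YARN imports.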