Commit bfadf11

HDFS-9393. After choosing favored nodes, choosing nodes for remaining replicas should go through BlockPlacementPolicy (Contributed by J.Andreina)
Parent: 061c05c

4 files changed: +87 −4 lines

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 4 additions & 0 deletions
@@ -2519,6 +2519,10 @@ Release 2.8.0 - UNRELEASED
     HDFS-9571. Fix ASF Licence warnings in Jenkins reports
     (Brahma Reddy Battula via cnauroth)
 
+    HDFS-9393. After choosing favored nodes, choosing nodes for remaining
+    replicas should go through BlockPlacementPolicy
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

Lines changed: 11 additions & 4 deletions
@@ -147,11 +147,18 @@ DatanodeStorageInfo[] chooseTarget(String src,
         avoidStaleNodes, storageTypes);
 
     if (results.size() < numOfReplicas) {
-      // Not enough favored nodes, choose other nodes.
+      // Not enough favored nodes, choose other nodes, based on block
+      // placement policy (HDFS-9393).
       numOfReplicas -= results.size();
-      DatanodeStorageInfo[] remainingTargets =
-          chooseTarget(src, numOfReplicas, writer, results,
-              false, favoriteAndExcludedNodes, blocksize, storagePolicy);
+      for (DatanodeStorageInfo storage : results) {
+        // add the chosen nodes and their related nodes to
+        // favoriteAndExcludedNodes
+        addToExcludedNodes(storage.getDatanodeDescriptor(),
+            favoriteAndExcludedNodes);
+      }
+      DatanodeStorageInfo[] remainingTargets =
+          chooseTarget(src, numOfReplicas, writer,
+              new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
+              favoriteAndExcludedNodes, blocksize, storagePolicy);
       for (int i = 0; i < remainingTargets.length; i++) {
         results.add(remainingTargets[i]);
       }
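
To make the intent of the hunk above easier to see in isolation, here is a minimal sketch of the same pattern with hypothetical types (Node, PlacementPolicy, and FavoredNodeChooser are illustrative stand-ins, not HDFS classes): favored nodes are consumed first, everything already chosen is added to the excluded set, and the remaining replicas are requested from the regular policy starting from an empty result list, so its placement rules apply from a clean slate.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/** Hypothetical stand-ins for DatanodeStorageInfo and BlockPlacementPolicy. */
interface Node {}

interface PlacementPolicy {
  /** Chooses {@code n} targets, never returning a node in {@code excluded}. */
  List<Node> chooseTargets(int n, Set<Node> excluded);
}

final class FavoredNodeChooser {
  /**
   * Mirrors the shape of the HDFS-9393 fix: use favored nodes first, then
   * delegate the remainder to the regular placement policy, with every
   * already-chosen node excluded and a fresh result list.
   */
  static List<Node> choose(int numReplicas, List<Node> favored,
      Set<Node> excluded, PlacementPolicy policy) {
    List<Node> results = new ArrayList<>();
    for (Node fav : favored) {
      if (results.size() == numReplicas) {
        break;
      }
      if (!excluded.contains(fav) && !results.contains(fav)) {
        results.add(fav);
      }
    }
    if (results.size() < numReplicas) {
      // The key step of the fix: make the already-chosen favored nodes
      // ineligible, then let the policy place the remaining replicas on its
      // own terms (rack/nodegroup rules evaluated from a clean slate).
      Set<Node> favoredAndExcluded = new HashSet<>(excluded);
      favoredAndExcluded.addAll(results);
      results.addAll(policy.chooseTargets(numReplicas - results.size(),
          favoredAndExcluded));
    }
    return results;
  }
}

Passing a fresh list to the recursive chooseTarget call, instead of the partially filled results list as before, is what lets the placement policy treat the remaining replicas as a normal placement request rather than a continuation that skips its rules.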

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

Lines changed: 40 additions & 0 deletions
@@ -1464,4 +1464,44 @@ public void testupdateNeededReplicationsDoesNotCauseSkippedReplication()
     chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
   }
+
+  /**
+   * In this testcase, 2 favored nodes are passed: dataNodes[0], dataNodes[1].
+   *
+   * Both favored nodes should be chosen as targets for placing replicas, and
+   * the remaining targets should then fall through to the block placement
+   * policy: the third target on the local writer's rack, the fourth on a
+   * remote rack, and the fifth on the same rack as the second.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testChooseExcessReplicaApartFromFavoredNodes() throws Exception {
+    DatanodeStorageInfo[] targets;
+    List<DatanodeDescriptor> expectedTargets =
+        new ArrayList<DatanodeDescriptor>();
+    expectedTargets.add(dataNodes[0]);
+    expectedTargets.add(dataNodes[1]);
+    expectedTargets.add(dataNodes[2]);
+    expectedTargets.add(dataNodes[4]);
+    expectedTargets.add(dataNodes[5]);
+    List<DatanodeDescriptor> favouredNodes =
+        new ArrayList<DatanodeDescriptor>();
+    favouredNodes.add(dataNodes[0]);
+    favouredNodes.add(dataNodes[1]);
+    targets = chooseTarget(5, dataNodes[2], null, favouredNodes);
+    assertEquals(targets.length, 5);
+    for (int i = 0; i < targets.length; i++) {
+      assertTrue("Target should be a part of Expected Targets",
+          expectedTargets.contains(targets[i].getDatanodeDescriptor()));
+    }
+  }
+
+  private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
+      DatanodeDescriptor writer, Set<Node> excludedNodes,
+      List<DatanodeDescriptor> favoredNodes) {
+    return replicator.chooseTarget(filename, numOfReplicas, writer,
+        excludedNodes, BLOCK_SIZE, favoredNodes,
+        TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+  }
 }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java

Lines changed: 32 additions & 0 deletions
@@ -781,4 +781,36 @@ public void testChooseFavoredNodesNodeGroup() throws Exception {
     assertTrue("2nd Replica is incorrect",
         expectedTargets.contains(targets[1].getDatanodeDescriptor()));
   }
+
+  /**
+   * In this testcase, 3 favored nodes are passed:
+   * dataNodes[0], dataNodes[1], dataNodes[2].
+   *
+   * Favored nodes on different node groups should be selected; the remaining
+   * replicas should then go through BlockPlacementPolicy.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testChooseRemainingReplicasApartFromFavoredNodes()
+      throws Exception {
+    DatanodeStorageInfo[] targets;
+    List<DatanodeDescriptor> expectedTargets =
+        new ArrayList<DatanodeDescriptor>();
+    expectedTargets.add(dataNodes[0]);
+    expectedTargets.add(dataNodes[2]);
+    expectedTargets.add(dataNodes[3]);
+    expectedTargets.add(dataNodes[6]);
+    expectedTargets.add(dataNodes[7]);
+    List<DatanodeDescriptor> favouredNodes =
+        new ArrayList<DatanodeDescriptor>();
+    favouredNodes.add(dataNodes[0]);
+    favouredNodes.add(dataNodes[1]);
+    favouredNodes.add(dataNodes[2]);
+    targets = chooseTarget(3, dataNodes[3], null, favouredNodes);
+    for (int i = 0; i < targets.length; i++) {
+      assertTrue("Target should be a part of Expected Targets",
+          expectedTargets.contains(targets[i].getDatanodeDescriptor()));
+    }
+  }
 }
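
For readers wondering how favored nodes enter this code path in the first place: HDFS clients can pass favored datanode addresses when creating a file. A hedged usage sketch, assuming the DistributedFileSystem.create(...) overload that accepts an InetSocketAddress[] of favored nodes (present in the Hadoop 2.x branches; verify the exact signature against your version):

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FavoredNodesWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("hdfs://namenode:8020/")
            .getFileSystem(conf);

    // Hint to the NameNode to place replicas on these datanodes when
    // possible; with HDFS-9393, any replicas beyond the usable favored
    // nodes are placed by the configured BlockPlacementPolicy.
    InetSocketAddress[] favored = {
        new InetSocketAddress("dn1.example.com", 50010),
        new InetSocketAddress("dn2.example.com", 50010)
    };

    FSDataOutputStream out = dfs.create(new Path("/tmp/favored-demo"),
        FsPermission.getFileDefault(), true /* overwrite */,
        conf.getInt("io.file.buffer.size", 4096),
        (short) 3 /* replication */, dfs.getDefaultBlockSize(),
        null /* progress */, favored);
    try {
      out.writeBytes("hello favored nodes\n");
    } finally {
      out.close();
    }
  }
}

The hostnames, port, and path above are placeholders; favored nodes are a best-effort hint, not a guarantee, which is exactly why the fallback path exercised by these tests matters.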
