
Commit fda62cf

Merge branch 'trunk' into YARN-11509
2 parents: b7cfc3f + 6042d59

File tree

17 files changed: +438 −43 lines

LICENSE-binary

Lines changed: 30 additions & 30 deletions

@@ -259,36 +259,36 @@ io.grpc:grpc-netty:1.26.0
 io.grpc:grpc-protobuf:1.26.0
 io.grpc:grpc-protobuf-lite:1.26.0
 io.grpc:grpc-stub:1.26.0
-io.netty:netty-all:4.1.77.Final
-io.netty:netty-buffer:4.1.77.Final
-io.netty:netty-codec:4.1.77.Final
-io.netty:netty-codec-dns:4.1.77.Final
-io.netty:netty-codec-haproxy:4.1.77.Final
-io.netty:netty-codec-http:4.1.77.Final
-io.netty:netty-codec-http2:4.1.77.Final
-io.netty:netty-codec-memcache:4.1.77.Final
-io.netty:netty-codec-mqtt:4.1.77.Final
-io.netty:netty-codec-redis:4.1.77.Final
-io.netty:netty-codec-smtp:4.1.77.Final
-io.netty:netty-codec-socks:4.1.77.Final
-io.netty:netty-codec-stomp:4.1.77.Final
-io.netty:netty-codec-xml:4.1.77.Final
-io.netty:netty-common:4.1.77.Final
-io.netty:netty-handler:4.1.77.Final
-io.netty:netty-handler-proxy:4.1.77.Final
-io.netty:netty-resolver:4.1.77.Final
-io.netty:netty-resolver-dns:4.1.77.Final
-io.netty:netty-transport:4.1.77.Final
-io.netty:netty-transport-rxtx:4.1.77.Final
-io.netty:netty-transport-sctp:4.1.77.Final
-io.netty:netty-transport-udt:4.1.77.Final
-io.netty:netty-transport-classes-epoll:4.1.77.Final
-io.netty:netty-transport-native-unix-common:4.1.77.Final
-io.netty:netty-transport-classes-kqueue:4.1.77.Final
-io.netty:netty-resolver-dns-classes-macos:4.1.77.Final
-io.netty:netty-transport-native-epoll:4.1.77.Final
-io.netty:netty-transport-native-kqueue:4.1.77.Final
-io.netty:netty-resolver-dns-native-macos:4.1.77.Final
+io.netty:netty-all:4.1.94.Final
+io.netty:netty-buffer:4.1.94.Final
+io.netty:netty-codec:4.1.94.Final
+io.netty:netty-codec-dns:4.1.94.Final
+io.netty:netty-codec-haproxy:4.1.94.Final
+io.netty:netty-codec-http:4.1.94.Final
+io.netty:netty-codec-http2:4.1.94.Final
+io.netty:netty-codec-memcache:4.1.94.Final
+io.netty:netty-codec-mqtt:4.1.94.Final
+io.netty:netty-codec-redis:4.1.94.Final
+io.netty:netty-codec-smtp:4.1.94.Final
+io.netty:netty-codec-socks:4.1.94.Final
+io.netty:netty-codec-stomp:4.1.94.Final
+io.netty:netty-codec-xml:4.1.94.Final
+io.netty:netty-common:4.1.94.Final
+io.netty:netty-handler:4.1.94.Final
+io.netty:netty-handler-proxy:4.1.94.Final
+io.netty:netty-resolver:4.1.94.Final
+io.netty:netty-resolver-dns:4.1.94.Final
+io.netty:netty-transport:4.1.94.Final
+io.netty:netty-transport-rxtx:4.1.94.Final
+io.netty:netty-transport-sctp:4.1.94.Final
+io.netty:netty-transport-udt:4.1.94.Final
+io.netty:netty-transport-classes-epoll:4.1.94.Final
+io.netty:netty-transport-native-unix-common:4.1.94.Final
+io.netty:netty-transport-classes-kqueue:4.1.94.Final
+io.netty:netty-resolver-dns-classes-macos:4.1.94.Final
+io.netty:netty-transport-native-epoll:4.1.94.Final
+io.netty:netty-transport-native-kqueue:4.1.94.Final
+io.netty:netty-resolver-dns-native-macos:4.1.94.Final
 io.opencensus:opencensus-api:0.12.3
 io.opencensus:opencensus-contrib-grpc-metrics:0.12.3
 io.reactivex:rxjava:1.3.8

dev-support/git-jira-validation/README.md

Lines changed: 3 additions & 3 deletions

@@ -68,8 +68,8 @@ The script also requires below inputs:
    Exact fixVersion that we would like to compare all Jira's fixVersions
    with. e.g for 3.3.2 release, it should be 3.3.2.

-3. JIRA Project Name:
-   The exact name of Project as case-sensitive e.g HADOOP / OZONE
+3. JIRA Project Name (default Project Name: HADOOP):
+   The exact name of Project as case-sensitive.

 4. Path of project's working dir with release branch checked-in:
    Path of project from where we want to compare git hashes from. Local fork
@@ -84,7 +84,7 @@ The script also requires below inputs:

 Example of script execution:
 ```
-JIRA Project Name (e.g HADOOP / OZONE etc): HADOOP
+JIRA Project Name (default: HADOOP): HADOOP
 First commit hash to start excluding commits from history: fa4915fdbbbec434ab41786cb17b82938a613f16
 Fix Version: 3.3.2
 Jira server url (default: https://issues.apache.org/jira):

dev-support/git-jira-validation/git_jira_fix_version_check.py

Lines changed: 1 addition & 1 deletion

@@ -30,7 +30,7 @@

 from jira import JIRA

-jira_project_name = input("JIRA Project Name (e.g HADOOP / OZONE etc): ") \
+jira_project_name = input("JIRA Project Name (default: HADOOP): ") \
     or "HADOOP"
 # Define project_jira_keys with - appended. e.g for HADOOP Jiras,
 # project_jira_keys should include HADOOP-, HDFS-, YARN-, MAPREDUCE-

hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

Lines changed: 7 additions & 0 deletions

@@ -2192,6 +2192,13 @@ function hadoop_daemon_handler
     case ${daemonmode} in
       status)
         hadoop_status_daemon "${daemon_pidfile}"
+        if [[ $? == 0 ]]; then
+          echo "${daemonname} is running as process $(cat "${daemon_pidfile}")."
+        elif [[ $? == 1 ]]; then
+          echo "${daemonname} is stopped."
+        else
+          hadoop_error "hadoop_status_daemon error."
+        fi
         exit $?
       ;;
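For readers less used to shell exit-status conventions, here is the same status-to-message mapping sketched in Java. It only mirrors the convention visible in the hunk above (0 = running, 1 = stopped, anything else = error) and is not part of the patch; `describe` is a hypothetical helper.

```java
public class DaemonStatusMessage {

  // Hypothetical helper mirroring the shell logic above: map a
  // hadoop_status_daemon-style exit code to a human-readable line.
  static String describe(String daemonName, int statusCode, long pid) {
    switch (statusCode) {
      case 0:  // daemon is up and its pidfile points at a live process
        return daemonName + " is running as process " + pid + ".";
      case 1:  // daemon is not running
        return daemonName + " is stopped.";
      default: // any other code is treated as an error
        return "hadoop_status_daemon error.";
    }
  }

  public static void main(String[] args) {
    System.out.println(describe("namenode", 0, 12345));
    System.out.println(describe("datanode", 1, -1));
  }
}
```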

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java

Lines changed: 1 addition & 1 deletion

@@ -874,7 +874,7 @@ private BigInteger getNameserviceAggregatedBigInt(

   /**
    * Fetches the most active namenode memberships for all known nameservices.
-   * The fetched membership may not or may not be active. Excludes expired
+   * The fetched membership may or may not be active. Excludes expired
    * memberships.
    * @throws IOException if the query could not be performed.
    * @return List of the most active NNs from each known nameservice.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java

Lines changed: 14 additions & 3 deletions

@@ -170,7 +170,7 @@ private void chooseEvenlyFromRemainingRacks(Node writer,
       NotEnoughReplicasException lastException = e;
       int bestEffortMaxNodesPerRack = maxNodesPerRack;
       while (results.size() != totalReplicaExpected &&
-          numResultsOflastChoose != results.size()) {
+          bestEffortMaxNodesPerRack < totalReplicaExpected) {
         // Exclude the chosen nodes
         final Set<Node> newExcludeNodes = new HashSet<>();
         for (DatanodeStorageInfo resultStorage : results) {
@@ -192,11 +192,22 @@ private void chooseEvenlyFromRemainingRacks(Node writer,
         } finally {
           excludedNodes.addAll(newExcludeNodes);
         }
+        // To improve performance, the maximum value of 'bestEffortMaxNodesPerRack'
+        // is calculated only when it is not possible to select a node.
+        if (numResultsOflastChoose == results.size()) {
+          Map<String, Integer> nodesPerRack = new HashMap<>();
+          for (DatanodeStorageInfo dsInfo : results) {
+            String rackName = dsInfo.getDatanodeDescriptor().getNetworkLocation();
+            nodesPerRack.merge(rackName, 1, Integer::sum);
+          }
+          bestEffortMaxNodesPerRack =
+              Math.max(bestEffortMaxNodesPerRack, Collections.max(nodesPerRack.values()));
+        }
       }

-      if (numResultsOflastChoose != totalReplicaExpected) {
+      if (results.size() != totalReplicaExpected) {
         LOG.debug("Best effort placement failed: expecting {} replicas, only "
-            + "chose {}.", totalReplicaExpected, numResultsOflastChoose);
+            + "chose {}.", totalReplicaExpected, results.size());
         throw lastException;
       }
     }
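The added block caps retries by raising `bestEffortMaxNodesPerRack` to the size of the most loaded rack whenever a pass selects no new node. A minimal standalone sketch of that counting step, using plain rack-name strings in place of `DatanodeStorageInfo` (a hypothetical helper, not code from the patch):

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RackLoad {

  // Count how many chosen replicas landed on each rack and return the
  // size of the most loaded rack; the placement loop raises its per-rack
  // cap to this value when a pass makes no progress.
  static int maxNodesOnOneRack(List<String> rackOfEachReplica) {
    Map<String, Integer> nodesPerRack = new HashMap<>();
    for (String rack : rackOfEachReplica) {
      nodesPerRack.merge(rack, 1, Integer::sum);
    }
    return nodesPerRack.isEmpty() ? 0 : Collections.max(nodesPerRack.values());
  }

  public static void main(String[] args) {
    // Two replicas on /rack1 and one on /rack2 -> the cap becomes 2.
    System.out.println(maxNodesOnOneRack(List.of("/rack1", "/rack1", "/rack2")));
  }
}
```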

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

Lines changed: 2 additions & 2 deletions

@@ -186,8 +186,8 @@ public void sendOOB() throws IOException, InterruptedException {
     if (br == null) {
       return;
     }
-    // This doesn't need to be in a critical section. Althogh the client
-    // can resue the connection to issue a different request, trying sending
+    // This doesn't need to be in a critical section. Although the client
+    // can reuse the connection to issue a different request, trying sending
     // an OOB through the recently closed block receiver is harmless.
     LOG.info("Sending OOB to peer: {}", peer);
     br.sendOOB();

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

Lines changed: 1 addition & 1 deletion

@@ -624,7 +624,7 @@ private static FileState analyzeFileState(
     // timeout, or because of an HA failover. In that case, we know
     // by the fact that the client is re-issuing the RPC that it
     // never began to write to the old block. Hence it is safe to
-    // to return the existing block.
+    // return the existing block.
     // 3) This is an entirely bogus request/bug -- we should error out
     // rather than potentially appending a new block with an empty
     // one in the middle, etc

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java

Lines changed: 1 addition & 1 deletion

@@ -327,7 +327,7 @@ public void testPipelineRecoveryOnOOB() throws Exception {
     // Wait long enough to receive an OOB ack before closing the file.
     GenericTestUtils.waitForThreadTermination(
         "Async datanode shutdown thread", 100, 10000);
-    // Retart the datanode
+    // Restart the datanode
     cluster.restartDataNode(0, true);
     // The following forces a data packet and end of block packets to be sent.
     out.close();

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java

Lines changed: 61 additions & 0 deletions

@@ -19,6 +19,7 @@

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -52,6 +53,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Iterator;
 import java.util.List;
@@ -515,4 +517,63 @@ public void testReconstrutionWithBusyBlock1() throws Exception {
     assertEquals(9, bm.countNodes(blockInfo).liveReplicas());
   }

+  @Test
+  public void testReconstructionWithStorageTypeNotEnough() throws Exception {
+    final HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
+
+    // Nine disk node eleven archive node.
+    int numDn = groupSize * 2 + 2;
+    StorageType[][] storageTypes = new StorageType[numDn][];
+    Arrays.fill(storageTypes, 0, groupSize,
+        new StorageType[]{StorageType.DISK, StorageType.DISK});
+    Arrays.fill(storageTypes, groupSize, numDn,
+        new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE});
+
+    // Nine disk racks and one archive rack.
+    String[] racks = {
+        "/rack1", "/rack2", "/rack3", "/rack4", "/rack5", "/rack6", "/rack7", "/rack8",
+        "/rack9", "/rack0", "/rack0", "/rack0", "/rack0", "/rack0", "/rack0", "/rack0",
+        "/rack0", "/rack0", "/rack0", "/rack0"};
+
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn)
+        .storageTypes(storageTypes)
+        .racks(racks)
+        .build();
+    cluster.waitActive();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    fs.enableErasureCodingPolicy(
+        StripedFileTestUtil.getDefaultECPolicy().getName());
+
+    try {
+      fs.mkdirs(dirPath);
+      fs.setStoragePolicy(dirPath, "COLD");
+      fs.setErasureCodingPolicy(dirPath,
+          StripedFileTestUtil.getDefaultECPolicy().getName());
+      DFSTestUtil.createFile(fs, filePath,
+          cellSize * dataBlocks * 2, (short) 1, 0L);
+
+      // Stop one dn.
+      LocatedBlocks blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
+      LocatedStripedBlock block = (LocatedStripedBlock) blks.getLastLocatedBlock();
+      DatanodeInfo dnToStop = block.getLocations()[0];
+      cluster.stopDataNode(dnToStop.getXferAddr());
+      cluster.setDataNodeDead(dnToStop);
+
+      // Wait for reconstruction to happen.
+      StripedFileTestUtil.waitForReconstructionFinished(filePath, fs, groupSize);
+      blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
+      block = (LocatedStripedBlock) blks.getLastLocatedBlock();
+      BitSet bitSet = new BitSet(groupSize);
+      for (byte index : block.getBlockIndices()) {
+        bitSet.set(index);
+      }
+      for (int i = 0; i < groupSize; i++) {
+        Assert.assertTrue(bitSet.get(i));
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
 }
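The new test verifies completeness of the reconstructed group with a `BitSet` over the reported block indices. A self-contained sketch of that check, with hypothetical inputs standing in for `LocatedStripedBlock`:

```java
import java.util.BitSet;

public class BlockIndexCheck {

  // True when every erasure-coding block index in [0, groupSize)
  // appears at least once among the reported indices.
  static boolean allIndicesPresent(byte[] blockIndices, int groupSize) {
    BitSet seen = new BitSet(groupSize);
    for (byte index : blockIndices) {
      seen.set(index);
    }
    // If all bits below groupSize are set, the first clear bit is >= groupSize.
    return seen.nextClearBit(0) >= groupSize;
  }

  public static void main(String[] args) {
    // RS-6-3 group (groupSize = 9) with index 4 missing -> prints false.
    byte[] reported = {0, 1, 2, 3, 5, 6, 7, 8};
    System.out.println(allIndicesPresent(reported, 9));
  }
}
```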
