Commit 1df31fd

backmerge branch-3.3

2 parents fb81998 + 6306f5b

112 files changed: +1574 additions, -262 deletions


hadoop-common-project/hadoop-common/src/main/bin/hadoop

Lines changed: 2 additions & 2 deletions
@@ -26,9 +26,9 @@ MYNAME="${BASH_SOURCE-$0}"
 function hadoop_usage
 {
   hadoop_add_option "buildpaths" "attempt to add class files from build tree"
-  hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in slave mode"
+  hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in worker mode"
   hadoop_add_option "loglevel level" "set the log4j level for this command"
-  hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
+  hadoop_add_option "hosts filename" "list of hosts to use in worker mode"
   hadoop_add_option "workers" "turn on worker mode"
 
   hadoop_add_subcommand "checknative" client "check native Hadoop and compression libraries availability"

hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@
 # limitations under the License.
 
 
-# Run a Hadoop command on all slave hosts.
+# Run a Hadoop command on all worker hosts.
 
 function hadoop_usage
 {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java

Lines changed: 3 additions & 3 deletions
@@ -86,10 +86,10 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
   private final List<LightWeightLinkedSet<BlockInfo>> priorityQueues
       = new ArrayList<>(LEVEL);
 
-  /** The number of corrupt blocks with replication factor 1 */
 
   private final LongAdder lowRedundancyBlocks = new LongAdder();
   private final LongAdder corruptBlocks = new LongAdder();
+  /** The number of corrupt blocks with replication factor 1 */
   private final LongAdder corruptReplicationOneBlocks = new LongAdder();
   private final LongAdder lowRedundancyECBlockGroups = new LongAdder();
   private final LongAdder corruptECBlockGroups = new LongAdder();
@@ -367,11 +367,11 @@ synchronized boolean remove(BlockInfo block,
    * @return true if the block was found and removed from one of the priority
    * queues
    */
-  boolean remove(BlockInfo block, int priLevel) {
+  synchronized boolean remove(BlockInfo block, int priLevel) {
     return remove(block, priLevel, block.getReplication());
   }
 
-  boolean remove(BlockInfo block, int priLevel, int oldExpectedReplicas) {
+  synchronized boolean remove(BlockInfo block, int priLevel, int oldExpectedReplicas) {
     if(priLevel >= 0 && priLevel < LEVEL
         && priorityQueues.get(priLevel).remove(block)) {
       NameNode.blockStateChangeLog.debug(
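
Side note on the second hunk: every other mutator of LowRedundancyBlocks already holds the object monitor, and the priority queues must change in lockstep with the redundancy counters, so the two remove overloads needed synchronized as well. Below is a self-contained sketch of the hazard an unsynchronized remove invites; the names are hypothetical, not the HDFS classes.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.LongAdder;

// Hypothetical stand-in for a class whose queues and counters must stay
// consistent. If remove() were not synchronized, it could interleave with
// a synchronized add() or a concurrent remove(), corrupting the list's
// internal state or decrementing the counter twice for one element --
// the class of bug the commit closes.
class QueueTracker {
  private final List<List<String>> queues = new ArrayList<>();
  private final LongAdder size = new LongAdder();

  QueueTracker(int levels) {
    for (int i = 0; i < levels; i++) {
      queues.add(new ArrayList<>());
    }
  }

  synchronized void add(int level, String block) {
    queues.get(level).add(block);
    size.increment();
  }

  // The fix: take the same monitor as add(), so the membership check and
  // the counter update form one atomic step.
  synchronized boolean remove(int level, String block) {
    if (queues.get(level).remove(block)) {
      size.decrement();
      return true;
    }
    return false;
  }

  long size() {
    return size.sum();
  }
}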

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

Lines changed: 4 additions & 0 deletions
@@ -62,6 +62,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
+import org.apache.hadoop.hdfs.server.protocol.InvalidBlockReportLeaseException;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
@@ -778,6 +779,9 @@ private void offerService() throws Exception {
           shouldServiceRun = false;
           return;
         }
+        if (InvalidBlockReportLeaseException.class.getName().equals(reClass)) {
+          fullBlockReportLeaseId = 0;
+        }
         LOG.warn("RemoteException in offerService", re);
         sleepAfterException();
       } catch (IOException e) {
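
The hunk above matches the exception by class name because a server-side exception crosses Hadoop IPC wrapped in a RemoteException: the client sees only the original class name and message, so instanceof cannot be used. A self-contained sketch of the pattern follows; FakeRemoteException is a stand-in, not Hadoop's org.apache.hadoop.ipc.RemoteException.

import java.io.IOException;

// Sketch of the name-matching in offerService(): over RPC the exception
// arrives as (className, message). The DataNode compares class names and
// zeroes its lease id; lease id 0 makes the next heartbeat request a
// fresh full-block-report lease.
public class LeaseResetSketch {

  // Stand-in for org.apache.hadoop.ipc.RemoteException.
  static class FakeRemoteException extends IOException {
    private final String className;

    FakeRemoteException(String className, String message) {
      super(message);
      this.className = className;
    }

    String getClassName() {
      return className;
    }
  }

  private static long fullBlockReportLeaseId = 0xABCL;

  public static void main(String[] args) {
    FakeRemoteException re = new FakeRemoteException(
        "org.apache.hadoop.hdfs.server.protocol.InvalidBlockReportLeaseException",
        "Block report 0x1 was rejected as lease 0xabc is invalid");

    String reClass = re.getClassName();
    if ("org.apache.hadoop.hdfs.server.protocol.InvalidBlockReportLeaseException"
        .equals(reClass)) {
      fullBlockReportLeaseId = 0; // ask for a new lease on the next heartbeat
    }
    System.out.println("lease id is now " + fullBlockReportLeaseId);
  }
}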

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

Lines changed: 3 additions & 0 deletions
@@ -173,6 +173,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
+import org.apache.hadoop.hdfs.server.protocol.InvalidBlockReportLeaseException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -1635,6 +1636,8 @@ public DatanodeCommand blockReport(final DatanodeRegistration nodeReg,
               bm.processReport(nodeReg, reports[index].getStorage(),
                   blocks, context));
         }
+      } else {
+        throw new InvalidBlockReportLeaseException(context.getReportId(), context.getLeaseId());
       }
     } catch (UnregisteredNodeException une) {
       LOG.debug("Datanode {} is attempting to report but not register yet.",
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InvalidBlockReportLeaseException.java

Lines changed: 41 additions & 0 deletions

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This exception is thrown when a datanode sends a full block report but it is
+ * rejected by the Namenode due to an invalid lease (expired or otherwise).
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class InvalidBlockReportLeaseException extends IOException {
+  /** for java.io.Serializable. */
+  private static final long serialVersionUID = 1L;
+
+  public InvalidBlockReportLeaseException(long blockReportID, long leaseID) {
+    super("Block report 0x" + Long.toHexString(blockReportID) + " was rejected as lease 0x"
+        + Long.toHexString(leaseID) + " is invalid");
+  }
+}
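
For reference, a minimal sketch of the message this constructor produces, with made-up report and lease ids (requires hadoop-hdfs on the classpath):

import org.apache.hadoop.hdfs.server.protocol.InvalidBlockReportLeaseException;

public class LeaseMessageDemo {
  public static void main(String[] args) {
    // Built the same way NameNodeRpcServer does, with hypothetical ids.
    InvalidBlockReportLeaseException e =
        new InvalidBlockReportLeaseException(0x1234L, 0x0L);
    // Prints: Block report 0x1234 was rejected as lease 0x0 is invalid
    System.out.println(e.getMessage());
  }
}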

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java

Lines changed: 69 additions & 0 deletions
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
+import org.apache.hadoop.hdfs.server.protocol.InvalidBlockReportLeaseException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
@@ -40,12 +41,14 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.spy;
@@ -136,6 +139,72 @@ public void testCheckBlockReportLease() throws Exception {
     }
   }
 
+  @Test
+  public void testExceptionThrownWhenFBRLeaseExpired() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    Random rand = new Random();
+
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build()) {
+      cluster.waitActive();
+
+      FSNamesystem fsn = cluster.getNamesystem();
+      BlockManager blockManager = fsn.getBlockManager();
+      BlockManager spyBlockManager = spy(blockManager);
+      fsn.setBlockManagerForTesting(spyBlockManager);
+      String poolId = cluster.getNamesystem().getBlockPoolId();
+
+      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
+
+      // Test based on one DataNode report to Namenode
+      DataNode dn = cluster.getDataNodes().get(0);
+      DatanodeDescriptor datanodeDescriptor = spyBlockManager
+          .getDatanodeManager().getDatanode(dn.getDatanodeId());
+
+      DatanodeRegistration dnRegistration = dn.getDNRegistrationForBP(poolId);
+      StorageReport[] storages = dn.getFSDataset().getStorageReports(poolId);
+
+      // Send heartbeat and request full block report lease
+      HeartbeatResponse hbResponse = rpcServer.sendHeartbeat(
+          dnRegistration, storages, 0, 0, 0, 0, 0, null, true,
+          SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT);
+
+      // Remove full block report lease about dn
+      spyBlockManager.getBlockReportLeaseManager()
+          .removeLease(datanodeDescriptor);
+
+      ExecutorService pool = Executors.newFixedThreadPool(1);
+
+      // Trigger sendBlockReport
+      BlockReportContext brContext = new BlockReportContext(1, 0,
+          rand.nextLong(), hbResponse.getFullBlockReportLeaseId());
+      Future<DatanodeCommand> sendBRfuturea = pool.submit(() -> {
+        // Build every storage with 100 blocks for sending report
+        DatanodeStorage[] datanodeStorages
+            = new DatanodeStorage[storages.length];
+        for (int i = 0; i < storages.length; i++) {
+          datanodeStorages[i] = storages[i].getStorage();
+        }
+        StorageBlockReport[] reports = createReports(datanodeStorages, 100);
+
+        // Send blockReport
+        return rpcServer.blockReport(dnRegistration, poolId, reports,
+            brContext);
+      });
+
+      // The lease was removed, so the report should fail with an
+      // ExecutionException wrapping InvalidBlockReportLeaseException
+      ExecutionException exception = null;
+      try {
+        sendBRfuturea.get();
+      } catch (ExecutionException e) {
+        exception = e;
+      }
+      assertNotNull(exception);
+      assertEquals(InvalidBlockReportLeaseException.class,
+          exception.getCause().getClass());
+    }
+  }
+
   private StorageBlockReport[] createReports(DatanodeStorage[] dnStorages,
       int numBlocks) {
     int longsPerBlock = 3;

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java

Lines changed: 4 additions & 3 deletions
@@ -24,6 +24,7 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.protocol.InvalidBlockReportLeaseException;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
@@ -38,7 +39,6 @@
 
 import java.io.File;
 import java.io.IOException;
-import java.net.ConnectException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -1156,8 +1156,9 @@ public Object answer(InvocationOnMock invocation)
         // just reject and wait until DN requests a new leaseId
         if(leaseId == 1) {
           firstLeaseId = leaseId;
-          throw new ConnectException(
-              "network is not reachable for test. ");
+          InvalidBlockReportLeaseException e =
+              new InvalidBlockReportLeaseException(context.getReportId(), 1);
+          throw new RemoteException(e.getClass().getName(), e.getMessage());
         } else {
           secondLeaseId = leaseId;
           return null;

hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/directory_markers.md

Lines changed: 1 addition & 2 deletions
@@ -29,15 +29,14 @@ Changing the policy from the default value, `"delete"` _is not backwards compati
 Versions of Hadoop which are incompatible with other marker retention policies,
 as of August 2020.
 
--------------------------------------------------------
 | Branch     | Compatible Since | Supported           |
 |------------|------------------|---------------------|
 | Hadoop 2.x | n/a              | WONTFIX             |
 | Hadoop 3.0 | check            | Read-only           |
 | Hadoop 3.1 | check            | Read-only           |
 | Hadoop 3.2 | check            | Read-only           |
 | Hadoop 3.3 | 3.3.1            | Done                |
--------------------------------------------------------
+
 
 *WONTFIX*
 

hadoop-tools/hadoop-azure/src/config/checkstyle-suppressions.xml

Lines changed: 4 additions & 0 deletions
@@ -48,7 +48,11 @@
               files="org[\\/]apache[\\/]hadoop[\\/]fs[\\/]azurebfs[\\/]utils[\\/]Base64.java"/>
     <suppress checks="ParameterNumber|VisibilityModifier"
               files="org[\\/]apache[\\/]hadoop[\\/]fs[\\/]azurebfs[\\/]ITestSmallWriteOptimization.java"/>
+    <suppress checks="VisibilityModifier"
+              files="org[\\/]apache[\\/]hadoop[\\/]fs[\\/]azurebfs[\\/]services[\\/]ITestAbfsRestOperation.java"/>
     <!-- allow tests to use _ for ordering. -->
     <suppress checks="MethodName"
               files="org[\\/]apache[\\/]hadoop[\\/]fs[\\/]azurebfs[\\/]commit[\\/]ITestAbfsTerasort.java"/>
+    <suppress checks="ParameterNumber"
+              files="org[\\/]apache[\\/]hadoop[\\/]fs[\\/]azurebfs[\\/]services[\\/]TestAbfsOutputStream.java"/>
 </suppressions>

hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java

Lines changed: 9 additions & 0 deletions
@@ -117,6 +117,11 @@ public class AbfsConfiguration{
       DefaultValue = DEFAULT_OPTIMIZE_FOOTER_READ)
   private boolean optimizeFooterRead;
 
+  @BooleanConfigurationValidatorAnnotation(
+      ConfigurationKey = FS_AZURE_ACCOUNT_IS_EXPECT_HEADER_ENABLED,
+      DefaultValue = DEFAULT_FS_AZURE_ACCOUNT_IS_EXPECT_HEADER_ENABLED)
+  private boolean isExpectHeaderEnabled;
+
   @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ACCOUNT_LEVEL_THROTTLING_ENABLED,
       DefaultValue = DEFAULT_FS_AZURE_ACCOUNT_LEVEL_THROTTLING_ENABLED)
   private boolean accountThrottlingEnabled;
@@ -706,6 +711,10 @@ public String getAppendBlobDirs() {
     return this.azureAppendBlobDirs;
   }
 
+  public boolean isExpectHeaderEnabled() {
+    return this.isExpectHeaderEnabled;
+  }
+
   public boolean accountThrottlingEnabled() {
     return accountThrottlingEnabled;
   }
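
The getter above feeds populateAbfsOutputStreamContext in AzureBlobFileSystemStore (next file). A minimal sketch of disabling the header for troubleshooting, assuming only the public Configuration API; the key string matches ConfigurationKeys later in this commit:

import org.apache.hadoop.conf.Configuration;

public class DisableExpectHeader {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Default is true (DEFAULT_FS_AZURE_ACCOUNT_IS_EXPECT_HEADER_ENABLED),
    // so an explicit false stops ABFS writes from sending
    // Expect: 100-continue on upload requests.
    conf.setBoolean("fs.azure.account.expect.header.enabled", false);
    System.out.println(
        conf.getBoolean("fs.azure.account.expect.header.enabled", true));
  }
}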

hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java

Lines changed: 1 addition & 0 deletions
@@ -693,6 +693,7 @@ private AbfsOutputStreamContext populateAbfsOutputStreamContext(
     }
     return new AbfsOutputStreamContext(abfsConfiguration.getSasTokenRenewPeriodForStreamsInSeconds())
             .withWriteBufferSize(bufferSize)
+            .enableExpectHeader(abfsConfiguration.isExpectHeaderEnabled())
             .enableFlush(abfsConfiguration.isFlushEnabled())
             .enableSmallWriteOptimization(abfsConfiguration.isSmallWriteOptimizationEnabled())
             .disableOutputStreamFlush(abfsConfiguration.isOutputStreamFlushDisabled())

hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java

Lines changed: 8 additions & 0 deletions
@@ -64,6 +64,11 @@ public final class AbfsHttpConstants {
   public static final String HTTP_METHOD_PATCH = "PATCH";
   public static final String HTTP_METHOD_POST = "POST";
   public static final String HTTP_METHOD_PUT = "PUT";
+  /**
+   * All status codes less than http 100 signify error
+   * and should qualify for retry.
+   */
+  public static final int HTTP_CONTINUE = 100;
 
   // Abfs generic constants
   public static final String SINGLE_WHITE_SPACE = " ";
@@ -103,6 +108,9 @@ public final class AbfsHttpConstants {
   public static final String DEFAULT_SCOPE = "default:";
   public static final String PERMISSION_FORMAT = "%04d";
   public static final String SUPER_USER = "$superuser";
+  // The HTTP 100 Continue informational status response code indicates that everything so far
+  // is OK and that the client should continue with the request or ignore it if it is already finished.
+  public static final String HUNDRED_CONTINUE = "100-continue";
 
   public static final char CHAR_FORWARD_SLASH = '/';
   public static final char CHAR_EXCLAMATION_POINT = '!';
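
For context on the two new constants: with Expect: 100-continue, a client sends its headers first and uploads the body only after the server replies "HTTP/1.1 100 Continue", so a request the server will reject (throttling, auth failure) fails before any payload bytes move. A sketch of the same handshake using the JDK 11+ HTTP client rather than the ABFS code path (which drives HttpURLConnection directly); the endpoint URL is hypothetical:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ExpectContinueSketch {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newBuilder()
        .expectContinue(true) // adds Expect: 100-continue to requests with a body
        .build();
    HttpRequest put = HttpRequest.newBuilder(
            URI.create("https://example.invalid/upload")) // hypothetical endpoint
        .PUT(HttpRequest.BodyPublishers.ofString("payload"))
        .build();
    // The client waits for the interim 100 response before sending "payload".
    HttpResponse<String> resp =
        client.send(put, HttpResponse.BodyHandlers.ofString());
    System.out.println(resp.statusCode());
  }
}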

hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java

Lines changed: 5 additions & 0 deletions
@@ -35,6 +35,11 @@ public final class ConfigurationKeys {
    * path to determine HNS status.
    */
   public static final String FS_AZURE_ACCOUNT_IS_HNS_ENABLED = "fs.azure.account.hns.enabled";
+  /**
+   * Enable or disable expect hundred continue header.
+   * Value: {@value}.
+   */
+  public static final String FS_AZURE_ACCOUNT_IS_EXPECT_HEADER_ENABLED = "fs.azure.account.expect.header.enabled";
   public static final String FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME = "fs.azure.account.key";
   public static final String FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME_REGX = "fs\\.azure\\.account\\.key\\.(.*)";
   public static final String FS_AZURE_SECURE_MODE = "fs.azure.secure.mode";

hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@
 public final class FileSystemConfigurations {
 
   public static final String DEFAULT_FS_AZURE_ACCOUNT_IS_HNS_ENABLED = "";
-
+  public static final boolean DEFAULT_FS_AZURE_ACCOUNT_IS_EXPECT_HEADER_ENABLED = true;
   public static final String USER_HOME_DIRECTORY_PREFIX = "/user";
 
   private static final int SIXTY_SECONDS = 60 * 1000;

hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java

Lines changed: 1 addition & 0 deletions
@@ -70,6 +70,7 @@ public final class HttpHeaderConfigurations {
   public static final String X_MS_LEASE_ID = "x-ms-lease-id";
   public static final String X_MS_PROPOSED_LEASE_ID = "x-ms-proposed-lease-id";
   public static final String X_MS_LEASE_BREAK_PERIOD = "x-ms-lease-break-period";
+  public static final String EXPECT = "Expect";
 
   private HttpHeaderConfigurations() {}
 }

hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/InvalidAbfsRestOperationException.java

Lines changed: 20 additions & 1 deletion
@@ -30,14 +30,33 @@
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class InvalidAbfsRestOperationException extends AbfsRestOperationException {
+
+  private static final String ERROR_MESSAGE = "InvalidAbfsRestOperationException";
+
   public InvalidAbfsRestOperationException(
       final Exception innerException) {
     super(
         AzureServiceErrorCode.UNKNOWN.getStatusCode(),
         AzureServiceErrorCode.UNKNOWN.getErrorCode(),
         innerException != null
             ? innerException.toString()
-            : "InvalidAbfsRestOperationException",
+            : ERROR_MESSAGE,
         innerException);
   }
+
+  /**
+   * Adds the retry count along with the exception.
+   * @param innerException The inner exception which is originally caught.
+   * @param retryCount The retry count when the exception was thrown.
+   */
+  public InvalidAbfsRestOperationException(
+      final Exception innerException, int retryCount) {
+    super(
+        AzureServiceErrorCode.UNKNOWN.getStatusCode(),
+        AzureServiceErrorCode.UNKNOWN.getErrorCode(),
+        innerException != null
+            ? innerException.toString()
+            : ERROR_MESSAGE + " RetryCount: " + retryCount,
+        innerException);
+  }
 }
