
Commit ea6a4a3

Merge branch 'trunk' into YARN-11180

2 parents 41b7d8a + 213ea03
File tree: 24 files changed (+1067 −70 lines)

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

Lines changed: 12 additions & 9 deletions

@@ -181,15 +181,20 @@ public static final RetryPolicy retryByRemoteException(
   }
 
   /**
-   * A retry policy for exceptions other than RemoteException.
+   * <p>
+   * A retry policy where RemoteException and SaslException are not retried, other individual
+   * exception types can have RetryPolicy overrides, and any other exception type without an
+   * override is not retried.
+   * </p>
+   *
    * @param defaultPolicy defaultPolicy.
    * @param exceptionToPolicyMap exceptionToPolicyMap.
    * @return RetryPolicy.
    */
-  public static final RetryPolicy retryOtherThanRemoteException(
+  public static final RetryPolicy retryOtherThanRemoteAndSaslException(
      RetryPolicy defaultPolicy,
      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
-    return new OtherThanRemoteExceptionDependentRetry(defaultPolicy,
+    return new OtherThanRemoteAndSaslExceptionDependentRetry(defaultPolicy,
        exceptionToPolicyMap);
   }
 
@@ -589,12 +594,12 @@ public RetryAction shouldRetry(Exception e, int retries, int failovers,
     }
   }
 
-  static class OtherThanRemoteExceptionDependentRetry implements RetryPolicy {
+  static class OtherThanRemoteAndSaslExceptionDependentRetry implements RetryPolicy {
 
    private RetryPolicy defaultPolicy;
    private Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap;
 
-    public OtherThanRemoteExceptionDependentRetry(RetryPolicy defaultPolicy,
+    OtherThanRemoteAndSaslExceptionDependentRetry(RetryPolicy defaultPolicy,
        Map<Class<? extends Exception>,
        RetryPolicy> exceptionToPolicyMap) {
      this.defaultPolicy = defaultPolicy;
@@ -605,10 +610,8 @@ public OtherThanRemoteExceptionDependentRetry(RetryPolicy defaultPolicy,
    public RetryAction shouldRetry(Exception e, int retries, int failovers,
        boolean isIdempotentOrAtMostOnce) throws Exception {
      RetryPolicy policy = null;
-      // ignore Remote Exception
-      if (e instanceof RemoteException) {
-        // do nothing
-      } else {
+      // ignore RemoteException and SaslException
+      if (!(e instanceof RemoteException || isSaslFailure(e))) {
        policy = exceptionToPolicyMap.get(e.getClass());
      }
      if (policy == null) {
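
As a usage sketch (not part of this commit), here is roughly how the renamed factory could be wired up; the ConnectException override, the retry counts, and the TRY_ONCE_THEN_FAIL default below are illustrative choices, not taken from the patch:

import java.net.ConnectException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class RetryPolicySketch {
  public static RetryPolicy buildClientRetryPolicy() {
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<>();
    // Illustrative override: retry a local ConnectException a few times.
    exceptionToPolicyMap.put(ConnectException.class,
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(3, 1, TimeUnit.SECONDS));
    // Per the Javadoc above, RemoteException and SaslException bypass the map
    // entirely; any other exception without an override falls back to the default.
    return RetryPolicies.retryOtherThanRemoteAndSaslException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
  }
}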

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

Lines changed: 3 additions & 1 deletion

@@ -61,6 +61,7 @@
 
 import javax.net.SocketFactory;
 import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslException;
 import java.io.*;
 import java.net.*;
 import java.nio.ByteBuffer;
@@ -1620,7 +1621,8 @@ private Writable getRpcResponse(final Call call, final Connection connection,
    }
 
    if (call.error != null) {
-      if (call.error instanceof RemoteException) {
+      if (call.error instanceof RemoteException ||
+          call.error instanceof SaslException) {
        call.error.fillInStackTrace();
        throw call.error;
      } else { // local exception
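
A minimal sketch (not from this commit) of why the rethrow path calls fillInStackTrace(): the stored exception was created on the connection's reader thread, and fillInStackTrace() restamps it with the calling thread's stack so the trace points at the RPC call site. The class and field names below are hypothetical:

public class FillInStackTraceSketch {
  private static Exception stored;

  public static void main(String[] args) throws InterruptedException {
    Thread reader = new Thread(() -> {
      stored = new Exception("created on reader thread");
    });
    reader.start();
    reader.join();

    stored.printStackTrace();   // stack captured where the exception was constructed
    stored.fillInStackTrace();
    stored.printStackTrace();   // stack now points at this call site in main()
  }
}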

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java

Lines changed: 8 additions & 1 deletion

@@ -237,7 +237,14 @@ private SaslClient createSaslClient(SaslAuth authType)
      LOG.debug("client isn't using kerberos");
      return null;
    }
-    String serverPrincipal = getServerPrincipal(authType);
+    final String serverPrincipal;
+    try {
+      serverPrincipal = getServerPrincipal(authType);
+    } catch (IllegalArgumentException ex) {
+      // YARN-11210: getServerPrincipal can throw IllegalArgumentException if Kerberos
+      // configuration is bad, this is surfaced as a non-retryable SaslException
+      throw new SaslException("Bad Kerberos server principal configuration", ex);
+    }
    if (serverPrincipal == null) {
      LOG.debug("protocol doesn't use kerberos");
      return null;
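
A hedged sketch of the failure mode being wrapped (the helper below is hypothetical, not the real getServerPrincipal): constructing a KerberosPrincipal from a malformed or unresolvable principal string throws IllegalArgumentException, which previously escaped createSaslClient unwrapped; catching it and rethrowing as SaslException lets the retry layer fail fast instead of looping:

import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.sasl.SaslException;

public class PrincipalResolutionSketch {
  // Illustrative stand-in for the resolution performed by getServerPrincipal().
  static String resolve(String configuredPrincipal) throws SaslException {
    try {
      // KerberosPrincipal is documented to throw IllegalArgumentException for
      // improperly formatted names or when no realm can be determined.
      return new KerberosPrincipal(configuredPrincipal).getName();
    } catch (IllegalArgumentException ex) {
      // Mirror the change above: surface bad Kerberos configuration as a
      // SaslException rather than letting IllegalArgumentException escape.
      throw new SaslException("Bad Kerberos server principal configuration", ex);
    }
  }
}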

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java

Lines changed: 1 addition & 1 deletion

@@ -291,7 +291,7 @@ public void testRetryOtherThanRemoteException() throws Throwable {
 
    UnreliableInterface unreliable = (UnreliableInterface)
        RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-            retryOtherThanRemoteException(TRY_ONCE_THEN_FAIL,
+            retryOtherThanRemoteAndSaslException(TRY_ONCE_THEN_FAIL,
                exceptionToPolicyMap));
    // should retry with local IOException.
    unreliable.failsOnceWithIOException();
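
A complementary policy-level sketch (not part of this test class): with the new policy, even an explicit SaslException entry in the exception-to-policy map is ignored, so the fail-fast default applies. The class name and retry counts below are illustrative:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import javax.security.sasl.SaslException;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;

public class SaslNotRetriedSketch {
  public static void main(String[] args) throws Exception {
    Map<Class<? extends Exception>, RetryPolicy> map = new HashMap<>();
    // Deliberately map SaslException to a retrying policy ...
    map.put(SaslException.class,
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(5, 1, TimeUnit.SECONDS));
    RetryPolicy policy = RetryPolicies.retryOtherThanRemoteAndSaslException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, map);
    // ... yet a SaslException bypasses the map and hits the fail-fast default.
    RetryAction action =
        policy.shouldRetry(new SaslException("auth failed"), 0, 0, true);
    System.out.println(action.action);  // expected: FAIL
  }
}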

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java

Lines changed: 4 additions & 14 deletions

@@ -303,7 +303,8 @@ FileChecksum makeCompositeCrcResult() throws IOException {
    byte[] blockChecksumBytes = blockChecksumBuf.getData();
 
    long sumBlockLengths = 0;
-    for (int i = 0; i < locatedBlocks.size() - 1; ++i) {
+    int i = 0;
+    for (; i < locatedBlocks.size() - 1; ++i) {
      LocatedBlock block = locatedBlocks.get(i);
      // For everything except the last LocatedBlock, we expect getBlockSize()
      // to accurately reflect the number of file bytes digested in the block
@@ -316,19 +317,8 @@ FileChecksum makeCompositeCrcResult() throws IOException {
          "Added blockCrc 0x{} for block index {} of size {}",
          Integer.toString(blockCrc, 16), i, block.getBlockSize());
    }
-
-    // NB: In some cases the located blocks have their block size adjusted
-    // explicitly based on the requested length, but not all cases;
-    // these numbers may or may not reflect actual sizes on disk.
-    long reportedLastBlockSize =
-        blockLocations.getLastLocatedBlock().getBlockSize();
-    long consumedLastBlockLength = reportedLastBlockSize;
-    if (length - sumBlockLengths < reportedLastBlockSize) {
-      LOG.warn(
-          "Last block length {} is less than reportedLastBlockSize {}",
-          length - sumBlockLengths, reportedLastBlockSize);
-      consumedLastBlockLength = length - sumBlockLengths;
-    }
+    LocatedBlock nextBlock = locatedBlocks.get(i);
+    long consumedLastBlockLength = Math.min(length - sumBlockLengths, nextBlock.getBlockSize());
    // NB: blockChecksumBytes.length may be much longer than actual bytes
    // written into the DataOutput.
    int lastBlockCrc = CrcUtil.readInt(
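
A worked sketch of the simplified last-block computation (the numbers are illustrative, chosen to echo the 0.5/0.6-block corner case exercised by the new TestFileChecksum case later in this commit):

public class LastBlockLengthSketch {
  public static void main(String[] args) {
    long blockSize = 128L * 1024 * 1024;
    long sumBlockLengths = 8 * blockSize;                  // full blocks already digested
    long requestedLength = sumBlockLengths + (long) (blockSize * 0.6);
    long lastBlockSizeOnDisk = (long) (blockSize * 0.5);   // reported by the last LocatedBlock

    // The composite CRC may only cover bytes that actually exist in the last block,
    // so the consumed length is the smaller of the remaining requested length and
    // the block's reported size.
    long consumedLastBlockLength =
        Math.min(requestedLength - sumBlockLengths, lastBlockSizeOnDisk);
    System.out.println(consumedLastBlockLength == lastBlockSizeOnDisk);  // true
  }
}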

hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt

Lines changed: 5 additions & 2 deletions

@@ -24,8 +24,11 @@ set (LIBHDFS_TESTS_DIR ../../libhdfs-tests)
 set (LIBHDFSPP_SRC_DIR ..)
 set (LIBHDFSPP_LIB_DIR ${LIBHDFSPP_SRC_DIR}/lib)
 set (LIBHDFSPP_BINDING_C ${LIBHDFSPP_LIB_DIR}/bindings/c)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-field-initializers")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-missing-field-initializers")
+
+if (NOT MSVC)
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-field-initializers")
+  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-missing-field-initializers")
+endif (NOT MSVC)
 
 include_directories(
    ${GENERATED_JAVAH}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java

Lines changed: 5 additions & 1 deletion

@@ -80,6 +80,7 @@ public class OfflineImageViewerPB {
      + " delimiter. The default delimiter is \\t, though this may be\n"
      + " changed via the -delimiter argument.\n"
      + " -sp print storage policy, used by delimiter only.\n"
+      + " -ec print erasure coding policy, used by delimiter only.\n"
      + " * DetectCorruption: Detect potential corruption of the image by\n"
      + " selectively loading parts of it and actively searching for\n"
      + " inconsistencies. Outputs a summary of the found corruptions\n"
@@ -132,6 +133,7 @@ private static Options buildOptions() {
    options.addOption("addr", true, "");
    options.addOption("delimiter", true, "");
    options.addOption("sp", false, "");
+    options.addOption("ec", false, "");
    options.addOption("t", "temp", true, "");
    options.addOption("m", "multiThread", true, "");
 
@@ -228,9 +230,11 @@ public static int run(String[] args) throws Exception {
      break;
    case "DELIMITED":
      boolean printStoragePolicy = cmd.hasOption("sp");
+      boolean printECPolicy = cmd.hasOption("ec");
      try (PBImageDelimitedTextWriter writer =
          new PBImageDelimitedTextWriter(out, delimiter,
-              tempPath, printStoragePolicy, threads, outputFile)) {
+              tempPath, printStoragePolicy, printECPolicy, threads,
+              outputFile, conf)) {
        writer.visit(inputFile);
      }
      break;
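
With this change the Delimited processor gains an -ec flag that appends an ErasureCodingPolicy column to each output row (see the getHeader() change in PBImageDelimitedTextWriter below). An illustrative invocation, with placeholder paths, would be:

hdfs oiv -p Delimited -ec -i /path/to/fsimage -o /tmp/fsimage.txt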

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java

Lines changed: 35 additions & 3 deletions

@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;
@@ -46,6 +49,8 @@
 public class PBImageDelimitedTextWriter extends PBImageTextWriter {
  private static final String DATE_FORMAT = "yyyy-MM-dd HH:mm";
  private boolean printStoragePolicy;
+  private boolean printECPolicy;
+  private ErasureCodingPolicyManager ecManager;
 
  static class OutputEntryBuilder {
    private final SimpleDateFormat dateFormatter =
@@ -62,6 +67,7 @@ static class OutputEntryBuilder {
    private long nsQuota = 0;
    private long dsQuota = 0;
    private int storagePolicy = 0;
+    private String ecPolicy = "-";
 
    private String dirPermission = "-";
    private PermissionStatus permissionStatus;
@@ -83,6 +89,13 @@
          aclPermission = "+";
        }
        storagePolicy = file.getStoragePolicyID();
+        if (writer.printECPolicy && file.hasErasureCodingPolicyID()) {
+          ErasureCodingPolicy policy = writer.ecManager.
+              getByID((byte) file.getErasureCodingPolicyID());
+          if (policy != null) {
+            ecPolicy = policy.getName();
+          }
+        }
        break;
      case DIRECTORY:
        INodeDirectory dir = inode.getDirectory();
@@ -95,6 +108,12 @@ static class OutputEntryBuilder {
          aclPermission = "+";
        }
        storagePolicy = writer.getStoragePolicy(dir.getXAttrs());
+        if (writer.printECPolicy) {
+          String name = writer.getErasureCodingPolicyName(dir.getXAttrs());
+          if (name != null) {
+            ecPolicy = name;
+          }
+        }
        break;
      case SYMLINK:
        INodeSymlink s = inode.getSymlink();
@@ -134,6 +153,9 @@ public String build() {
      if (writer.printStoragePolicy) {
        writer.append(buffer, storagePolicy);
      }
+      if (writer.printECPolicy) {
+        writer.append(buffer, ecPolicy);
+      }
      return buffer.substring(1);
    }
  }
@@ -146,14 +168,21 @@ public String build() {
  PBImageDelimitedTextWriter(PrintStream out, String delimiter,
      String tempPath, boolean printStoragePolicy)
      throws IOException {
-    this(out, delimiter, tempPath, printStoragePolicy, 1, "-");
+    this(out, delimiter, tempPath, printStoragePolicy, false, 1, "-", null);
  }
 
  PBImageDelimitedTextWriter(PrintStream out, String delimiter,
-      String tempPath, boolean printStoragePolicy, int threads,
-      String parallelOut) throws IOException {
+      String tempPath, boolean printStoragePolicy,
+      boolean printECPolicy, int threads,
+      String parallelOut, Configuration conf)
+      throws IOException {
    super(out, delimiter, tempPath, threads, parallelOut);
    this.printStoragePolicy = printStoragePolicy;
+    if (printECPolicy && conf != null) {
+      this.printECPolicy = true;
+      ecManager = ErasureCodingPolicyManager.getInstance();
+      ecManager.init(conf);
+    }
  }
 
  @Override
@@ -187,6 +216,9 @@ public String getHeader() {
    if (printStoragePolicy) {
      append(buffer, "StoragePolicyId");
    }
+    if (printECPolicy) {
+      append(buffer, "ErasureCodingPolicy");
+    }
    return buffer.toString();
  }
 

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java

Lines changed: 24 additions & 0 deletions

@@ -27,6 +27,8 @@
 import java.io.PrintStream;
 import java.io.RandomAccessFile;
 import java.io.UnsupportedEncodingException;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
@@ -63,6 +65,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.SerialNumberManager;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.util.LimitInputStream;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
@@ -77,6 +80,8 @@
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_POLICY;
+
 /**
  * This class reads the protobuf-based fsimage and generates text output
  * for each inode to {@link PBImageTextWriter#out}. The sub-class can override
@@ -1029,4 +1034,23 @@ public static void mergeFiles(String[] srcPaths, String resultPath)
      }
    }
  }
+
+  public String getErasureCodingPolicyName
+      (INodeSection.XAttrFeatureProto xattrFeatureProto) {
+    List<XAttr> xattrs =
+        FSImageFormatPBINode.Loader.loadXAttrs(xattrFeatureProto, stringTable);
+    for (XAttr xattr : xattrs) {
+      if (XATTR_ERASURECODING_POLICY.contains(xattr.getName())){
+        try{
+          ByteArrayInputStream bIn = new ByteArrayInputStream(xattr.getValue());
+          DataInputStream dIn = new DataInputStream(bIn);
+          return WritableUtils.readString(dIn);
+        } catch (IOException ioException){
+          return null;
+        }
+      }
+    }
+    return null;
+  }
+
 }
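
A self-contained sketch of the decoding performed by getErasureCodingPolicyName (the round trip below is illustrative and assumes the xattr value holds the policy name as a WritableUtils string, which is what the readString call above relies on; the policy name is a sample value):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class XAttrValueDecodeSketch {
  public static void main(String[] args) throws IOException {
    // Encode a policy name the same way it would be read back (illustrative round trip).
    ByteArrayOutputStream bOut = new ByteArrayOutputStream();
    WritableUtils.writeString(new DataOutputStream(bOut), "RS-6-3-1024k");
    byte[] xattrValue = bOut.toByteArray();

    // Decode, as the new helper does with xattr.getValue().
    DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(xattrValue));
    System.out.println(WritableUtils.readString(dIn));  // RS-6-3-1024k
  }
}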

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java

Lines changed: 33 additions & 0 deletions

@@ -215,6 +215,39 @@ public void testStripedAndReplicatedFileChecksum() throws Exception {
    }
  }
 
+  /**
+   * Test the corner case of the COMPOSITE_CRC.
+   * For Stripe File, last block size in the file is (int)(blockSize * 0.5),
+   * but the last block size in the check length is (int)(blockSize * 0.6).
+   * For Replicate File, the last block size in the file is (int)(blockSize * 0.5),
+   * but the last block size in the check length is ((dataBlocks - 1) * blockSize
+   * + (int) (blockSize * 0.6))
+   */
+  @Test(timeout = 90000)
+  public void testStripedAndReplicatedFileChecksum2() throws Exception {
+    final int lastBlockSize = (int) (blockSize * 0.5);
+    final int fullStripeLength = dataBlocks * blockSize;
+    final int testFileSize = fullStripeLength + lastBlockSize;
+    prepareTestFiles(testFileSize, new String[] {stripedFile1, replicatedFile});
+
+    final int specialLength = (dataBlocks - 1) * blockSize
+        + (int) (blockSize * 0.6);
+
+    Assert.assertTrue(specialLength % blockSize > lastBlockSize);
+    Assert.assertTrue(specialLength % fullStripeLength > lastBlockSize);
+
+    FileChecksum stripedFileChecksum = getFileChecksum(stripedFile1,
+        specialLength, false);
+    FileChecksum replicatedFileChecksum = getFileChecksum(replicatedFile,
+        specialLength, false);
+
+    if (checksumCombineMode.equals(ChecksumCombineMode.COMPOSITE_CRC.name())) {
+      Assert.assertEquals(replicatedFileChecksum, stripedFileChecksum);
+    } else {
+      Assert.assertNotEquals(replicatedFileChecksum, stripedFileChecksum);
+    }
+  }
+
  @Test(timeout = 90000)
  public void testDifferentBlockSizeReplicatedFileChecksum() throws Exception {
    byte[] fileData = StripedFileTestUtil.generateBytes(fileSize);
