
Commit 573af8b

Mehakmeet Singh authored and committed

HADOOP-17817. HADOOP-17823. S3A to raise IOE if both S3-CSE and S3Guard enabled (apache#3239)
S3A S3Guard tests to skip if S3-CSE is enabled (apache#3263)

Follow-on to HADOOP-13887. Encrypt S3A data client-side with AWS SDK (S3-CSE).

If the S3A bucket is set up to use S3-CSE encryption, all tests which turn on
S3Guard are skipped, so they don't raise any exceptions about incompatible
configurations.

Contributed by Mehakmeet Singh
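In practice, once both client-side encryption and a non-null S3Guard metadata store are configured for a bucket, filesystem initialization now fails fast. A minimal sketch of the failure mode, not part of this commit: the bucket name and KMS key id are placeholders, and LocalMetadataStore stands in for any non-null S3Guard store.

```
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.PathIOException;

public class CseS3GuardClash {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Client-side encryption, as introduced by HADOOP-13887.
    conf.set("fs.s3a.server-side-encryption-algorithm", "CSE-KMS");
    conf.set("fs.s3a.server-side-encryption.key", "<kms-key-id>"); // placeholder
    // Any non-null S3Guard metadata store triggers the new check.
    conf.set("fs.s3a.metadatastore.impl",
        "org.apache.hadoop.fs.s3a.s3guard.LocalMetadataStore");
    try {
      FileSystem.get(new URI("s3a://test-bucket/"), conf); // placeholder bucket
    } catch (PathIOException e) {
      // With this commit: "`s3a://test-bucket': S3-CSE cannot be used with S3Guard"
      System.out.println(e.getMessage());
    }
  }
}
```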
1 parent e421824 commit 573af8b

File tree: 11 files changed, +111 −3 lines changed

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java

Lines changed: 4 additions & 0 deletions

```
@@ -216,6 +216,7 @@
 import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isObjectNotFound;
 import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isUnknownBucket;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.CSE_PADDING_LENGTH;
+import static org.apache.hadoop.fs.s3a.impl.InternalConstants.CSE_S3GUARD_INCOMPATIBLE;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DEFAULT_UPLOAD_PART_COUNT_LIMIT;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DELETE_CONSIDERED_IDEMPOTENT;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_404;
@@ -539,6 +540,9 @@ public void initialize(URI name, Configuration originalConf)
     if (hasMetadataStore()) {
       LOG.debug("Using metadata store {}, authoritative store={}, authoritative path={}",
           getMetadataStore(), allowAuthoritativeMetadataStore, allowAuthoritativePaths);
+      if (isCSEEnabled) {
+        throw new PathIOException(uri.toString(), CSE_S3GUARD_INCOMPATIBLE);
+      }
     }

     // LOG if S3Guard is disabled on the warn level set in config
```
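Note that the new check sits inside the `hasMetadataStore()` branch, so the PathIOException is raised only when an actual metadata store has been bound; with the default NullMetadataStore, a CSE-encrypted filesystem initializes as before.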

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java

Lines changed: 5 additions & 0 deletions

```
@@ -134,4 +134,9 @@ private InternalConstants() {
    */
   public static final int CSE_PADDING_LENGTH = 16;
 
+  /**
+   * Error message to indicate S3-CSE is incompatible with S3Guard.
+   */
+  public static final String CSE_S3GUARD_INCOMPATIBLE = "S3-CSE cannot be "
+      + "used with S3Guard";
 }
```

hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md

Lines changed: 1 addition & 0 deletions

```
@@ -601,6 +601,7 @@ clients where S3-CSE has not been enabled.
 
 ### Limitations
 
+- S3Guard is not supported with S3-CSE.
 - Performance will be reduced. All encrypt/decrypt is now being done on the
   client.
 - Writing files may be slower, as only a single block can be encrypted and
```

hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md

Lines changed: 25 additions & 0 deletions

````
@@ -1435,6 +1435,31 @@ The user trying to use the KMS Key ID should have the right permissions to access
 If not, then add permission(or IAM role) in "Key users" section by selecting the
 AWS-KMS CMK Key on AWS console.
 
+### S3-CSE cannot be used with S3Guard
+
+S3-CSE not supported for S3Guard enabled buckets.
+```
+org.apache.hadoop.fs.PathIOException: `s3a://test-bucket': S3-CSE cannot be used with S3Guard
+    at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:543)
+    at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3460)
+    at org.apache.hadoop.fs.FileSystem.access$300(FileSystem.java:172)
+    at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3565)
+    at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3512)
+    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:539)
+    at org.apache.hadoop.fs.Path.getFileSystem(Path.java:366)
+    at org.apache.hadoop.fs.shell.PathData.expandAsGlob(PathData.java:342)
+    at org.apache.hadoop.fs.shell.Command.expandArgument(Command.java:252)
+    at org.apache.hadoop.fs.shell.Command.expandArguments(Command.java:235)
+    at org.apache.hadoop.fs.shell.FsCommand.processRawArguments(FsCommand.java:105)
+    at org.apache.hadoop.fs.shell.Command.run(Command.java:179)
+    at org.apache.hadoop.fs.FsShell.run(FsShell.java:327)
+    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:81)
+    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:95)
+    at org.apache.hadoop.fs.FsShell.main(FsShell.java:390)
+```
+If you want to use S3Guard then disable S3-CSE or disable S3Guard if you want
+to use S3-CSE.
+
 ### <a name="not_all_bytes_were_read"></a> Message appears in logs "Not all bytes were read from the S3ObjectInputStream"
 
 
````
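The fix is a one-option configuration change either way. A hedged sketch of both remedies, assuming the standard S3A option names:

```
import org.apache.hadoop.conf.Configuration;

public class CseS3GuardRemedies {
  /** Keep S3-CSE; turn S3Guard off for the bucket. */
  static void disableS3Guard(Configuration conf) {
    conf.set("fs.s3a.metadatastore.impl",
        "org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore");
  }

  /** Keep S3Guard; drop client-side encryption. */
  static void disableCse(Configuration conf) {
    conf.unset("fs.s3a.server-side-encryption-algorithm");
  }
}
```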
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java

Lines changed: 2 additions & 1 deletion

```
@@ -143,7 +143,8 @@ protected AbstractFSContract createContract(Configuration conf) {
   public void teardown() throws Exception {
     super.teardown();
     S3AFileSystem fs = getFileSystem();
-    if (fs.getConf().getBoolean(FS_S3A_IMPL_DISABLE_CACHE, false)) {
+    if (fs != null && fs.getConf().getBoolean(FS_S3A_IMPL_DISABLE_CACHE,
+        false)) {
       fs.close();
     }
   }
```

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/S3AContract.java

Lines changed: 19 additions & 0 deletions

```
@@ -18,12 +18,17 @@
 
 package org.apache.hadoop.fs.contract.s3a;
 
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.S3ATestUtils;
 
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeSkipIfS3GuardAndS3CSEIOE;
+
 /**
  * The contract of S3A: only enabled if the test bucket is provided.
  */
@@ -63,6 +68,20 @@ public S3AContract(Configuration conf, boolean addContractResource) {
     }
   }
 
+  /**
+   * Skip S3AFS initialization if S3-CSE and S3Guard are enabled.
+   */
+  @Override
+  public void init() throws IOException {
+    try {
+      super.init();
+    } catch (PathIOException ioe) {
+      // Skip the tests if S3-CSE and S3-Guard are enabled.
+      maybeSkipIfS3GuardAndS3CSEIOE(ioe);
+    }
+  }
+
   @Override
   public String getScheme() {
     return "s3a";
```
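With this override in place, every contract test that binds its filesystem through S3AContract picks up the skip behaviour automatically; the null-guards added to the teardown methods in this commit cover the case where init() skipped before a filesystem was created.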

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3AMockTest.java

Lines changed: 2 additions & 0 deletions

```
@@ -58,6 +58,8 @@ public void setup() throws Exception {
     Configuration conf = createConfiguration();
     fs = new S3AFileSystem();
     URI uri = URI.create(FS_S3A + "://" + BUCKET);
+    // unset S3CSE property from config to avoid pathIOE.
+    conf.unset(SERVER_SIDE_ENCRYPTION_ALGORITHM);
     fs.initialize(uri, conf);
     s3 = fs.getAmazonS3ClientForTesting("mocking");
   }
```

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java

Lines changed: 9 additions & 1 deletion

```
@@ -33,6 +33,7 @@
  */
 public class ITestS3AFSMainOperations extends FSMainOperationsBaseTest {
 
+  private S3AContract contract;
 
   public ITestS3AFSMainOperations() {
     super(createTestPath(
@@ -41,11 +42,18 @@ public ITestS3AFSMainOperations() {
 
   @Override
   protected FileSystem createFileSystem() throws Exception {
-    S3AContract contract = new S3AContract(new Configuration());
+    contract = new S3AContract(new Configuration());
     contract.init();
     return contract.getTestFileSystem();
   }
 
+  @Override
+  public void tearDown() throws Exception {
+    if (contract.getTestFileSystem() != null) {
+      super.tearDown();
+    }
+  }
+
   @Override
   @Ignore("Permissions not supported")
   public void testListStatusThrowsExceptionForUnreadableDir() {
```

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java

Lines changed: 1 addition & 1 deletion

```
@@ -77,7 +77,7 @@ public void setup() throws Exception {
 
   @Override
   public void teardown() throws Exception {
-    if (getFileSystem()
+    if (getFileSystem() != null && getFileSystem()
         .getAmazonS3Client() instanceof InconsistentAmazonS3Client) {
       clearInconsistency(getFileSystem());
     }
```

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java

Lines changed: 37 additions & 0 deletions

```
@@ -29,6 +29,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.s3a.auth.MarshalledCredentialBinding;
@@ -37,6 +38,7 @@
 
 import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy;
 import org.apache.hadoop.fs.s3a.impl.ContextAccessors;
+import org.apache.hadoop.fs.s3a.impl.InternalConstants;
 import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum;
 import org.apache.hadoop.fs.s3a.impl.StoreContext;
 import org.apache.hadoop.fs.s3a.impl.StoreContextBuilder;
@@ -186,6 +188,8 @@ public static S3AFileSystem createTestFileSystem(Configuration conf,
     // make this whole class not run by default
     Assume.assumeTrue("No test filesystem in " + TEST_FS_S3A_NAME,
         liveTest);
+    // Skip if S3Guard and S3-CSE are enabled.
+    skipIfS3GuardAndS3CSEEnabled(conf);
     // patch in S3Guard options
     maybeEnableS3Guard(conf);
     S3AFileSystem fs1 = new S3AFileSystem();
@@ -229,12 +233,45 @@ public static FileContext createTestFileContext(Configuration conf)
     // make this whole class not run by default
     Assume.assumeTrue("No test filesystem in " + TEST_FS_S3A_NAME,
         liveTest);
+    // Skip if S3Guard and S3-CSE are enabled.
+    skipIfS3GuardAndS3CSEEnabled(conf);
     // patch in S3Guard options
     maybeEnableS3Guard(conf);
     FileContext fc = FileContext.getFileContext(testURI, conf);
     return fc;
   }
 
+  /**
+   * Skip if S3Guard and S3CSE are enabled together.
+   *
+   * @param conf Test Configuration.
+   */
+  private static void skipIfS3GuardAndS3CSEEnabled(Configuration conf) {
+    String encryptionMethod =
+        conf.getTrimmed(SERVER_SIDE_ENCRYPTION_ALGORITHM, "");
+    String metaStore = conf.getTrimmed(S3_METADATA_STORE_IMPL, "");
+    if (encryptionMethod.equals(S3AEncryptionMethods.CSE_KMS.getMethod()) &&
+        !metaStore.equals(S3GUARD_METASTORE_NULL)) {
+      skip("Skipped if CSE is enabled with S3Guard.");
+    }
+  }
+
+  /**
+   * Either skip if PathIOE occurred due to S3CSE and S3Guard
+   * incompatibility or throw the PathIOE.
+   *
+   * @param ioe PathIOE being parsed.
+   * @throws PathIOException Throws PathIOE if it doesn't relate to S3CSE
+   *                         and S3Guard incompatibility.
+   */
+  public static void maybeSkipIfS3GuardAndS3CSEIOE(PathIOException ioe)
+      throws PathIOException {
+    if (ioe.toString().contains(InternalConstants.CSE_S3GUARD_INCOMPATIBLE)) {
+      skip("Skipping since CSE is enabled with S3Guard.");
+    }
+    throw ioe;
+  }
+
   /**
    * Get a long test property.
    * <ol>
```
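Since skip() raises a JUnit assumption failure rather than returning, control never reaches `throw ioe;` when the message matches; the rethrow only happens for unrelated PathIOExceptions. A hedged usage sketch of the helper (the surrounding test class is hypothetical; S3ATestUtils is test-scope code):

```
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.S3ATestUtils;

public class ExampleCseAwareSetup {
  public S3AFileSystem createFs(URI uri, Configuration conf) throws Exception {
    S3AFileSystem fs = new S3AFileSystem();
    try {
      fs.initialize(uri, conf);
    } catch (PathIOException ioe) {
      // Converts the CSE+S3Guard incompatibility into a skipped test;
      // any other PathIOException is rethrown and fails the test.
      S3ATestUtils.maybeSkipIfS3GuardAndS3CSEIOE(ioe);
    }
    return fs;
  }
}
```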
