Commit 3fc1c44

HDFS-15052. WebHDFS getTrashRoot leads to OOM due to FileSystem object creation. (#1758)
(cherry picked from commit 2338d25)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

(cherry picked from commit 610805e)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
1 parent ce06503 commit 3fc1c44

4 files changed: +124 additions, -15 deletions
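At a high level, the change removes the per-request FileSystem instantiation from the NameNode's GETTRASHROOT handler and computes the trash root directly from the namesystem's encryption-zone lookup plus the caller's UserGroupInformation. A minimal before/after sketch, paraphrased from the NamenodeWebHdfsMethods diff below (variable names follow the handler; this is an illustration, not the exact patch text):

// Before (removed): each GETTRASHROOT request built a client FileSystem on the
// NameNode just to resolve the trash root; the commit title ties this object
// creation to the OOM.
FileSystem fs = FileSystem.get(conf != null ? conf : new Configuration());
String trashPath = fs.getTrashRoot(new Path(fullPath)).toUri().getPath();

// After (added): resolve the trash root from an existing ClientProtocol RPC
// plus the caller's identity, with no FileSystem object involved.
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String parentSrc = getParent(fullPath);                      // new helper, below
EncryptionZone ez = getRpcClientProtocol().getEZForPath(
    parentSrc != null ? parentSrc : fullPath);
String trashRoot = (ez != null)
    ? DFSUtilClient.getEZTrashRoot(ez, ugi)                   // new helper, below
    : DFSUtilClient.getTrashRoot(conf, ugi);                  // new helper, below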

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java

Lines changed: 28 additions & 2 deletions
@@ -54,6 +54,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -772,7 +773,7 @@ public static InterruptedIOException toInterruptedIOException(String message,
    * @param ugi {@link UserGroupInformation} of current user.
    * @return the home directory of current user.
    */
-  public static Path getHomeDirectory(Configuration conf,
+  public static String getHomeDirectory(Configuration conf,
       UserGroupInformation ugi) {
     String userHomePrefix = HdfsClientConfigKeys
         .DFS_USER_HOME_DIR_PREFIX_DEFAULT;
@@ -781,6 +782,31 @@ public static Path getHomeDirectory(Configuration conf,
           HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
           HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
     }
-    return new Path(userHomePrefix + "/" + ugi.getShortUserName());
+    return userHomePrefix + Path.SEPARATOR + ugi.getShortUserName();
+  }
+
+  /**
+   * Returns trash root in non-encryption zone.
+   * @param conf configuration.
+   * @param ugi user of trash owner.
+   * @return unqualified path of trash root.
+   */
+  public static String getTrashRoot(Configuration conf,
+      UserGroupInformation ugi) {
+    return getHomeDirectory(conf, ugi)
+        + Path.SEPARATOR + FileSystem.TRASH_PREFIX;
+  }
+
+  /**
+   * Returns trash root in encryption zone.
+   * @param ez encryption zone.
+   * @param ugi user of trash owner.
+   * @return unqualified path of trash root.
+   */
+  public static String getEZTrashRoot(EncryptionZone ez,
+      UserGroupInformation ugi) {
+    String ezpath = ez.getPath();
+    return (ezpath.equals("/") ? ezpath : ezpath + Path.SEPARATOR)
+        + FileSystem.TRASH_PREFIX + Path.SEPARATOR + ugi.getShortUserName();
   }
 }
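The two new helpers build trash-root locations as plain strings, so callers no longer need a FileSystem (or even a Path object) to know where a user's trash lives. A minimal sketch of the expected values, assuming the default dfs.user.home.dir.prefix of /user and a short user name of alice; the class name is illustrative, and getEZTrashRoot is only described in a comment because it needs an EncryptionZone obtained from the NameNode:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.security.UserGroupInformation;

public class TrashRootPaths {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    // Non-encryption-zone trash root: "<home prefix>/<user>/.Trash",
    // e.g. "/user/alice/.Trash" under the assumptions above.
    System.out.println(DFSUtilClient.getTrashRoot(conf, ugi));

    // Encryption-zone trash root: "<EZ path>/.Trash/<user>", e.g.
    // "/zone1/.Trash/alice" for an EZ rooted at /zone1, or "/.Trash/alice"
    // for an EZ rooted at "/" (the ternary in getEZTrashRoot avoids "//.Trash").
    // DFSUtilClient.getEZTrashRoot(ez, ugi) is not called here because it
    // requires an EncryptionZone returned by the NameNode.
  }
}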

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

Lines changed: 6 additions & 5 deletions
@@ -197,7 +197,8 @@ public void setWorkingDirectory(Path dir) {
 
   @Override
   public Path getHomeDirectory() {
-    return makeQualified(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi));
+    return makeQualified(
+        new Path(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi)));
   }
 
   /**
@@ -2635,8 +2636,7 @@ public Path getTrashRoot(Path path) {
       EncryptionZone ez = dfs.getEZForPath(parentSrc);
       if ((ez != null)) {
         return this.makeQualified(
-            new Path(new Path(ez.getPath(), FileSystem.TRASH_PREFIX),
-                dfs.ugi.getShortUserName()));
+            new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi)));
       }
     } catch (IOException e) {
       DFSClient.LOG.warn("Exception in checking the encryption zone for the " +
@@ -2663,7 +2663,8 @@ public Collection<FileStatus> getTrashRoots(boolean allUsers) {
       // Get EZ Trash roots
       final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
       while (it.hasNext()) {
-        Path ezTrashRoot = new Path(it.next().getPath(),
+        EncryptionZone ez = it.next();
+        Path ezTrashRoot = new Path(ez.getPath(),
             FileSystem.TRASH_PREFIX);
         if (!exists(ezTrashRoot)) {
           continue;
@@ -2675,7 +2676,7 @@ public Collection<FileStatus> getTrashRoots(boolean allUsers) {
           }
         }
       } else {
-        Path userTrash = new Path(ezTrashRoot, dfs.ugi.getShortUserName());
+        Path userTrash = new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi));
         try {
           ret.add(getFileStatus(userTrash));
         } catch (FileNotFoundException ignored) {
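On the client side this is a refactor rather than a behavior change: getTrashRoot and getTrashRoots still return fully qualified Paths, but the encryption-zone branch now delegates the string construction to DFSUtilClient.getEZTrashRoot instead of nesting Path constructors. A minimal usage sketch, assuming fs.defaultFS points at the cluster and /zone1 is an encryption zone (host name and paths are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetTrashRootClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Inside the EZ: e.g. hdfs://nn:8020/zone1/.Trash/alice
    System.out.println(fs.getTrashRoot(new Path("/zone1/file")));

    // Outside any EZ: e.g. hdfs://nn:8020/user/alice/.Trash
    System.out.println(fs.getTrashRoot(new Path("/tmp/file")));
  }
}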

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

Lines changed: 36 additions & 8 deletions
@@ -62,7 +62,6 @@
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.XAttr;
@@ -76,6 +75,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -1097,7 +1097,7 @@ protected Response get(
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETHOMEDIRECTORY: {
-      String userHome = DFSUtilClient.getHomeDirectory(conf, ugi).toString();
+      String userHome = DFSUtilClient.getHomeDirectory(conf, ugi);
       final String js = JsonUtil.toJsonString("Path", userHome);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
@@ -1136,7 +1136,7 @@ protected Response get(
       return Response.ok().build();
     }
     case GETTRASHROOT: {
-      final String trashPath = getTrashRoot(fullpath, conf);
+      final String trashPath = getTrashRoot(conf, fullpath);
       final String jsonStr = JsonUtil.toJsonString("Path", trashPath);
       return Response.ok(jsonStr).type(MediaType.APPLICATION_JSON).build();
     }
@@ -1178,11 +1178,39 @@ protected Response get(
     }
   }
 
-  private static String getTrashRoot(String fullPath,
-      Configuration conf) throws IOException {
-    FileSystem fs = FileSystem.get(conf != null ? conf : new Configuration());
-    return fs.getTrashRoot(
-        new org.apache.hadoop.fs.Path(fullPath)).toUri().getPath();
+  private String getTrashRoot(Configuration conf, String fullPath)
+      throws IOException {
+    UserGroupInformation ugi= UserGroupInformation.getCurrentUser();
+    String parentSrc = getParent(fullPath);
+    EncryptionZone ez = getRpcClientProtocol().getEZForPath(
+        parentSrc != null ? parentSrc : fullPath);
+    String trashRoot;
+    if (ez != null) {
+      trashRoot = DFSUtilClient.getEZTrashRoot(ez, ugi);
+    } else {
+      trashRoot = DFSUtilClient.getTrashRoot(conf, ugi);
+    }
+    return trashRoot;
+  }
+
+  /**
+   * Returns the parent of a path in the same way as Path#getParent.
+   * @return the parent of a path or null if at root
+   */
+  public String getParent(String path) {
+    int lastSlash = path.lastIndexOf('/');
+    int start = 0;
+    if ((path.length() == start) || // empty path
+        (lastSlash == start && path.length() == start + 1)) { // at root
+      return null;
+    }
+    String parent;
+    if (lastSlash == -1) {
+      parent = org.apache.hadoop.fs.Path.CUR_DIR;
+    } else {
+      parent = path.substring(0, lastSlash == start ? start + 1 : lastSlash);
+    }
+    return parent;
   }
 
   private static DirectoryListing getDirectoryListing(final ClientProtocol cp,
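The new getParent helper mirrors Path#getParent for raw path strings so the handler can look up the encryption zone of a path's parent without allocating Path objects. A small sketch of its boundary behavior, using a standalone copy of the logic (the real method is an instance method of NamenodeWebHdfsMethods and is not called here):

public class GetParentSketch {
  // Standalone copy of the logic added above, for illustration only.
  static String getParent(String path) {
    int lastSlash = path.lastIndexOf('/');
    int start = 0;
    if ((path.length() == start) ||                             // empty path
        (lastSlash == start && path.length() == start + 1)) {   // at root
      return null;
    }
    if (lastSlash == -1) {
      return ".";                                                // Path.CUR_DIR
    }
    return path.substring(0, lastSlash == start ? start + 1 : lastSlash);
  }

  public static void main(String[] args) {
    System.out.println(getParent("/zone1/file"));  // "/zone1"
    System.out.println(getParent("/zone1"));       // "/"
    System.out.println(getParent("/"));            // null (already at root)
    System.out.println(getParent(""));             // null (empty path)
    System.out.println(getParent("file"));         // "." (relative, no slash)
  }
}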

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

Lines changed: 54 additions & 0 deletions
@@ -34,6 +34,7 @@
 import static org.junit.Assert.fail;
 
 import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -47,6 +48,7 @@
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.Map;
 import java.util.Random;
 
@@ -58,11 +60,13 @@
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.QuotaUsage;
@@ -80,6 +84,8 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestFileCreation;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -1397,6 +1403,54 @@ public void testGetTrashRoot() throws Exception {
     }
   }
 
+  @Test
+  public void testGetEZTrashRoot() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
+    File testRootDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        "jceks://file" + new Path(testRootDir.toString(), "test.jks").toUri());
+    final MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+        conf, WebHdfsConstants.WEBHDFS_SCHEME);
+    HdfsAdmin dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+    dfs.getClient().setKeyProvider(
+        cluster.getNameNode().getNamesystem().getProvider());
+    final String testkey = "test_key";
+    DFSTestUtil.createKey(testkey, cluster, conf);
+
+    final Path zone1 = new Path("/zone1");
+    dfs.mkdirs(zone1, new FsPermission((short)0700));
+    dfsAdmin.createEncryptionZone(zone1, testkey,
+        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
+
+    final Path insideEZ = new Path(zone1, "insideEZ");
+    dfs.mkdirs(insideEZ, new FsPermission((short)0700));
+    assertEquals(
+        dfs.getTrashRoot(insideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(insideEZ).toUri().getPath());
+
+    final Path outsideEZ = new Path("/outsideEZ");
+    dfs.mkdirs(outsideEZ, new FsPermission((short)0755));
+    assertEquals(
+        dfs.getTrashRoot(outsideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(outsideEZ).toUri().getPath());
+
+    final Path root = new Path("/");
+    assertEquals(
+        dfs.getTrashRoot(root).toUri().getPath(),
+        webhdfs.getTrashRoot(root).toUri().getPath());
+    assertEquals(
+        webhdfs.getTrashRoot(root).toUri().getPath(),
+        webhdfs.getTrashRoot(zone1).toUri().getPath());
+    assertEquals(
+        webhdfs.getTrashRoot(outsideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(zone1).toUri().getPath());
+  }
+
   @Test
   public void testStoragePolicy() throws Exception {
     MiniDFSCluster cluster = null;
