
HDFS-15052. WebHDFS getTrashRoot leads to OOM due to FileSystem objec… #1758


Merged · 5 commits · Feb 21, 2020
Changes from all commits:
DFSUtilClient.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -1003,7 +1004,7 @@ public static Path makePathFromFileId(long fileId) {
    * @param ugi {@link UserGroupInformation} of current user.
    * @return the home directory of current user.
    */
-  public static Path getHomeDirectory(Configuration conf,
+  public static String getHomeDirectory(Configuration conf,
       UserGroupInformation ugi) {
     String userHomePrefix = HdfsClientConfigKeys
         .DFS_USER_HOME_DIR_PREFIX_DEFAULT;
@@ -1012,6 +1013,31 @@ public static Path getHomeDirectory(Configuration conf,
           HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
           HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
     }
-    return new Path(userHomePrefix + "/" + ugi.getShortUserName());
+    return userHomePrefix + Path.SEPARATOR + ugi.getShortUserName();
   }
+
+  /**
+   * Returns the trash root of a user outside any encryption zone.
+   * @param conf configuration.
+   * @param ugi user who owns the trash root.
+   * @return unqualified path of the trash root.
+   */
+  public static String getTrashRoot(Configuration conf,
+      UserGroupInformation ugi) {
+    return getHomeDirectory(conf, ugi)
+        + Path.SEPARATOR + FileSystem.TRASH_PREFIX;
+  }
+
+  /**
+   * Returns the trash root of a user inside an encryption zone.
+   * @param ez the encryption zone.
+   * @param ugi user who owns the trash root.
+   * @return unqualified path of the trash root.
+   */
+  public static String getEZTrashRoot(EncryptionZone ez,
+      UserGroupInformation ugi) {
+    String ezpath = ez.getPath();
+    return (ezpath.equals("/") ? ezpath : ezpath + Path.SEPARATOR)
+        + FileSystem.TRASH_PREFIX + Path.SEPARATOR + ugi.getShortUserName();
+  }
 }
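As a quick illustration (a sketch of mine, not part of the patch): with the default dfs.user.home.dir.prefix of "/user" and a hypothetical user alice, the new string helpers produce the values below. The equals("/") special case in getEZTrashRoot is needed because plain string concatenation, unlike the Path constructor the old code used, would not collapse the double slash in "/" + "/" + ".Trash".

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.security.UserGroupInformation;

public class TrashRootHelpersSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical user name, for illustration only.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
    System.out.println(DFSUtilClient.getHomeDirectory(conf, ugi)); // /user/alice
    System.out.println(DFSUtilClient.getTrashRoot(conf, ugi));     // /user/alice/.Trash
    // For an EncryptionZone ez rooted at "/zone1":
    //   DFSUtilClient.getEZTrashRoot(ez, ugi) returns "/zone1/.Trash/alice"
    // For an EncryptionZone ez rooted at "/":
    //   DFSUtilClient.getEZTrashRoot(ez, ugi) returns "/.Trash/alice"
  }
}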
DistributedFileSystem.java
@@ -213,7 +213,8 @@ public void setWorkingDirectory(Path dir) {
 
   @Override
   public Path getHomeDirectory() {
-    return makeQualified(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi));
+    return makeQualified(
+        new Path(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi)));
   }
 
   /**
@@ -3113,8 +3114,7 @@ public Path getTrashRoot(Path path) {
       EncryptionZone ez = dfs.getEZForPath(parentSrc);
       if ((ez != null)) {
         return this.makeQualified(
-            new Path(new Path(ez.getPath(), FileSystem.TRASH_PREFIX),
-                dfs.ugi.getShortUserName()));
+            new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi)));
       }
     } catch (IOException e) {
       DFSClient.LOG.warn("Exception in checking the encryption zone for the " +
@@ -3141,7 +3141,8 @@ public Collection<FileStatus> getTrashRoots(boolean allUsers) {
       // Get EZ Trash roots
       final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
       while (it.hasNext()) {
-        Path ezTrashRoot = new Path(it.next().getPath(),
+        EncryptionZone ez = it.next();
+        Path ezTrashRoot = new Path(ez.getPath(),
             FileSystem.TRASH_PREFIX);
         if (!exists(ezTrashRoot)) {
           continue;
@@ -3153,7 +3154,7 @@ public Collection<FileStatus> getTrashRoots(boolean allUsers) {
            }
          }
        } else {
-         Path userTrash = new Path(ezTrashRoot, dfs.ugi.getShortUserName());
+         Path userTrash = new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi));
          try {
            ret.add(getFileStatus(userTrash));
          } catch (FileNotFoundException ignored) {
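Client-visible results should be unchanged by this refactor; the trash root is still returned fully qualified. A sketch of what a caller observes (the NameNode URI and user alice are hypothetical, mine rather than the patch's):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetTrashRootClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://nn:8020"); // hypothetical cluster
    FileSystem fs = FileSystem.get(conf);
    // Running as user "alice", outside any encryption zone:
    System.out.println(fs.getTrashRoot(new Path("/outsideEZ/file")));
    // prints hdfs://nn:8020/user/alice/.Trash
    // Inside an encryption zone rooted at /zone1:
    System.out.println(fs.getTrashRoot(new Path("/zone1/file")));
    // prints hdfs://nn:8020/zone1/.Trash/alice
  }
}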
NamenodeWebHdfsMethods.java
@@ -65,7 +65,6 @@
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.XAttr;
@@ -81,6 +80,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1244,7 +1244,7 @@ protected Response get(
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETHOMEDIRECTORY: {
-      String userHome = DFSUtilClient.getHomeDirectory(conf, ugi).toString();
+      String userHome = DFSUtilClient.getHomeDirectory(conf, ugi);
       final String js = JsonUtil.toJsonString("Path", userHome);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
@@ -1285,7 +1285,7 @@ protected Response get(
       return Response.ok().build();
     }
     case GETTRASHROOT: {
-      final String trashPath = getTrashRoot(fullpath, conf);
+      final String trashPath = getTrashRoot(conf, fullpath);
       final String jsonStr = JsonUtil.toJsonString("Path", trashPath);
       return Response.ok(jsonStr).type(MediaType.APPLICATION_JSON).build();
     }
@@ -1345,11 +1345,39 @@ protected Response get(
     }
   }
 
-  private static String getTrashRoot(String fullPath,
-      Configuration conf) throws IOException {
-    FileSystem fs = FileSystem.get(conf != null ? conf : new Configuration());
-    return fs.getTrashRoot(
-        new org.apache.hadoop.fs.Path(fullPath)).toUri().getPath();
+  private String getTrashRoot(Configuration conf, String fullPath)
+      throws IOException {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    String parentSrc = getParent(fullPath);
+    EncryptionZone ez = getRpcClientProtocol().getEZForPath(
+        parentSrc != null ? parentSrc : fullPath);
+    String trashRoot;
+    if (ez != null) {
+      trashRoot = DFSUtilClient.getEZTrashRoot(ez, ugi);
+    } else {
+      trashRoot = DFSUtilClient.getTrashRoot(conf, ugi);
+    }
+    return trashRoot;
+  }
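For context, my reading of the leak this replaces (the PR description is not reproduced here): the old implementation called FileSystem.get inside the per-request doAs block, and the FileSystem cache keys instances by scheme, authority, and UGI, so every distinct WebHDFS caller left behind a cached DistributedFileSystem that was never closed. The new code above answers GETTRASHROOT from the already-open ClientProtocol plus the string helpers in DFSUtilClient, allocating no FileSystem at all. A minimal sketch of the leaky pattern, with hypothetical user churn:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class FsCacheLeakSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    for (int i = 0; i < 1000000; i++) { // hypothetical stream of distinct users
      UserGroupInformation ugi =
          UserGroupInformation.createRemoteUser("user" + i);
      ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
        // Each distinct UGI adds a new entry to the FileSystem cache; nothing
        // closes it, so heap use grows with the number of callers.
        FileSystem fs = FileSystem.get(conf);
        fs.getTrashRoot(new Path("/some/path"));
        return null;
      });
    }
  }
}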

+  /**
+   * Returns the parent of a path in the same way as Path#getParent.
+   * @return the parent of a path or null if at root.
+   */
+  public String getParent(String path) {
+    int lastSlash = path.lastIndexOf('/');
+    int start = 0;
+    if ((path.length() == start) || // empty path
+        (lastSlash == start && path.length() == start + 1)) { // at root
+      return null;
+    }
+    String parent;
+    if (lastSlash == -1) {
+      parent = org.apache.hadoop.fs.Path.CUR_DIR;
+    } else {
+      parent = path.substring(0, lastSlash == start ? start + 1 : lastSlash);
+    }
+    return parent;
+  }
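A few illustrative values for the new getParent helper (examples mine, derived from the code above):

// getParent("/foo/bar") returns "/foo"
// getParent("/foo")     returns "/"    (the parent is the root itself)
// getParent("/")        returns null   (already at the root)
// getParent("")         returns null   (empty path)
// getParent("foo")      returns "."    (Path.CUR_DIR; no slash present)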

   private static DirectoryListing getDirectoryListing(final ClientProtocol cp,
TestWebHDFS.java
@@ -34,6 +34,7 @@
 import static org.junit.Assert.fail;
 
 import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -48,6 +49,7 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Random;
@@ -62,11 +64,13 @@
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -85,6 +89,8 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestFileCreation;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -1535,6 +1541,52 @@ public void testGetTrashRoot() throws Exception {
     assertEquals(expectedPath.toUri().getPath(), trashPath.toUri().getPath());
   }
 
+  @Test
+  public void testGetEZTrashRoot() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
+    File testRootDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        "jceks://file" + new Path(testRootDir.toString(), "test.jks").toUri());
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+        conf, WebHdfsConstants.WEBHDFS_SCHEME);
+    HdfsAdmin dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+    dfs.getClient().setKeyProvider(
+        cluster.getNameNode().getNamesystem().getProvider());
+    final String testkey = "test_key";
+    DFSTestUtil.createKey(testkey, cluster, conf);
+
+    final Path zone1 = new Path("/zone1");
+    dfs.mkdirs(zone1, new FsPermission(700));
+    dfsAdmin.createEncryptionZone(zone1, testkey,
+        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
+
+    final Path insideEZ = new Path(zone1, "insideEZ");
+    dfs.mkdirs(insideEZ, new FsPermission(700));
+    assertEquals(
+        dfs.getTrashRoot(insideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(insideEZ).toUri().getPath());
+
+    final Path outsideEZ = new Path("/outsideEZ");
+    dfs.mkdirs(outsideEZ, new FsPermission(755));
+    assertEquals(
+        dfs.getTrashRoot(outsideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(outsideEZ).toUri().getPath());
+
+    final Path root = new Path("/");
+    assertEquals(
+        dfs.getTrashRoot(root).toUri().getPath(),
+        webhdfs.getTrashRoot(root).toUri().getPath());
+    assertEquals(
+        webhdfs.getTrashRoot(root).toUri().getPath(),
+        webhdfs.getTrashRoot(zone1).toUri().getPath());
+    assertEquals(
+        webhdfs.getTrashRoot(outsideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(zone1).toUri().getPath());
+  }
+
   @Test
   public void testStoragePolicy() throws Exception {