Skip to content

HDFS-15321. Make DFSAdmin tool to work with ViewFileSystemOverloadScheme #2041

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jun 2, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
*/
package org.apache.hadoop.fs.viewfs;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
Expand All @@ -27,6 +28,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;

/******************************************************************************
Expand Down Expand Up @@ -227,4 +229,31 @@ private <T> T newInstance(Class<T> theClass, URI uri, Configuration conf) {

}

/**
 * Admin-only API that exposes the raw child file system backing a path.
 * For a path that resolves to a mount link, the link's target file system is
 * returned un-chrooted (the actual underlying instance, not a
 * ChRootedFileSystem wrapper). For a path that resolves to an internal
 * directory of the mount tree, a file system is initialized directly from the
 * path URI. A path that resolves to neither results in a
 * NotInMountpointException.
 *
 * @param path - fs uri path
 * @param conf - configuration
 * @throws IOException
 */
public FileSystem getRawFileSystem(Path path, Configuration conf)
    throws IOException {
  try {
    InodeTree.ResolveResult<FileSystem> resolved =
        fsState.resolve(getUriPath(path), true);
    if (resolved.isInternalDir()) {
      // Internal mount-tree directory: initialize an fs from the path URI.
      return fsGetter().get(path.toUri(), conf);
    }
    // Mount link: unwrap the chroot to hand back the real child fs.
    return ((ChRootedFileSystem) resolved.targetFileSystem).getMyFs();
  } catch (FileNotFoundException e) {
    // resolve() reports an unconfigured link as FileNotFoundException.
    throw new NotInMountpointException(path,
        "No link found for the given path.");
  }
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ static void addMountLinksToFile(String mountTable, String[] sources,
* Adds the given mount links to the configuration. Mount link mappings are
* in sources, targets at their respective index locations.
*/
static void addMountLinksToConf(String mountTable, String[] sources,
public static void addMountLinksToConf(String mountTable, String[] sources,
String[] targets, Configuration config) throws URISyntaxException {
for (int i = 0; i < sources.length; i++) {
String src = sources[i];
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
Expand All @@ -21,6 +22,8 @@

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
Expand All @@ -43,19 +46,29 @@ public class AdminHelper {
/**
 * Returns the default file system for the given configuration as a
 * DistributedFileSystem. Delegates the type check (and any
 * ViewFileSystemOverloadScheme unwrapping) to checkAndGetDFS.
 *
 * @param conf configuration used to create the default file system
 * @return the underlying DistributedFileSystem
 * @throws IOException if the file system cannot be created
 * @throws IllegalArgumentException if the resolved fs is not HDFS
 */
static DistributedFileSystem getDFS(Configuration conf)
    throws IOException {
  FileSystem fs = FileSystem.get(conf);
  return checkAndGetDFS(fs, conf);
}

/**
 * Returns the file system for the given URI as a DistributedFileSystem,
 * applying the same check/unwrap logic as the Configuration-only overload.
 *
 * @param uri the file system URI to resolve
 * @param conf configuration used to create the file system
 * @return the underlying DistributedFileSystem
 * @throws IOException if the file system cannot be created
 */
static DistributedFileSystem getDFS(URI uri, Configuration conf)
    throws IOException {
  return checkAndGetDFS(FileSystem.get(uri, conf), conf);
}

static DistributedFileSystem checkAndGetDFS(FileSystem fs, Configuration conf)
throws IOException {
if ((fs instanceof ViewFileSystemOverloadScheme)) {
// With ViewFSOverloadScheme, the admin will pass -fs option with intended
// child fs mount path. GenericOptionsParser would have set the given -fs
// as FileSystem's defaultURI. So, we are using FileSystem.getDefaultUri
// to use the given -fs path.
fs = ((ViewFileSystemOverloadScheme) fs)
.getRawFileSystem(new Path(FileSystem.getDefaultUri(conf)), conf);
}
if (!(fs instanceof DistributedFileSystem)) {
throw new IllegalArgumentException("FileSystem " + fs.getUri()
+ " is not an HDFS file system");
+ " is not an HDFS file system. The fs class is: "
+ fs.getClass().getName());
}
return (DistributedFileSystem) fs;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -130,11 +130,7 @@ public DFSAdminCommand(Configuration conf) {
/**
 * Runs the command against the resolved file system of the given path.
 * The HDFS type check (including unwrapping of ViewFileSystemOverloadScheme)
 * is delegated to AdminHelper.checkAndGetDFS so all admin commands behave
 * consistently.
 *
 * @param pathData the resolved path and its file system
 * @throws IOException if the fs is not HDFS or the command fails
 */
@Override
public void run(PathData pathData) throws IOException {
  FileSystem fs = pathData.fs;
  this.dfs = AdminHelper.checkAndGetDFS(fs, getConf());
  run(pathData.path);
}
}
Expand Down Expand Up @@ -485,12 +481,7 @@ public DFSAdmin(Configuration conf) {
}

/**
 * Returns the default file system as a DistributedFileSystem, delegating to
 * AdminHelper so that ViewFileSystemOverloadScheme is unwrapped to its raw
 * HDFS child file system when configured.
 *
 * @return the underlying DistributedFileSystem
 * @throws IOException if the file system cannot be created or is not HDFS
 */
protected DistributedFileSystem getDFS() throws IOException {
  return AdminHelper.getDFS(getConf());
}

/**
Expand Down
Loading