-
Notifications
You must be signed in to change notification settings - Fork 9.1k
HDFS-16008. RBF: Tool to initialize ViewFS Mapping to Router #2981
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: trunk
Are you sure you want to change the base?
Changes from all commits
ef407a8
9a7c9f1
e0b806f
7fa083a
3be7673
c33ba43
698a3a9
3370147
93dbc90
0204cdc
bc5089e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19,6 +19,7 @@ | |
|
||
import java.io.IOException; | ||
import java.net.InetSocketAddress; | ||
import java.net.URI; | ||
import java.util.Arrays; | ||
import java.util.Collection; | ||
import java.util.LinkedHashMap; | ||
|
@@ -34,6 +35,10 @@ | |
import org.apache.hadoop.fs.CommonConfigurationKeys; | ||
import org.apache.hadoop.fs.StorageType; | ||
import org.apache.hadoop.fs.permission.FsPermission; | ||
import org.apache.hadoop.fs.viewfs.Constants; | ||
import org.apache.hadoop.fs.Path; | ||
import org.apache.hadoop.fs.FileSystem; | ||
import org.apache.hadoop.fs.FileStatus; | ||
import org.apache.hadoop.hdfs.DFSConfigKeys; | ||
import org.apache.hadoop.hdfs.HdfsConfiguration; | ||
import org.apache.hadoop.hdfs.protocol.HdfsConstants; | ||
|
@@ -101,6 +106,9 @@ public class RouterAdmin extends Configured implements Tool { | |
/** Pre-compiled regular expressions to detect duplicated slashes. */ | ||
private static final Pattern SLASHES = Pattern.compile("/+"); | ||
|
||
// Parameter matching when initializing ViewFs mount point. | ||
private static final String ALL_CLUSTERS = "allClusters"; | ||
|
||
public static void main(String[] argv) throws Exception { | ||
Configuration conf = new HdfsConfiguration(); | ||
RouterAdmin admin = new RouterAdmin(conf); | ||
|
@@ -131,8 +139,8 @@ private String getUsage(String cmd) { | |
String[] commands = | ||
{"-add", "-update", "-rm", "-ls", "-getDestination", "-setQuota", | ||
"-setStorageTypeQuota", "-clrQuota", "-clrStorageTypeQuota", | ||
"-safemode", "-nameservice", "-getDisabledNameservices", | ||
"-refresh", "-refreshRouterArgs", | ||
"-initViewFsToMountTable", "-safemode", "-nameservice", | ||
"-getDisabledNameservices", "-refresh", "-refreshRouterArgs", | ||
"-refreshSuperUserGroupsConfiguration"}; | ||
StringBuilder usage = new StringBuilder(); | ||
usage.append("Usage: hdfs dfsrouteradmin :\n"); | ||
|
@@ -171,7 +179,9 @@ private String getUsage(String cmd) { | |
return "\t[-clrQuota <path>]"; | ||
} else if (cmd.equals("-clrStorageTypeQuota")) { | ||
return "\t[-clrStorageTypeQuota <path>]"; | ||
} else if (cmd.equals("-safemode")) { | ||
} else if (cmd.equals("-initViewFsToMountTable")) { | ||
return "\t[-initViewFsToMountTable <clusterName> | allClusters]"; | ||
} else if (cmd.equals("-safemode")) { | ||
return "\t[-safemode enter | leave | get]"; | ||
} else if (cmd.equals("-nameservice")) { | ||
return "\t[-nameservice enable | disable <nameservice>]"; | ||
|
@@ -242,6 +252,10 @@ private boolean validateMin(String[] argv) { | |
if (argv.length < 2) { | ||
return false; | ||
} | ||
} else if ("-initViewFsToMountTable".equals(cmd)) { | ||
if (argv.length < 2) { | ||
return false; | ||
} | ||
} else if ("-getDestination".equals(cmd)) { | ||
if (argv.length < 2) { | ||
return false; | ||
|
@@ -384,6 +398,15 @@ public int run(String[] argv) throws Exception { | |
getDisabledNameservices(); | ||
} else if ("-refresh".equals(cmd)) { | ||
refresh(address); | ||
} else if ("-initViewFsToMountTable".equals(cmd)) { | ||
if (initViewFsToMountTable(argv[i])) { | ||
System.out.println("Successfully init ViewFs mapping to router " + | ||
argv[i]); | ||
} else { | ||
System.err.println( | ||
"Failed when execute command initViewFsToMountTable"); | ||
exitCode = -1; | ||
} | ||
} else if ("-refreshRouterArgs".equals(cmd)) { | ||
exitCode = genericRefresh(argv, i); | ||
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) { | ||
|
@@ -1036,6 +1059,83 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota) | |
return updateResponse.getStatus(); | ||
} | ||
|
||
/** | ||
* Initialize the ViewFS mount point to the Router, | ||
* either to specify a cluster or to initialize it all. | ||
* @param clusterName The specified cluster to initialize, | ||
* AllCluster was then all clusters. | ||
* @return If the quota was updated. | ||
* @throws IOException Error adding the mount point. | ||
*/ | ||
public boolean initViewFsToMountTable(String clusterName) | ||
throws IOException { | ||
// fs.viewfs.mounttable.ClusterX.link./data | ||
final String mountTablePrefix; | ||
if (clusterName.equals(ALL_CLUSTERS)) { | ||
mountTablePrefix = | ||
Constants.CONFIG_VIEWFS_PREFIX + ".*" + | ||
Constants.CONFIG_VIEWFS_LINK + "."; | ||
} else { | ||
mountTablePrefix = | ||
Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." + | ||
Constants.CONFIG_VIEWFS_LINK + "."; | ||
} | ||
final String rootPath = "/"; | ||
Map<String, String> viewFsMap = getConf().getValByRegex( | ||
mountTablePrefix + rootPath); | ||
if (viewFsMap.isEmpty()) { | ||
System.out.println("There is no ViewFs mapping to initialize."); | ||
return true; | ||
} | ||
for (Entry<String, String> entry : viewFsMap.entrySet()) { | ||
Path path = new Path(entry.getValue()); | ||
URI destUri = path.toUri(); | ||
String mountKey = entry.getKey(); | ||
DestinationOrder order = DestinationOrder.HASH; | ||
String mount = mountKey.replaceAll(mountTablePrefix, ""); | ||
if (!destUri.getScheme().equals(HdfsConstants.HDFS_URI_SCHEME)) { | ||
System.out.println("Only supports HDFS, " + | ||
"added Mount Point failed , " + mountKey); | ||
} | ||
if (!mount.startsWith(rootPath) || | ||
!destUri.getPath().startsWith(rootPath)) { | ||
System.out.println("Added Mount Point failed " + mountKey); | ||
continue; | ||
} | ||
String[] nss = new String[]{destUri.getAuthority()}; | ||
boolean added = addMount( | ||
mount, nss, destUri.getPath(), false, | ||
false, order, getACLEntityFormHdfsPath(path, getConf())); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. if we specify There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @Hexiaoqiao I didn't find any problems here, can you tell me the details, thank you very much. |
||
if (added) { | ||
System.out.println("Added mount point " + mount); | ||
} | ||
} | ||
return true; | ||
} | ||
|
||
/** | ||
* Returns ACLEntity according to a HDFS pat. | ||
* @param path A path of HDFS. | ||
*/ | ||
static private ACLEntity getACLEntityFormHdfsPath( | ||
Path path, Configuration conf) { | ||
String owner = null; | ||
String group = null; | ||
FsPermission mode = null; | ||
try { | ||
FileSystem fs = path.getFileSystem(conf); | ||
if (fs.exists(path)) { | ||
FileStatus fileStatus = fs.getFileStatus(path); | ||
owner = fileStatus.getOwner(); | ||
group = fileStatus.getGroup(); | ||
mode = fileStatus.getPermission(); | ||
} | ||
} catch (IOException e) { | ||
System.err.println("Exception encountered " + e); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. just suggest to throw exception rather than just print the error information. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. When FileStatus cannot be obtained, I think the default ACLEntity should be used to add the mapping. |
||
} | ||
return new ACLEntity(owner, group, mode); | ||
} | ||
|
||
/** | ||
* Update storage type quota of specified mount table. | ||
* | ||
|
Uh oh!
There was an error while loading. Please reload this page.