HDFS-15886. Add a way to get protected dirs from a special configuration file. #2768

Open · wants to merge 7 commits into trunk
@@ -2435,6 +2435,20 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
}
}

/**
* Requests the namenode to refresh the set of protected directories from
* its configuration.
*
* @throws IOException If an I/O error occurred.
* @see ClientProtocol#refreshProtectedDirectories()
*/
public void refreshProtectedDirectories() throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("refreshProtectedDirectories")) {
namenode.refreshProtectedDirectories();
}
}

/**
* @see ClientProtocol#finalizeUpgrade()
*/
@@ -2012,6 +2012,15 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
dfs.setBalancerBandwidth(bandwidth);
}

/**
* Requests the namenode to refresh protected directories from config.
*
* @throws IOException If an I/O error occurred.
*/
public void refreshProtectedDirectories() throws IOException {
dfs.refreshProtectedDirectories();
}
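
For orientation, a minimal usage sketch (not part of this diff): a client triggering the refresh through the new DistributedFileSystem method. The class name and flow are illustrative, assuming a cluster whose NameNode carries this change.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class RefreshProtectedDirsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    try (FileSystem fs = FileSystem.get(conf)) {
      // NameNode-wide refresh; per the FSNamesystem hunk below, the caller
      // must hold superuser privilege or the RPC fails.
      ((DistributedFileSystem) fs).refreshProtectedDirectories();
    }
  }
}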

/**
* Get a canonical service name for this file system. If the URI is logical,
* the hostname part of the URI will be returned.
@@ -1041,6 +1041,14 @@ CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
@Idempotent
void setBalancerBandwidth(long bandwidth) throws IOException;

/**
* Tell namenode to refresh protected directories from config.
*
* @throws IOException If an I/O error occurred.
*/
@Idempotent
void refreshProtectedDirectories() throws IOException;

/**
* Get the file info for a specific file or directory.
* @param src The string representation of the path to the file
@@ -173,6 +173,7 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshProtectedDirectoriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
@@ -289,6 +290,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
private final static RefreshNodesRequestProto VOID_REFRESH_NODES_REQUEST =
RefreshNodesRequestProto.newBuilder().build();

private final static RefreshProtectedDirectoriesRequestProto
VOID_REFRESH_PROTECTED_DIR_REQUEST =
RefreshProtectedDirectoriesRequestProto.newBuilder().build();

private final static FinalizeUpgradeRequestProto
VOID_FINALIZE_UPGRADE_REQUEST =
FinalizeUpgradeRequestProto.newBuilder().build();
@@ -1185,6 +1190,16 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
}
}

@Override
public void refreshProtectedDirectories() throws IOException {
try {
rpcProxy.refreshProtectedDirectories(null,
VOID_REFRESH_PROTECTED_DIR_REQUEST);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}

@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy,
@@ -761,6 +761,12 @@ message SetBalancerBandwidthResponseProto { // void response
message GetDataEncryptionKeyRequestProto { // no parameters
}

message RefreshProtectedDirectoriesRequestProto { // no parameters
}

message RefreshProtectedDirectoriesResponseProto { // void response
}

message GetDataEncryptionKeyResponseProto {
optional DataEncryptionKeyProto dataEncryptionKey = 1;
}
@@ -982,6 +988,8 @@ service ClientNamenodeProtocol {
returns(hadoop.common.CancelDelegationTokenResponseProto);
rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
returns(SetBalancerBandwidthResponseProto);
rpc refreshProtectedDirectories(RefreshProtectedDirectoriesRequestProto)
returns(RefreshProtectedDirectoriesResponseProto);
rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
returns(GetDataEncryptionKeyResponseProto);
rpc createSnapshot(CreateSnapshotRequestProto)
@@ -1188,6 +1188,14 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
rpcClient.invokeConcurrent(nss, method, true, false);
}

@Override
public void refreshProtectedDirectories() throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.UNCHECKED);
RemoteMethod method = new RemoteMethod("refreshProtectedDirectories");
final Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
rpcClient.invokeConcurrent(nss, method, true, true);
}

@Override
public ContentSummary getContentSummary(String path) throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.READ);
@@ -1211,6 +1211,11 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
clientProto.setBalancerBandwidth(bandwidth);
}

@Override // ClientProtocol
public void refreshProtectedDirectories() throws IOException {
clientProto.refreshProtectedDirectories();
}

@Override // ClientProtocol
public ContentSummary getContentSummary(String path) throws IOException {
return clientProto.getContentSummary(path);
@@ -25,6 +25,8 @@
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshProtectedDirectoriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshProtectedDirectoriesResponseProto;
import org.apache.hadoop.thirdparty.protobuf.ByteString;
import org.apache.hadoop.thirdparty.protobuf.ProtocolStringList;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -445,6 +447,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
VOID_SATISFYSTORAGEPOLICY_RESPONSE = SatisfyStoragePolicyResponseProto
.getDefaultInstance();

private static final RefreshProtectedDirectoriesResponseProto
VOID_REFRESHPROTECTEDDIRECTORIES_RESPONSE =
RefreshProtectedDirectoriesResponseProto.newBuilder().build();

/**
* Constructor
*
@@ -1258,6 +1264,18 @@ public SetBalancerBandwidthResponseProto setBalancerBandwidth(
}
}

@Override
public RefreshProtectedDirectoriesResponseProto refreshProtectedDirectories(
RpcController controller, RefreshProtectedDirectoriesRequestProto req)
throws ServiceException {
try {
server.refreshProtectedDirectories();
return VOID_REFRESHPROTECTEDDIRECTORIES_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);
}
}

@Override
public GetDataEncryptionKeyResponseProto getDataEncryptionKey(
RpcController controller, GetDataEncryptionKeyRequestProto request)
@@ -18,10 +18,9 @@
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
- import org.apache.hadoop.util.StringUtils;
+ import org.apache.hadoop.hdfs.util.ProtectedDirsConfigReader;

import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
- import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException;

@@ -525,23 +524,9 @@ public boolean isImageLoaded() {
*/
@VisibleForTesting
static SortedSet<String> parseProtectedDirectories(Configuration conf) {
- return parseProtectedDirectories(conf
- .getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES));
- }
-
- /**
- * Parse configuration setting dfs.namenode.protected.directories to retrieve
- * the set of protected directories.
- *
- * @param protectedDirsString
- * a comma separated String representing a bunch of paths.
- * @return a TreeSet
- */
- @VisibleForTesting
- static SortedSet<String> parseProtectedDirectories(
- final String protectedDirsString) {
- return parseProtectedDirectories(StringUtils
- .getTrimmedStringCollection(protectedDirsString));
+ return parseProtectedDirectories(
+ ProtectedDirsConfigReader.parseProtectedDirsFromConfig(
+ conf.getTrimmed(FS_PROTECTED_DIRECTORIES)));
}

private static SortedSet<String> parseProtectedDirectories(
@@ -560,22 +545,16 @@ public boolean isProtectedSubDirectoriesEnable() {
}

/**
- * Set directories that cannot be removed unless empty, even by an
+ * Refresh directories that cannot be removed unless empty, even by an
* administrator.
*
- * @param protectedDirsString
- * comma separated list of protected directories
+ * @param newConf the configuration from which the protected directories
+ * are reloaded
*/
- String setProtectedDirectories(String protectedDirsString) {
- if (protectedDirsString == null) {
- protectedDirectories = new TreeSet<>();
- } else {
- protectedDirectories = parseProtectedDirectories(protectedDirsString);
- }
-
- return Joiner.on(",").skipNulls().join(protectedDirectories);
+ void refreshProtectedDirectories(Configuration newConf) {
+ LOG.info("Refreshing protected directories from config file");
+ protectedDirectories = parseProtectedDirectories(newConf);
}

BlockManager getBlockManager() {
return getFSNamesystem().getBlockManager();
}
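
The ProtectedDirsConfigReader used in the hunk above is introduced elsewhere in this PR and its body is not shown here. Below is a hypothetical sketch of such a reader, assuming the config value now names a local file listing one protected directory per line; the file format, method signature, and error handling are all assumptions, not the PR's actual code.

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;

public final class ProtectedDirsConfigReader {
  private ProtectedDirsConfigReader() {}

  // Sketch: read one directory per line, skipping blanks and '#' comments.
  public static Collection<String> parseProtectedDirsFromConfig(
      String dirsFilePath) {
    Collection<String> dirs = new ArrayList<>();
    if (dirsFilePath == null || dirsFilePath.isEmpty()) {
      return dirs; // nothing configured
    }
    try (BufferedReader reader = Files.newBufferedReader(
        Paths.get(dirsFilePath), StandardCharsets.UTF_8)) {
      String line;
      while ((line = reader.readLine()) != null) {
        String trimmed = line.trim();
        if (!trimmed.isEmpty() && !trimmed.startsWith("#")) {
          dirs.add(trimmed);
        }
      }
    } catch (IOException e) {
      // Sketch-only behavior: an unreadable file yields an empty set; the
      // real reader may instead propagate or log the failure.
    }
    return dirs;
  }
}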
@@ -4981,6 +4981,14 @@ void setBalancerBandwidth(long bandwidth) throws IOException {
logAuditEvent(true, operationName, null);
}

void refreshProtectedDirectories() throws IOException {
String operationName = "refreshProtectedDirs";
checkOperation(OperationCategory.UNCHECKED);
checkSuperuserPrivilege(operationName);
getFSDirectory().refreshProtectedDirectories(new HdfsConfiguration());
logAuditEvent(true, operationName, null);
}
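
One point worth noting (a reading of the code above, not text from the PR): the refresh constructs a brand-new HdfsConfiguration rather than reusing the instance loaded at NameNode startup, so the *-site.xml resources are re-read and both the FS_PROTECTED_DIRECTORIES value and the file it points to take effect without a restart. A small sketch of what gets re-read:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES;

public class RefreshSemanticsSketch {
  public static void main(String[] args) {
    // A fresh HdfsConfiguration re-loads hdfs-site.xml / core-site.xml from
    // the classpath, picking up any edits made since startup.
    Configuration newConf = new HdfsConfiguration();
    String protectedDirsSource = newConf.getTrimmed(FS_PROTECTED_DIRECTORIES);
    // FSDirectory#refreshProtectedDirectories(newConf) then parses this
    // value via ProtectedDirsConfigReader (see the FSDirectory hunk above).
    System.out.println("Protected dirs source: " + protectedDirsSource);
  }
}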

boolean setSafeMode(SafeModeAction action) throws IOException {
String operationName = action.toString().toLowerCase();
boolean error = false;
@@ -171,7 +171,6 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
- import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT;
@@ -320,7 +319,6 @@ public enum OperationCategory {
.newTreeSet(Lists.newArrayList(
DFS_HEARTBEAT_INTERVAL_KEY,
DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
- FS_PROTECTED_DIRECTORIES,
HADOOP_CALLER_CONTEXT_ENABLED_KEY,
DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
@@ -2170,8 +2168,6 @@ protected String reconfigurePropertyImpl(String property, String newVal)
return reconfHeartbeatInterval(datanodeManager, property, newVal);
} else if (property.equals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY)) {
return reconfHeartbeatRecheckInterval(datanodeManager, property, newVal);
- } else if (property.equals(FS_PROTECTED_DIRECTORIES)) {
- return reconfProtectedDirectories(newVal);
} else if (property.equals(HADOOP_CALLER_CONTEXT_ENABLED_KEY)) {
return reconfCallerContextEnabled(newVal);
} else if (property.equals(ipcClientRPCBackoffEnable)) {
@@ -2296,9 +2292,6 @@ private String reconfHeartbeatRecheckInterval(
}
}

- private String reconfProtectedDirectories(String newVal) {
- return getNamesystem().getFSDirectory().setProtectedDirectories(newVal);
- }

private String reconfCallerContextEnabled(String newVal) {
Boolean callerContextEnabled;
@@ -1453,7 +1453,19 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
checkNNStartup();
namesystem.setBalancerBandwidth(bandwidth);
}

/**
* Tell namenode to refresh protected directories from config.
*
* @throws IOException If an I/O error occurred.
*/
@Override // ClientProtocol
public void refreshProtectedDirectories() throws IOException {
checkNNStartup();
namesystem.refreshProtectedDirectories();
}

@Override // ClientProtocol
public ContentSummary getContentSummary(String path) throws IOException {
checkNNStartup();