YARN-11664: Remove HDFS Binaries/Jars Dependency From Yarn
shameersss1 committed Mar 29, 2024
1 parent adab3a2 commit 1597445
Showing 9 changed files with 58 additions and 15 deletions.
@@ -0,0 +1,42 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.fs;

import org.apache.hadoop.io.Text;

/**
 * Constants shared between HDFS and other Hadoop modules (such as YARN),
 * kept in hadoop-common so that downstream code can reference them without
 * depending on HDFS binaries or jars.
 */
public final class HdfsCommonConstants {

  /**
   * Kind of the HDFS delegation token: {@code HDFS_DELEGATION_TOKEN}.
   */
  public static final Text HDFS_DELEGATION_KIND =
      new Text("HDFS_DELEGATION_TOKEN");

  /**
   * Configuration key for the ACL of DFS cluster administrators: {@value}.
   */
  public static final String DFS_ADMIN = "dfs.cluster.administrators";

  // Constants-only class; no instances.
  private HdfsCommonConstants() {
  }

}
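
As a usage sketch (not part of the commit): with these constants in hadoop-common, YARN-side code can identify HDFS delegation tokens with nothing but hadoop-common on the compile path. TokenKindCheck below is a hypothetical name:

import org.apache.hadoop.fs.HdfsCommonConstants;
import org.apache.hadoop.io.Text;

public class TokenKindCheck {
  public static void main(String[] args) {
    // A token kind as it would appear on a received token.
    Text kind = new Text("HDFS_DELEGATION_TOKEN");
    // Compare against the shared constant instead of
    // DelegationTokenIdentifier.HDFS_DELEGATION_KIND from hadoop-hdfs-client.
    System.out.println(kind.equals(HdfsCommonConstants.HDFS_DELEGATION_KIND));
  }
}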
@@ -25,6 +25,7 @@
import java.util.Map;

import org.apache.commons.collections.map.LRUMap;
+import org.apache.hadoop.fs.HdfsCommonConstants;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.io.Text;
@@ -41,8 +42,7 @@
@InterfaceAudience.Private
public class DelegationTokenIdentifier
extends AbstractDelegationTokenIdentifier {
-  public static final Text HDFS_DELEGATION_KIND =
-      new Text("HDFS_DELEGATION_TOKEN");
+  public static final Text HDFS_DELEGATION_KIND = HdfsCommonConstants.HDFS_DELEGATION_KIND;

@SuppressWarnings("unchecked")
private static Map<TokenIdentifier, UserGroupInformation> ugiCache =
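The replacement above keeps the public HDFS_DELEGATION_KIND field but defines it as an alias of the shared constant, so the serialized token kind and all existing references are unchanged. A hypothetical check (KindAliasCheck is ours, not the commit's), assuming both hadoop-common and hadoop-hdfs-client on the classpath:

import org.apache.hadoop.fs.HdfsCommonConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;

public class KindAliasCheck {
  public static void main(String[] args) {
    // Both names now resolve to the same Text value, so tokens issued by
    // old and new code remain interchangeable; prints "true".
    System.out.println(DelegationTokenIdentifier.HDFS_DELEGATION_KIND
        .equals(HdfsCommonConstants.HDFS_DELEGATION_KIND));
  }
}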
@@ -18,6 +18,7 @@

package org.apache.hadoop.hdfs;

+import org.apache.hadoop.fs.HdfsCommonConstants;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -381,7 +382,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {

public static final String DFS_NAMENODE_XATTRS_ENABLED_KEY = "dfs.namenode.xattrs.enabled";
public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true;
-  public static final String DFS_ADMIN = "dfs.cluster.administrators";
+  public static final String DFS_ADMIN = HdfsCommonConstants.DFS_ADMIN;
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource";
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
public static final String DFS_SERVER_HTTPS_KEYPASSWORD_KEY = "ssl.server.keystore.keypassword";
@@ -23,8 +23,8 @@
import org.apache.commons.cli.Options;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.HdfsCommonConstants;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -230,7 +230,7 @@ private static void removeHdfsDelegationToken(UserGroupInformation user) {
while (iter.hasNext()) {
Token<? extends TokenIdentifier> token = iter.next();
if (token.getKind().equals(
-          DelegationTokenIdentifier.HDFS_DELEGATION_KIND)) {
+          HdfsCommonConstants.HDFS_DELEGATION_KIND)) {
LOG.info("Remove HDFS delegation token {}.", token);
iter.remove();
}
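The hunk above is the same substitution applied where YARN scans a user's credentials for HDFS delegation tokens. A self-contained sketch of that pattern (ListHdfsTokens is a hypothetical driver class), needing only hadoop-common:

import org.apache.hadoop.fs.HdfsCommonConstants;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class ListHdfsTokens {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // Select HDFS delegation tokens purely by kind, with no HDFS imports.
    for (Token<? extends TokenIdentifier> token
        : ugi.getCredentials().getAllTokens()) {
      if (token.getKind().equals(HdfsCommonConstants.HDFS_DELEGATION_KIND)) {
        System.out.println("HDFS delegation token: " + token);
      }
    }
  }
}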
@@ -31,10 +31,10 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.HdfsCommonConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.registry.client.api.RegistryConstants;
@@ -1707,12 +1707,12 @@ private boolean checkPermissions(Path dependencyLibTarGzip) throws
YarnConfiguration.YARN_ADMIN_ACL,
YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
AccessControlList dfsAdminAcl = new AccessControlList(
-        getConfig().get(DFSConfigKeys.DFS_ADMIN, " "));
+        getConfig().get(HdfsCommonConstants.DFS_ADMIN, " "));
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
if (!yarnAdminAcl.isUserAllowed(ugi) && !dfsAdminAcl.isUserAllowed(ugi)) {
LOG.error("User must be on the {} or {} list to have permission to " +
"upload AM dependency tarball", YarnConfiguration.YARN_ADMIN_ACL,
-          DFSConfigKeys.DFS_ADMIN);
+          HdfsCommonConstants.DFS_ADMIN);
return false;
}

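Here the dfs.cluster.administrators ACL is resolved through HdfsCommonConstants rather than DFSConfigKeys, which removes the compile-time HDFS dependency from the permission check. A standalone sketch of that check (DfsAdminCheck is a hypothetical name; as in the code above, the single-space default yields an ACL with no users or groups):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.HdfsCommonConstants;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class DfsAdminCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Read "dfs.cluster.administrators" via the shared constant.
    AccessControlList dfsAdminAcl = new AccessControlList(
        conf.get(HdfsCommonConstants.DFS_ADMIN, " "));
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    System.out.println(ugi.getShortUserName() + " allowed: "
        + dfsAdminAcl.isUserAllowed(ugi));
  }
}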
@@ -20,9 +20,9 @@

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.HdfsCommonConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.client.cli.ApplicationCLI;
@@ -138,7 +138,7 @@ public void setup() throws Throwable {
basedir.mkdirs();
}
yarnAdminNoneAclProp = YarnConfiguration.YARN_ADMIN_ACL + "=none";
-    dfsAdminAclProp = DFSConfigKeys.DFS_ADMIN + "=" +
+    dfsAdminAclProp = HdfsCommonConstants.DFS_ADMIN + "=" +
UserGroupInformation.getCurrentUser();
System.setProperty(YarnServiceConstants.PROPERTY_LIB_DIR, basedir
.getAbsolutePath());
@@ -52,6 +52,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ClusterStorageCapacityExceededException;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -60,7 +61,6 @@
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;
import org.apache.hadoop.io.Writable;
@@ -547,7 +547,7 @@ public void append(LogKey logKey, LogValue logValue) throws IOException {
}

@Override
-  public void close() throws DSQuotaExceededException {
+  public void close() throws ClusterStorageCapacityExceededException {
try {
if (writer != null) {
writer.close();
@@ -557,7 +557,7 @@ public void close() throws DSQuotaExceededException {
} finally {
try {
this.fsDataOStream.close();
-      } catch (DSQuotaExceededException e) {
+      } catch (ClusterStorageCapacityExceededException e) {
LOG.error("Exception in closing {}",
this.fsDataOStream.getClass(), e);
throw e;
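DSQuotaExceededException comes from the HDFS client jars, but in current Hadoop it extends ClusterStorageCapacityExceededException (via QuotaExceededException), which lives in hadoop-common's org.apache.hadoop.fs. Declaring and catching the parent type therefore still covers DFS quota failures while dropping the HDFS import. A hypothetical demo of catching via the parent type (CapacityCatchDemo is ours; the String constructor is an assumption):

import org.apache.hadoop.fs.ClusterStorageCapacityExceededException;

public class CapacityCatchDemo {
  // Stands in for a close() that may hit a storage-capacity limit.
  static void close() throws ClusterStorageCapacityExceededException {
    throw new ClusterStorageCapacityExceededException("capacity exceeded");
  }

  public static void main(String[] args) {
    try {
      close();
    } catch (ClusterStorageCapacityExceededException e) {
      // Any subclass, including HDFS's DSQuotaExceededException, lands here.
      System.out.println("caught: " + e.getMessage());
    }
  }
}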
@@ -29,6 +29,7 @@
import java.util.List;
import java.util.Map;

+import org.apache.hadoop.fs.ClusterStorageCapacityExceededException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.logaggregation.ContainerLogFileInfo;
import org.apache.hadoop.yarn.logaggregation.ExtendedLogMetaRequest;
@@ -43,7 +44,6 @@
import org.apache.hadoop.fs.HarFs;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
@@ -99,7 +99,7 @@ public void closeWriter() throws LogAggregationDFSException {
if (this.writer != null) {
try {
this.writer.close();
-      } catch (DSQuotaExceededException e) {
+      } catch (ClusterStorageCapacityExceededException e) {
throw new LogAggregationDFSException(e);
} finally {
this.writer = null;
