Skip to content
This repository has been archived by the owner on Jul 22, 2022. It is now read-only.

Commit

Permalink
HDFS-14758. Make lease hard limit configurable and reduce the default.
Browse files Browse the repository at this point in the history
Contributed by hemanthboyina.
  • Loading branch information
kihwal committed Feb 11, 2020
1 parent e637797 commit 9b8a78d
Show file tree
Hide file tree
Showing 12 changed files with 50 additions and 23 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -577,10 +577,10 @@ public boolean renewLease() throws IOException {
} catch (IOException e) {
// Abort if the lease has already expired.
final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
if (elapsed > dfsClientConf.getleaseHardLimitPeriod()) {
LOG.warn("Failed to renew lease for " + clientName + " for "
+ (elapsed/1000) + " seconds (>= hard-limit ="
+ (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000) + " seconds.) "
+ (dfsClientConf.getleaseHardLimitPeriod() / 1000) + " seconds.) "
+ "Closing all files being written ...", e);
closeAllFilesBeingWritten(true);
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -242,6 +242,9 @@ public interface HdfsClientConfigKeys {
String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS =
"dfs.provided.aliasmap.inmemory.dnrpc-address";

String DFS_LEASE_HARDLIMIT_KEY = "dfs.namenode.lease-hard-limit-sec";
long DFS_LEASE_HARDLIMIT_DEFAULT = 20 * 60;

/**
* These are deprecated config keys to client code.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,7 @@ public class DfsClientConf {
private final boolean dataTransferTcpNoDelay;

private final boolean deadNodeDetectionEnabled;
private final long leaseHardLimitPeriod;

public DfsClientConf(Configuration conf) {
// The hdfsTimeout is currently the same as the ipc timeout
Expand Down Expand Up @@ -285,6 +286,10 @@ public DfsClientConf(Configuration conf) {
HdfsClientConfigKeys.StripedRead.THREADPOOL_SIZE_KEY +
" must be greater than 0.");
replicaAccessorBuilderClasses = loadReplicaAccessorBuilderClasses(conf);

leaseHardLimitPeriod =
conf.getLong(HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000;
}

@SuppressWarnings("unchecked")
Expand Down Expand Up @@ -618,6 +623,13 @@ public boolean isDeadNodeDetectionEnabled() {
return deadNodeDetectionEnabled;
}

/**
 * Returns the lease hard limit period used by the client to decide when a
 * lease has expired beyond recovery.
 *
 * @return the lease hard limit period in milliseconds (the configured
 *         value, dfs.namenode.lease-hard-limit-sec, is in seconds and is
 *         converted to milliseconds at construction time)
 */
public long getleaseHardLimitPeriod() {
return leaseHardLimitPeriod;
}

/**
* @return the replicaAccessorBuilderClasses
*/
Expand Down
11 changes: 0 additions & 11 deletions ...oject/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -116,17 +116,6 @@ public final class HdfsConstants {
* lease, another client can preempt the lease.
*/
public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
/**
* For a HDFS client to write to a file, a lease is granted; During the lease
* period, no other client can write to the file. The writing client can
* periodically renew the lease. When the file is closed, the lease is
* revoked. The lease duration is bound by a
* {@link HdfsConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this hard
* limit. If after the hard limit expires and the client has failed to renew
* the lease, HDFS assumes that the client has quit and will automatically
* close the file on behalf of the writer, and recover the lease.
*/
public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;

// SafeMode actions
public enum SafeModeAction {
Expand Down
6 changes: 6 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -1741,4 +1741,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
@Deprecated
public static final long DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT =
HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT;

public static final String DFS_LEASE_HARDLIMIT_KEY =
HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_KEY;
public static final long DFS_LEASE_HARDLIMIT_DEFAULT =
HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT;

}
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,9 @@

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
Expand Down Expand Up @@ -84,7 +86,7 @@ public class LeaseManager {
.getName());
private final FSNamesystem fsnamesystem;
private long softLimit = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
private long hardLimit = HdfsConstants.LEASE_HARDLIMIT_PERIOD;
private long hardLimit;
static final int INODE_FILTER_WORKER_COUNT_MAX = 4;
static final int INODE_FILTER_WORKER_TASK_MIN = 512;
private long lastHolderUpdateTime;
Expand Down Expand Up @@ -112,7 +114,10 @@ public int compare(Lease o1, Lease o2) {
private volatile boolean shouldRunMonitor;

/**
 * Creates a LeaseManager bound to the given namesystem.
 *
 * NOTE(review): the hard limit is read from a freshly constructed
 * Configuration (default resources only), not from the namesystem's own
 * configuration object — a value set programmatically on the NameNode's
 * conf would be ignored here. Confirm this is intended, or thread the
 * namesystem's Configuration through instead.
 */
LeaseManager(FSNamesystem fsnamesystem) {
Configuration conf = new Configuration();
this.fsnamesystem = fsnamesystem;
// The key is configured in seconds; hardLimit is kept in milliseconds.
this.hardLimit = conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
DFSConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000;
updateInternalLeaseHolder();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5745,4 +5745,12 @@
octal or symbolic.
</description>
</property>

<property>
<name>dfs.namenode.lease-hard-limit-sec</name>
<value>1200</value>
<description>
The hard limit, in seconds, on the lease period: if a client holding a
lease fails to renew it for this long, the namenode assumes the client
has quit and automatically recovers the lease, closing any files the
client was writing.
</description>
</property>
</configuration>
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
Expand Down Expand Up @@ -107,7 +106,9 @@ private void recoverFile(final FileSystem fs) throws Exception {

// set the soft limit to be 1 second so that the
// namenode triggers lease recovery upon append request
cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
cluster.setLeasePeriod(1,
conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
DFSConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT));

// Trying recovery
int tries = 60;
Expand Down
5 changes: 3 additions & 2 deletions hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -111,9 +111,10 @@ public void testLeaseAbort() throws Exception {
Assert.fail("Write failed.");
}

long hardlimit = conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
DFSConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000;
// make it look like the hard limit has been exceeded.
dfs.lastLeaseRenewal = Time.monotonicNow()
- HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
dfs.lastLeaseRenewal = Time.monotonicNow() - hardlimit - 1000;
dfs.renewLease();

// this should not work.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -407,10 +407,10 @@ public void testSoftLeaseRecovery() throws Exception {
Map<String, String []> u2g_map = new HashMap<String, String []>(1);
u2g_map.put(fakeUsername, new String[] {fakeGroup});
DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);

long hardlimit = conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
DFSConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000;
// Reset default lease periods
cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
HdfsConstants.LEASE_HARDLIMIT_PERIOD);
cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, hardlimit);
//create a file
// create a random file name
String filestr = "/foo" + AppendTestUtil.nextInt();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -659,7 +659,8 @@ public void testTruncateFailure() throws IOException {

NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
HdfsConstants.LEASE_HARDLIMIT_PERIOD);
conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
DFSConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000);

checkFullFile(p, newLength, contents);
fs.delete(p, false);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -304,7 +304,8 @@ public void testLease() throws Exception {
}
} finally {
NameNodeAdapter.setLeasePeriod(fsn, HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
HdfsConstants.LEASE_HARDLIMIT_PERIOD);
conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
DFSConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000);
}
}
}

0 comments on commit 9b8a78d

Please sign in to comment.