Allow block reconstruction pending timeout to be refreshable
lfxy committed Jul 16, 2022
1 parent 8774f17 commit bdd9121
Showing 5 changed files with 58 additions and 6 deletions.
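In short, this commit makes the pending reconstruction timeout tunable on a live NameNode: the BlockManager gains setReconstructionPendingTimeout/getReconstructionPendingTimeout, PendingReconstructionBlocks exposes its internal timeout, DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY is added to the NameNode's reconfigurable properties, and the reconfiguration tests are extended accordingly. Once the key is reconfigurable, an operator would typically update hdfs-site.xml and then drive the refresh through the standard dfsadmin reconfiguration workflow. The sketch below shows one way to do that from Java; it is illustrative only, and the host:port value is a placeholder rather than anything taken from this change.

// Sketch only: triggering a live NameNode reconfiguration after editing
// hdfs-site.xml, assuming the standard `dfsadmin -reconfig` workflow.
// The host:port below is a placeholder, not part of this commit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class ReconfigPendingTimeoutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Ask the NameNode to re-read its configuration asynchronously ...
    ToolRunner.run(new DFSAdmin(conf),
        new String[] {"-reconfig", "namenode", "nn-host:8020", "start"});
    // ... then poll until the reconfiguration task has finished.
    ToolRunner.run(new DFSAdmin(conf),
        new String[] {"-reconfig", "namenode", "nn-host:8020", "status"});
  }
}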
@@ -1067,6 +1067,26 @@ public void setBlocksReplWorkMultiplier(int newVal) {
blocksReplWorkMultiplier = newVal;
}

/**
* Updates the value used for pendingReconstruction timeout, which is set by
* {@code DFSConfigKeys.
* DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY} initially.
*
* @param newVal - Must be a positive non-zero integer.
*/
public void setReconstructionPendingTimeout(int newVal) {
ensurePositiveInt(newVal,
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY);
pendingReconstruction.setTimeout(newVal * 1000L);
}

/** Returns the current setting for pendingReconstruction timeout, set by
* {@code DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY}.
*/
public int getReconstructionPendingTimeout() {
return (int)(pendingReconstruction.getTimeout() / 1000L);
}

public int getDefaultStorageNum(BlockInfo block) {
switch (block.getBlockType()) {
case STRIPED: return ((BlockInfoStriped) block).getRealTotalBlockNum();
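The accessors above keep the configuration unit (seconds) at the BlockManager boundary while PendingReconstructionBlocks stores milliseconds internally. A minimal, self-contained sketch of that unit round trip, for illustration only (it is not code from this commit):

// Illustrative only: mirrors the seconds <-> milliseconds round trip performed
// by setReconstructionPendingTimeout / getReconstructionPendingTimeout above.
public class PendingTimeoutUnitsSketch {
  public static void main(String[] args) {
    int newValSeconds = 300;                      // value handed to the setter
    long internalMillis = newValSeconds * 1000L;  // stored via PendingReconstructionBlocks#setTimeout
    int reportedSeconds = (int) (internalMillis / 1000L); // returned by the getter
    System.out.println(internalMillis + " ms internally, reported as " + reportedSeconds + " s");
  }
}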
@@ -76,6 +76,14 @@ void start() {
timerThread.start();
}

public void setTimeout(long timeoutPeriod) {
this.timeout = timeoutPeriod;
}

public long getTimeout() {
return this.timeout;
}

/**
* Add a block to the list of pending reconstructions
* @param block The corresponding block
@@ -200,6 +200,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT;

import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
@@ -347,7 +349,8 @@ public enum OperationCategory {
DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY,
DFS_BLOCK_INVALIDATE_LIMIT_KEY,
DFS_DATANODE_PEER_STATS_ENABLED_KEY,
DFS_DATANODE_MAX_NODES_TO_REPORT_KEY));
DFS_DATANODE_MAX_NODES_TO_REPORT_KEY,
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY));

private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2207,7 +2210,8 @@ protected String reconfigurePropertyImpl(String property, String newVal)
} else if (property.equals(DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY)
|| property.equals(DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY)
|| property.equals(
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION)) {
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION)
|| property.equals(DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY)) {
return reconfReplicationParameters(newVal, property);
} else if (property.equals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY) || property
.equals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY)) {
@@ -2253,6 +2257,14 @@ private String reconfReplicationParameters(final String newVal,
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT,
newVal));
newSetting = bm.getBlocksReplWorkMultiplier();
} else if (
property.equals(
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY)) {
bm.setReconstructionPendingTimeout(
adjustNewVal(
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT,
newVal));
newSetting = bm.getReconstructionPendingTimeout();
} else {
throw new IllegalArgumentException("Unexpected property " +
property + " in reconfReplicationParameters");
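As with the other replication parameters in reconfReplicationParameters, the new value is first passed through adjustNewVal together with DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT. The standalone sketch below illustrates the assumed fallback behavior of that helper: when the property is reset (a null new value) the compiled-in default is used, otherwise the supplied string is parsed. This is an assumption about the existing helper, not code introduced by this change.

// Assumption (labeled, not from this commit): adjustNewVal(default, newVal)
// falls back to the default when the new value is null (property reset) and
// otherwise parses the string. This standalone sketch only illustrates that.
public class AdjustNewValSketch {
  static int adjustNewVal(int defaultValue, String newVal) {
    return (newVal == null) ? defaultValue : Integer.parseInt(newVal);
  }
  public static void main(String[] args) {
    System.out.println(adjustNewVal(300, null));   // falls back to the default
    System.out.println(adjustNewVal(300, "180"));  // uses the reconfigured value
  }
}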
@@ -49,6 +49,9 @@ public void setup() throws IOException {
config.setInt(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
12);
config.setInt(
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
300);

cluster = new MiniDFSCluster.Builder(config)
.nnTopology(MiniDFSNNTopology.simpleSingleNN(0, 0))
@@ -72,6 +75,7 @@ public void testParamsCanBeReconfigured() throws ReconfigurationException {
assertEquals(8, bm.getMaxReplicationStreams());
assertEquals(10, bm.getReplicationStreamsHardLimit());
assertEquals(12, bm.getBlocksReplWorkMultiplier());
assertEquals(300, bm.getReconstructionPendingTimeout());

cluster.getNameNode().reconfigurePropertyImpl(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, "20");
@@ -81,10 +85,14 @@ public void testParamsCanBeReconfigured() throws ReconfigurationException {
cluster.getNameNode().reconfigurePropertyImpl(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
"24");
cluster.getNameNode().reconfigurePropertyImpl(
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
"180");

assertEquals(20, bm.getMaxReplicationStreams());
assertEquals(22, bm.getReplicationStreamsHardLimit());
assertEquals(24, bm.getBlocksReplWorkMultiplier());
assertEquals(180, bm.getReconstructionPendingTimeout());
}

/**
@@ -96,7 +104,8 @@ public void testReconfigureFailsWithInvalidValues() throws Exception {
String[] keys = new String[]{
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY
};

// Ensure we cannot set any of the parameters negative
@@ -112,6 +121,7 @@
assertEquals(8, bm.getMaxReplicationStreams());
assertEquals(10, bm.getReplicationStreamsHardLimit());
assertEquals(12, bm.getBlocksReplWorkMultiplier());
assertEquals(300, bm.getReconstructionPendingTimeout());

for (String key : keys) {
ReconfigurationException e =
@@ -126,6 +136,7 @@
assertEquals(8, bm.getMaxReplicationStreams());
assertEquals(10, bm.getReplicationStreamsHardLimit());
assertEquals(12, bm.getBlocksReplWorkMultiplier());
assertEquals(300, bm.getReconstructionPendingTimeout());

// Ensure none of the parameters can be set to a string value
for (String key : keys) {
@@ -139,5 +150,6 @@
assertEquals(8, bm.getMaxReplicationStreams());
assertEquals(10, bm.getReplicationStreamsHardLimit());
assertEquals(12, bm.getBlocksReplWorkMultiplier());
assertEquals(300, bm.getReconstructionPendingTimeout());
}
}
}
@@ -438,7 +438,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
final List<String> outs = Lists.newArrayList();
final List<String> errs = Lists.newArrayList();
getReconfigurableProperties("namenode", address, outs, errs);
assertEquals(19, outs.size());
assertEquals(20, outs.size());
assertTrue(outs.get(0).contains("Reconfigurable properties:"));
assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -1266,4 +1266,4 @@ public void testAllDatanodesReconfig()
outs.get(8));
}

}
}
