Skip to content

HDFS-17055 Export HAState as a metric from Namenode for monitoring #5790

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Jul 3, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

Expand All @@ -68,6 +69,7 @@
* </ol>
*/
@InterfaceAudience.Private
@Metrics(context="dfs")
public class BackupNode extends NameNode {
private static final String BN_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
private static final String BN_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,8 @@
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.NetUtils;
Expand Down Expand Up @@ -246,6 +248,7 @@
* NameNode state, for example partial blocksMap etc.
**********************************************************/
@InterfaceAudience.Private
@Metrics(context="dfs")
public class NameNode extends ReconfigurableBase implements
NameNodeStatusMXBean, TokenVerifier<DelegationTokenIdentifier> {
static{
Expand Down Expand Up @@ -1049,6 +1052,7 @@ protected NameNode(Configuration conf, NamenodeRole role)
DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE,
DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT);
this.started.set(true);
DefaultMetricsSystem.instance().register(this);
}

private void stopAtException(Exception e){
Expand Down Expand Up @@ -1119,6 +1123,7 @@ public void stop() {
levelDBAliasMapServer.close();
}
}
started.set(false);
tracer.close();
}

Expand Down Expand Up @@ -1951,6 +1956,26 @@ synchronized HAServiceState getServiceState() {
return state.getServiceState();
}

/**
 * Emit Namenode HA service state as an integer so that one can monitor NN HA
 * state based on this metric.
 *
 * @return 0 when not fully started
 *         1 for active or standalone (non-HA) NN
 *         2 for standby
 *         3 for observer
 *
 * These are the same integer values as the HAServiceState enum ordinals.
 */
@Metric({"NameNodeState", "Namenode HA service state"})
public int getNameNodeState() {
  // Snapshot the field once: unlike getServiceState(), this getter is not
  // synchronized, so a concurrent stop()/HA transition could clear the
  // field between a null check and the dereference. Reading it into a
  // local makes the check-then-use race-free for this method.
  HAState haState = state;
  if (!isStarted() || haState == null) {
    // Not fully up (or already stopped): report INITIALIZING (0) rather
    // than guessing at a transitional state.
    return HAServiceState.INITIALIZING.ordinal();
  }
  return haState.getServiceState().ordinal();
}

/**
* Register NameNodeStatusMXBean
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ public void testFinalize() throws Exception {
UpgradeUtilities.createEmptyDirs(dataNodeDirs);

log("Finalize NN & BP with existing previous dir", numDirs);
String bpid = UpgradeUtilities.getCurrentBlockPoolID(cluster);
String bpid = UpgradeUtilities.getCurrentBlockPoolID(null);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -327,7 +327,7 @@ public void testRollback() throws Exception {
UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);

UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs,
storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
storageInfo, UpgradeUtilities.getCurrentBlockPoolID(null));
startNameNodeShouldFail("Cannot rollback to storage version 1 using this version");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
} // end numDir loop
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -348,7 +348,7 @@ public void testUpgrade() throws Exception {
UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);

UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
UpgradeUtilities.getCurrentBlockPoolID(null));

startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
Expand All @@ -361,7 +361,7 @@ public void testUpgrade() throws Exception {
UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);

UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
UpgradeUtilities.getCurrentBlockPoolID(null));

startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import java.util.function.Supplier;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
Expand Down Expand Up @@ -128,6 +129,7 @@ private void addAppender(Log log, Appender appender) {
/**
* A NameNode that stubs out the NameSystem for testing.
*/
@Metrics(context="dfs")
private static class TestNameNode extends NameNode {
@Override
protected void loadNamesystem(Configuration conf) throws IOException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;

import java.io.IOException;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
Expand All @@ -29,6 +31,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;

Expand Down Expand Up @@ -176,4 +179,56 @@ public void testHAInodeCount() throws Exception {
}

}

/**
 * Test the getNameNodeState() API added to NameNode.java.
 *
 * @throws IOException if the mini cluster fails to start or transition.
 */
@Test
public void testGetNameNodeState() throws IOException {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, Integer.MAX_VALUE);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(
      MiniDFSNNTopology.simpleHATopology(3)).numDataNodes(1).build();

  // Guarantee the cluster is torn down even when an assertion fails;
  // otherwise a failure here leaks NN/DN threads and ports into the
  // rest of the test run.
  try {
    cluster.waitActive();

    NameNode nn0 = cluster.getNameNode(0);
    NameNode nn1 = cluster.getNameNode(1);
    NameNode nn2 = cluster.getNameNode(2);

    // All namenodes are in standby by default
    assertEquals(HAServiceProtocol.HAServiceState.STANDBY.ordinal(),
        nn0.getNameNodeState());
    assertEquals(HAServiceProtocol.HAServiceState.STANDBY.ordinal(),
        nn1.getNameNodeState());
    assertEquals(HAServiceProtocol.HAServiceState.STANDBY.ordinal(),
        nn2.getNameNodeState());

    // Transition nn0 to be active
    cluster.transitionToActive(0);
    assertEquals(HAServiceProtocol.HAServiceState.ACTIVE.ordinal(),
        nn0.getNameNodeState());

    // Transition nn1 to be active
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertEquals(HAServiceProtocol.HAServiceState.STANDBY.ordinal(),
        nn0.getNameNodeState());
    assertEquals(HAServiceProtocol.HAServiceState.ACTIVE.ordinal(),
        nn1.getNameNodeState());

    // Transition nn2 to observer
    cluster.transitionToObserver(2);
    assertEquals(HAServiceProtocol.HAServiceState.OBSERVER.ordinal(),
        nn2.getNameNodeState());

    // Shutdown nn2. Now getNameNodeState should return the INITIALIZING
    // state.
    cluster.shutdownNameNode(2);
    assertEquals(HAServiceProtocol.HAServiceState.INITIALIZING.ordinal(),
        nn2.getNameNodeState());
  } finally {
    cluster.shutdown();
  }
}
}