Skip to content

Commit 0224917

Browse files
tomscut authored and tasanuma committed
HDFS-16048. RBF: Print network topology on the router web (#3062)
Reviewed-by: Inigo Goiri <inigoiri@apache.org>
Reviewed-by: Hemanth Boyina <hemanthboyina@apache.org>
Reviewed-by: Akira Ajisaka <aajisaka@apache.org>
(cherry picked from commit c748fce)
1 parent 46d4b51 commit 0224917

File tree

7 files changed

+290
-5
lines changed

7 files changed

+290
-5
lines changed

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -125,6 +125,9 @@ private static void setupServlets(
125125
RouterFsckServlet.PATH_SPEC,
126126
RouterFsckServlet.class,
127127
true);
128+
httpServer.addInternalServlet(RouterNetworkTopologyServlet.SERVLET_NAME,
129+
RouterNetworkTopologyServlet.PATH_SPEC,
130+
RouterNetworkTopologyServlet.class);
128131
}
129132

130133
public InetSocketAddress getHttpAddress() {
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
/**
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
package org.apache.hadoop.hdfs.server.federation.router;
19+
20+
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
21+
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
22+
import org.apache.hadoop.hdfs.server.namenode.NetworkTopologyServlet;
23+
import org.apache.hadoop.net.Node;
24+
import org.apache.hadoop.util.StringUtils;
25+
26+
import javax.servlet.ServletContext;
27+
import javax.servlet.http.HttpServletRequest;
28+
import javax.servlet.http.HttpServletResponse;
29+
import java.io.IOException;
30+
import java.io.PrintStream;
31+
import java.util.Arrays;
32+
import java.util.List;
33+
34+
/**
35+
* A servlet to print out the network topology from router.
36+
*/
37+
public class RouterNetworkTopologyServlet extends NetworkTopologyServlet {
38+
39+
@Override
40+
public void doGet(HttpServletRequest request, HttpServletResponse response)
41+
throws IOException {
42+
final ServletContext context = getServletContext();
43+
44+
String format = parseAcceptHeader(request);
45+
if (FORMAT_TEXT.equals(format)) {
46+
response.setContentType("text/plain; charset=UTF-8");
47+
} else if (FORMAT_JSON.equals(format)) {
48+
response.setContentType("application/json; charset=UTF-8");
49+
}
50+
51+
Router router = RouterHttpServer.getRouterFromContext(context);
52+
DatanodeInfo[] datanodeReport =
53+
router.getRpcServer().getDatanodeReport(
54+
HdfsConstants.DatanodeReportType.ALL);
55+
List<Node> datanodeInfos = Arrays.asList(datanodeReport);
56+
57+
try (PrintStream out = new PrintStream(
58+
response.getOutputStream(), false, "UTF-8")) {
59+
printTopology(out, datanodeInfos, format);
60+
} catch (Throwable t) {
61+
String errMsg = "Print network topology failed. "
62+
+ StringUtils.stringifyException(t);
63+
response.sendError(HttpServletResponse.SC_GONE, errMsg);
64+
throw new IOException(errMsg);
65+
} finally {
66+
response.getOutputStream().close();
67+
}
68+
}
69+
}

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@
4848
<li><a href="jmx">Metrics</a></li>
4949
<li><a href="conf">Configuration</a></li>
5050
<li><a href="stacks">Process Thread Dump</a></li>
51+
<li><a href="topology">Network Topology</a></li>
5152
</ul>
5253
</li>
5354
</ul>

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@
5252
<li><a href="jmx">Metrics</a></li>
5353
<li><a href="conf">Configuration</a></li>
5454
<li><a href="stacks">Process Thread Dump</a></li>
55+
<li><a href="topology">Network Topology</a></li>
5556
</ul>
5657
</li>
5758
</ul>
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,210 @@
1+
/**
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
package org.apache.hadoop.hdfs.server.federation.router;
19+
20+
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver;
import org.apache.hadoop.io.IOUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import java.io.ByteArrayOutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Iterator;
import java.util.Map;

import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
39+
40+
public class TestRouterNetworkTopologyServlet {
41+
42+
private static StateStoreDFSCluster clusterWithDatanodes;
43+
private static StateStoreDFSCluster clusterNoDatanodes;
44+
45+
@BeforeClass
46+
public static void setUp() throws Exception {
47+
// Builder configuration
48+
Configuration routerConf =
49+
new RouterConfigBuilder().stateStore().admin().quota().rpc().build();
50+
routerConf.set(DFS_ROUTER_HTTP_ENABLE, "true");
51+
Configuration hdfsConf = new Configuration(false);
52+
53+
// Build and start a federated cluster
54+
clusterWithDatanodes = new StateStoreDFSCluster(false, 2,
55+
MultipleDestinationMountTableResolver.class);
56+
clusterWithDatanodes.addNamenodeOverrides(hdfsConf);
57+
clusterWithDatanodes.addRouterOverrides(routerConf);
58+
clusterWithDatanodes.setNumDatanodesPerNameservice(9);
59+
clusterWithDatanodes.setIndependentDNs();
60+
clusterWithDatanodes.setRacks(
61+
new String[] {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2",
62+
"/rack2", "/rack3", "/rack3", "/rack3", "/rack4", "/rack4",
63+
"/rack4", "/rack5", "/rack5", "/rack5", "/rack6", "/rack6",
64+
"/rack6"});
65+
clusterWithDatanodes.startCluster();
66+
clusterWithDatanodes.startRouters();
67+
clusterWithDatanodes.waitClusterUp();
68+
clusterWithDatanodes.waitActiveNamespaces();
69+
70+
// Build and start a federated cluster
71+
clusterNoDatanodes = new StateStoreDFSCluster(false, 2,
72+
MultipleDestinationMountTableResolver.class);
73+
clusterNoDatanodes.addNamenodeOverrides(hdfsConf);
74+
clusterNoDatanodes.addRouterOverrides(routerConf);
75+
clusterNoDatanodes.setNumDatanodesPerNameservice(0);
76+
clusterNoDatanodes.setIndependentDNs();
77+
clusterNoDatanodes.startCluster();
78+
clusterNoDatanodes.startRouters();
79+
clusterNoDatanodes.waitClusterUp();
80+
clusterNoDatanodes.waitActiveNamespaces();
81+
}
82+
83+
@Test
84+
public void testPrintTopologyTextFormat() throws Exception {
85+
// get http Address
86+
String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
87+
.getHttpServerAddress().toString();
88+
89+
// send http request
90+
URL url = new URL("http:/" + httpAddress + "/topology");
91+
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
92+
conn.setReadTimeout(20000);
93+
conn.setConnectTimeout(20000);
94+
conn.connect();
95+
96+
ByteArrayOutputStream out = new ByteArrayOutputStream();
97+
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
98+
StringBuilder sb =
99+
new StringBuilder("-- Network Topology -- \n");
100+
sb.append(out);
101+
sb.append("\n-- Network Topology -- ");
102+
String topology = sb.toString();
103+
104+
// assert rack info
105+
assertTrue(topology.contains("/ns0/rack1"));
106+
assertTrue(topology.contains("/ns0/rack2"));
107+
assertTrue(topology.contains("/ns0/rack3"));
108+
assertTrue(topology.contains("/ns1/rack4"));
109+
assertTrue(topology.contains("/ns1/rack5"));
110+
assertTrue(topology.contains("/ns1/rack6"));
111+
112+
// assert node number
113+
assertEquals(18,
114+
topology.split("127.0.0.1").length - 1);
115+
}
116+
117+
@Test
118+
public void testPrintTopologyJsonFormat() throws Exception {
119+
// get http Address
120+
String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
121+
.getHttpServerAddress().toString();
122+
123+
// send http request
124+
URL url = new URL("http:/" + httpAddress + "/topology");
125+
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
126+
conn.setReadTimeout(20000);
127+
conn.setConnectTimeout(20000);
128+
conn.setRequestProperty("Accept", "application/json");
129+
conn.connect();
130+
131+
ByteArrayOutputStream out = new ByteArrayOutputStream();
132+
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
133+
String topology = out.toString();
134+
135+
// parse json
136+
JsonNode racks = new ObjectMapper().readTree(topology);
137+
138+
// assert rack number
139+
assertEquals(6, racks.size());
140+
141+
// assert rack info
142+
assertTrue(topology.contains("/ns0/rack1"));
143+
assertTrue(topology.contains("/ns0/rack2"));
144+
assertTrue(topology.contains("/ns0/rack3"));
145+
assertTrue(topology.contains("/ns1/rack4"));
146+
assertTrue(topology.contains("/ns1/rack5"));
147+
assertTrue(topology.contains("/ns1/rack6"));
148+
149+
// assert node number
150+
Iterator<JsonNode> elements = racks.elements();
151+
int dataNodesCount = 0;
152+
while(elements.hasNext()){
153+
JsonNode rack = elements.next();
154+
Iterator<Map.Entry<String, JsonNode>> fields = rack.fields();
155+
while (fields.hasNext()) {
156+
dataNodesCount += fields.next().getValue().size();
157+
}
158+
}
159+
assertEquals(18, dataNodesCount);
160+
}
161+
162+
@Test
163+
public void testPrintTopologyNoDatanodesTextFormat() throws Exception {
164+
// get http Address
165+
String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
166+
.getHttpServerAddress().toString();
167+
168+
// send http request
169+
URL url = new URL("http:/" + httpAddress + "/topology");
170+
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
171+
conn.setReadTimeout(20000);
172+
conn.setConnectTimeout(20000);
173+
conn.connect();
174+
ByteArrayOutputStream out = new ByteArrayOutputStream();
175+
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
176+
StringBuilder sb =
177+
new StringBuilder("-- Network Topology -- \n");
178+
sb.append(out);
179+
sb.append("\n-- Network Topology -- ");
180+
String topology = sb.toString();
181+
182+
// assert node number
183+
assertTrue(topology.contains("No DataNodes"));
184+
}
185+
186+
@Test
187+
public void testPrintTopologyNoDatanodesJsonFormat() throws Exception {
188+
// get http Address
189+
String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
190+
.getHttpServerAddress().toString();
191+
192+
// send http request
193+
URL url = new URL("http:/" + httpAddress + "/topology");
194+
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
195+
conn.setReadTimeout(20000);
196+
conn.setConnectTimeout(20000);
197+
conn.setRequestProperty("Accept", "application/json");
198+
conn.connect();
199+
ByteArrayOutputStream out = new ByteArrayOutputStream();
200+
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
201+
StringBuilder sb =
202+
new StringBuilder("-- Network Topology -- \n");
203+
sb.append(out);
204+
sb.append("\n-- Network Topology -- ");
205+
String topology = sb.toString();
206+
207+
// assert node number
208+
assertTrue(topology.contains("No DataNodes"));
209+
}
210+
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -253,7 +253,7 @@ private static void setupServlets(HttpServer2 httpServer) {
253253
httpServer.addInternalServlet(IsNameNodeActiveServlet.SERVLET_NAME,
254254
IsNameNodeActiveServlet.PATH_SPEC,
255255
IsNameNodeActiveServlet.class);
256-
httpServer.addInternalServlet("topology",
256+
httpServer.addInternalServlet(NetworkTopologyServlet.SERVLET_NAME,
257257
NetworkTopologyServlet.PATH_SPEC, NetworkTopologyServlet.class);
258258
}
259259

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@
4646
@InterfaceAudience.Private
4747
public class NetworkTopologyServlet extends DfsServlet {
4848

49+
public static final String SERVLET_NAME = "topology";
4950
public static final String PATH_SPEC = "/topology";
5051

5152
protected static final String FORMAT_JSON = "json";
@@ -90,7 +91,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response)
9091
* @param leaves leaves nodes under base scope
9192
* @param format the response format
9293
*/
93-
public void printTopology(PrintStream stream, List<Node> leaves,
94+
protected void printTopology(PrintStream stream, List<Node> leaves,
9495
String format) throws BadFormatException, IOException {
9596
if (leaves.isEmpty()) {
9697
stream.print("No DataNodes");
@@ -120,7 +121,7 @@ public void printTopology(PrintStream stream, List<Node> leaves,
120121
}
121122
}
122123

123-
private void printJsonFormat(PrintStream stream, Map<String,
124+
protected void printJsonFormat(PrintStream stream, Map<String,
124125
TreeSet<String>> tree, ArrayList<String> racks) throws IOException {
125126
JsonFactory dumpFactory = new JsonFactory();
126127
JsonGenerator dumpGenerator = dumpFactory.createGenerator(stream);
@@ -152,7 +153,7 @@ private void printJsonFormat(PrintStream stream, Map<String,
152153
}
153154
}
154155

155-
private void printTextFormat(PrintStream stream, Map<String,
156+
protected void printTextFormat(PrintStream stream, Map<String,
156157
TreeSet<String>> tree, ArrayList<String> racks) {
157158
for(String r : racks) {
158159
stream.println("Rack: " + r);
@@ -171,7 +172,7 @@ private void printTextFormat(PrintStream stream, Map<String,
171172
}
172173

173174
@VisibleForTesting
174-
static String parseAcceptHeader(HttpServletRequest request) {
175+
protected static String parseAcceptHeader(HttpServletRequest request) {
175176
String format = request.getHeader(HttpHeaders.ACCEPT);
176177
return format != null && format.contains(FORMAT_JSON) ?
177178
FORMAT_JSON : FORMAT_TEXT;

0 commit comments

Comments
 (0)