Merged
@@ -125,6 +125,9 @@ private static void setupServlets(
RouterFsckServlet.PATH_SPEC,
RouterFsckServlet.class,
true);
httpServer.addInternalServlet(RouterNetworkTopologyServlet.SERVLET_NAME,
RouterNetworkTopologyServlet.PATH_SPEC,
RouterNetworkTopologyServlet.class);
}

public InetSocketAddress getHttpAddress() {
@@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.router;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NetworkTopologyServlet;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.StringUtils;

import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.List;

/**
* A servlet to print out the network topology from router.
*/
public class RouterNetworkTopologyServlet extends NetworkTopologyServlet {

@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws IOException {
final ServletContext context = getServletContext();

String format = parseAcceptHeader(request);
if (FORMAT_TEXT.equals(format)) {
response.setContentType("text/plain; charset=UTF-8");
} else if (FORMAT_JSON.equals(format)) {
response.setContentType("application/json; charset=UTF-8");
}

Router router = RouterHttpServer.getRouterFromContext(context);
DatanodeInfo[] datanodeReport =
router.getRpcServer().getDatanodeReport(
HdfsConstants.DatanodeReportType.ALL);
List<Node> datanodeInfos = Arrays.asList(datanodeReport);

try (PrintStream out = new PrintStream(
response.getOutputStream(), false, "UTF-8")) {
printTopology(out, datanodeInfos, format);
} catch (Throwable t) {
String errMsg = "Print network topology failed. "
+ StringUtils.stringifyException(t);
response.sendError(HttpServletResponse.SC_GONE, errMsg);
throw new IOException(errMsg);
} finally {
response.getOutputStream().close();
}
}
}
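Not part of the change itself: a minimal sketch of how the new endpoint could be exercised once a Router HTTP server is running. The host and port below are placeholder assumptions; the /topology path and the Accept-header-driven format switch come from the servlet registration and parseAcceptHeader shown in this diff.

// Illustrative only, not part of this change: query the Router's /topology endpoint.
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class TopologyEndpointExample {
  public static void main(String[] args) throws Exception {
    // Placeholder address; substitute the Router's configured HTTP address.
    URL url = new URL("http://localhost:50071/topology");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // Ask for JSON; without "json" in the Accept header the servlet returns plain text.
    conn.setRequestProperty("Accept", "application/json");
    conn.setConnectTimeout(20000);
    conn.setReadTimeout(20000);
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}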
@@ -48,6 +48,7 @@
<li><a href="jmx">Metrics</a></li>
<li><a href="conf">Configuration</a></li>
<li><a href="stacks">Process Thread Dump</a></li>
<li><a href="topology">Network Topology</a></li>
</ul>
</li>
</ul>
@@ -52,6 +52,7 @@
<li><a href="jmx">Metrics</a></li>
<li><a href="conf">Configuration</a></li>
<li><a href="stacks">Process Thread Dump</a></li>
<li><a href="topology">Network Topology</a></li>
</ul>
</li>
</ul>
@@ -0,0 +1,210 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.router;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver;
import org.apache.hadoop.io.IOUtils;
import org.junit.BeforeClass;
import org.junit.Test;

import java.io.ByteArrayOutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Iterator;
import java.util.Map;

import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

public class TestRouterNetworkTopologyServlet {

private static StateStoreDFSCluster clusterWithDatanodes;
private static StateStoreDFSCluster clusterNoDatanodes;

@BeforeClass
public static void setUp() throws Exception {
// Builder configuration
Configuration routerConf =
new RouterConfigBuilder().stateStore().admin().quota().rpc().build();
routerConf.set(DFS_ROUTER_HTTP_ENABLE, "true");
Configuration hdfsConf = new Configuration(false);

// Build and start a federated cluster
clusterWithDatanodes = new StateStoreDFSCluster(false, 2,
MultipleDestinationMountTableResolver.class);
clusterWithDatanodes.addNamenodeOverrides(hdfsConf);
clusterWithDatanodes.addRouterOverrides(routerConf);
clusterWithDatanodes.setNumDatanodesPerNameservice(9);
clusterWithDatanodes.setIndependentDNs();
clusterWithDatanodes.setRacks(
new String[] {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2",
"/rack2", "/rack3", "/rack3", "/rack3", "/rack4", "/rack4",
"/rack4", "/rack5", "/rack5", "/rack5", "/rack6", "/rack6",
"/rack6"});
clusterWithDatanodes.startCluster();
clusterWithDatanodes.startRouters();
clusterWithDatanodes.waitClusterUp();
clusterWithDatanodes.waitActiveNamespaces();

// Build and start a federated cluster
clusterNoDatanodes = new StateStoreDFSCluster(false, 2,
MultipleDestinationMountTableResolver.class);
clusterNoDatanodes.addNamenodeOverrides(hdfsConf);
clusterNoDatanodes.addRouterOverrides(routerConf);
clusterNoDatanodes.setNumDatanodesPerNameservice(0);
clusterNoDatanodes.setIndependentDNs();
clusterNoDatanodes.startCluster();
clusterNoDatanodes.startRouters();
clusterNoDatanodes.waitClusterUp();
clusterNoDatanodes.waitActiveNamespaces();
}

@Test
public void testPrintTopologyTextFormat() throws Exception {
// get http Address
String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();

// send http request
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.connect();

ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
StringBuilder sb =
new StringBuilder("-- Network Topology -- \n");
sb.append(out);
sb.append("\n-- Network Topology -- ");
String topology = sb.toString();

// assert rack info
assertTrue(topology.contains("/ns0/rack1"));
assertTrue(topology.contains("/ns0/rack2"));
assertTrue(topology.contains("/ns0/rack3"));
assertTrue(topology.contains("/ns1/rack4"));
assertTrue(topology.contains("/ns1/rack5"));
assertTrue(topology.contains("/ns1/rack6"));

// assert node number
assertEquals(18,
topology.split("127.0.0.1").length - 1);
}

@Test
public void testPrintTopologyJsonFormat() throws Exception {
// get http Address
String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();

// send http request
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.setRequestProperty("Accept", "application/json");
conn.connect();

ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
String topology = out.toString();

// parse json
JsonNode racks = new ObjectMapper().readTree(topology);

// assert rack number
assertEquals(6, racks.size());

// assert rack info
assertTrue(topology.contains("/ns0/rack1"));
assertTrue(topology.contains("/ns0/rack2"));
assertTrue(topology.contains("/ns0/rack3"));
assertTrue(topology.contains("/ns1/rack4"));
assertTrue(topology.contains("/ns1/rack5"));
assertTrue(topology.contains("/ns1/rack6"));

// assert node number
Iterator<JsonNode> elements = racks.elements();
int dataNodesCount = 0;
while(elements.hasNext()){
JsonNode rack = elements.next();
Iterator<Map.Entry<String, JsonNode>> fields = rack.fields();
while (fields.hasNext()) {
dataNodesCount += fields.next().getValue().size();
}
}
assertEquals(18, dataNodesCount);
}

@Test
public void testPrintTopologyNoDatanodesTextFormat() throws Exception {
// get http Address
String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();

// send http request
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
StringBuilder sb =
new StringBuilder("-- Network Topology -- \n");
sb.append(out);
sb.append("\n-- Network Topology -- ");
String topology = sb.toString();

// assert node number
assertTrue(topology.contains("No DataNodes"));
}

@Test
public void testPrintTopologyNoDatanodesJsonFormat() throws Exception {
// get http Address
String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();

// send http request
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.setRequestProperty("Accept", "application/json");
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
StringBuilder sb =
new StringBuilder("-- Network Topology -- \n");
sb.append(out);
sb.append("\n-- Network Topology -- ");
String topology = sb.toString();

// assert node number
assertTrue(topology.contains("No DataNodes"));
}
}
@@ -253,7 +253,7 @@ private static void setupServlets(HttpServer2 httpServer) {
httpServer.addInternalServlet(IsNameNodeActiveServlet.SERVLET_NAME,
IsNameNodeActiveServlet.PATH_SPEC,
IsNameNodeActiveServlet.class);
httpServer.addInternalServlet("topology",
httpServer.addInternalServlet(NetworkTopologyServlet.SERVLET_NAME,
NetworkTopologyServlet.PATH_SPEC, NetworkTopologyServlet.class);
}

@@ -46,6 +46,7 @@
@InterfaceAudience.Private
public class NetworkTopologyServlet extends DfsServlet {

public static final String SERVLET_NAME = "topology";
public static final String PATH_SPEC = "/topology";

protected static final String FORMAT_JSON = "json";
@@ -90,7 +91,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response)
* @param leaves leaves nodes under base scope
* @param format the response format
*/
public void printTopology(PrintStream stream, List<Node> leaves,
protected void printTopology(PrintStream stream, List<Node> leaves,
String format) throws BadFormatException, IOException {
if (leaves.isEmpty()) {
stream.print("No DataNodes");
@@ -120,7 +121,7 @@ public void printTopology(PrintStream stream, List<Node> leaves,
}
}

private void printJsonFormat(PrintStream stream, Map<String,
protected void printJsonFormat(PrintStream stream, Map<String,
TreeSet<String>> tree, ArrayList<String> racks) throws IOException {
JsonFactory dumpFactory = new JsonFactory();
JsonGenerator dumpGenerator = dumpFactory.createGenerator(stream);
@@ -152,7 +153,7 @@ private void printJsonFormat(PrintStream stream, Map<String,
}
}

private void printTextFormat(PrintStream stream, Map<String,
protected void printTextFormat(PrintStream stream, Map<String,
TreeSet<String>> tree, ArrayList<String> racks) {
for(String r : racks) {
stream.println("Rack: " + r);
@@ -171,7 +172,7 @@ private void printTextFormat(PrintStream stream, Map<String,
}

@VisibleForTesting
static String parseAcceptHeader(HttpServletRequest request) {
protected static String parseAcceptHeader(HttpServletRequest request) {
String format = request.getHeader(HttpHeaders.ACCEPT);
return format != null && format.contains(FORMAT_JSON) ?
FORMAT_JSON : FORMAT_TEXT;