From 3247012f5c1e8cd03d79284858f78fca4d117d30 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Mon, 11 Jun 2018 17:07:27 -0400
Subject: [PATCH] LLClient: Support host selection (#30523)

Allows users of the Low Level REST client to specify which hosts a
request should be run on. They implement the `NodeSelector` interface
or reuse a built-in selector like `NOT_MASTER_ONLY` to choose which
nodes are valid. Using it looks like:

```
Request request = new Request("POST", "/foo/_search");
RequestOptions.Builder options = request.getOptions().toBuilder();
options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
request.setOptions(options);
...
```

This introduces a new `Node` object which contains a `HttpHost` and the
metadata about the host. At this point that metadata is just `version`
and `roles` but I plan to add node attributes in a followup. The
canonical way to **get** this metadata is to use the `Sniffer` to pull
the information from the Elasticsearch cluster.

I've marked this as "breaking-java" because it breaks custom
implementations of `HostsSniffer` by renaming the interface to
`NodesSniffer` and by changing it from returning a `List<HttpHost>` to
a `List<Node>`. It *shouldn't* break anyone else though.

Because we expect to find it useful, this also adds `host_selector`
support to `do` statements in the yaml tests. Using it looks a little
like:

```
---
"example test":
  - skip:
      features: host_selector
  - do:
      host_selector:
        version: " - 7.0.0" # same syntax as skip
      apiname:
        something: true
```

The `do` section parses the `version` string into a host selector that
uses the same version comparison logic as the `skip` section. When the
`do` section is executed it passes the selector off to the
`RestClient`, using the `ElasticsearchNodesSniffer` to sniff the
required metadata.

The idea is to use this in mixed-version tests to target a specific
version of Elasticsearch so we can be sure about the deprecation
logging, though we don't currently have any examples that need it. We
do, however, have at least one open pull request that requires
something like this to properly test it.
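For illustration, a custom selector is just an implementation of
`select` that removes the nodes it should not use. Here is a
hypothetical sketch (the `version6Only` name, the `import`, and the
"6." version check are invented for the example; `NodeSelector`,
`Node`, and `Node#getVersion` are the API added by this change):

```
import java.util.Iterator;

NodeSelector version6Only = new NodeSelector() {
    @Override
    public void select(Iterable<Node> nodes) {
        for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
            Node node = itr.next();
            // Version metadata may be null when the client wasn't built
            // from sniffed nodes; this sketch removes such nodes as well.
            if (node.getVersion() == null || false == node.getVersion().startsWith("6.")) {
                itr.remove();
            }
        }
    }
};
```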
Closes #21888 --- .../elasticsearch/client/DeadHostState.java | 19 +- .../java/org/elasticsearch/client/Node.java | 213 +++++++++++ .../elasticsearch/client/NodeSelector.java | 90 +++++ .../elasticsearch/client/RequestLogger.java | 6 +- .../elasticsearch/client/RequestOptions.java | 36 +- .../org/elasticsearch/client/Response.java | 2 +- .../org/elasticsearch/client/RestClient.java | 335 +++++++++++++----- .../client/RestClientBuilder.java | 24 +- .../client/DeadHostStateTests.java | 33 +- .../client/HostsTrackingFailureListener.java | 17 +- .../client/NodeSelectorTests.java | 71 ++++ .../org/elasticsearch/client/NodeTests.java | 71 ++++ .../client/RequestOptionsTests.java | 9 +- .../client/RestClientBuilderTests.java | 31 +- .../RestClientMultipleHostsIntegTests.java | 64 +++- .../client/RestClientMultipleHostsTests.java | 106 ++++-- .../client/RestClientSingleHostTests.java | 16 +- .../elasticsearch/client/RestClientTests.java | 225 ++++++++++-- .../RestClientDocumentation.java | 43 ++- ...er.java => ElasticsearchNodesSniffer.java} | 146 ++++++-- .../{HostsSniffer.java => NodesSniffer.java} | 8 +- .../client/sniff/SniffOnFailureListener.java | 4 +- .../elasticsearch/client/sniff/Sniffer.java | 28 +- .../client/sniff/SnifferBuilder.java | 18 +- .../ElasticsearchNodesSnifferParseTests.java | 109 ++++++ ...va => ElasticsearchNodesSnifferTests.java} | 111 +++--- ...ostsSniffer.java => MockNodesSniffer.java} | 9 +- .../sniff/SniffOnFailureListenerTests.java | 5 +- .../client/sniff/SnifferBuilderTests.java | 6 +- .../client/sniff/SnifferTests.java | 110 +++--- .../documentation/SnifferDocumentation.java | 29 +- .../src/test/resources/2.0.0_nodes_http.json | 141 ++++++++ .../src/test/resources/5.0.0_nodes_http.json | 169 +++++++++ .../src/test/resources/6.0.0_nodes_http.json | 169 +++++++++ client/sniffer/src/test/resources/readme.txt | 4 + .../high-level/getting-started.asciidoc | 10 + docs/java-rest/low-level/sniffer.asciidoc | 10 +- docs/java-rest/low-level/usage.asciidoc | 35 +- .../rest-api-spec/test/README.asciidoc | 18 + test/framework/build.gradle | 1 + .../rest/yaml/ClientYamlDocsTestClient.java | 21 +- .../test/rest/yaml/ClientYamlTestClient.java | 44 ++- .../yaml/ClientYamlTestExecutionContext.java | 18 +- .../rest/yaml/ESClientYamlSuiteTestCase.java | 29 +- .../test/rest/yaml/Features.java | 1 + .../test/rest/yaml/parser/package-info.java | 24 -- .../rest/yaml/section/ApiCallSection.java | 17 + .../yaml/section/ClientYamlTestSection.java | 7 + .../test/rest/yaml/section/DoSection.java | 78 +++- .../test/rest/yaml/section/SkipSection.java | 2 +- .../ClientYamlTestExecutionContextTests.java | 4 +- .../section/ClientYamlTestSectionTests.java | 31 +- .../rest/yaml/section/DoSectionTests.java | 45 +++ .../exporter/http/HttpExporter.java | 12 +- .../exporter/http/NodeFailureListener.java | 6 +- .../exporter/http/HttpExporterTests.java | 7 +- .../http/NodeFailureListenerTests.java | 9 +- 57 files changed, 2433 insertions(+), 473 deletions(-) create mode 100644 client/rest/src/main/java/org/elasticsearch/client/Node.java create mode 100644 client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java create mode 100644 client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java create mode 100644 client/rest/src/test/java/org/elasticsearch/client/NodeTests.java rename client/sniffer/src/main/java/org/elasticsearch/client/sniff/{ElasticsearchHostsSniffer.java => ElasticsearchNodesSniffer.java} (50%) rename 
client/sniffer/src/main/java/org/elasticsearch/client/sniff/{HostsSniffer.java => NodesSniffer.java} (85%) create mode 100644 client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java rename client/sniffer/src/test/java/org/elasticsearch/client/sniff/{ElasticsearchHostsSnifferTests.java => ElasticsearchNodesSnifferTests.java} (76%) rename client/sniffer/src/test/java/org/elasticsearch/client/sniff/{MockHostsSniffer.java => MockNodesSniffer.java} (78%) create mode 100644 client/sniffer/src/test/resources/2.0.0_nodes_http.json create mode 100644 client/sniffer/src/test/resources/5.0.0_nodes_http.json create mode 100644 client/sniffer/src/test/resources/6.0.0_nodes_http.json create mode 100644 client/sniffer/src/test/resources/readme.txt delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java diff --git a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java index 452e71b14d93a..2a62adb285ad6 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -29,7 +29,7 @@ final class DeadHostState implements Comparable { private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1); - private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30); + static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30); private final int failedAttempts; private final long deadUntilNanos; @@ -55,12 +55,12 @@ final class DeadHostState implements Comparable { * * @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt */ - DeadHostState(DeadHostState previousDeadHostState, TimeSupplier timeSupplier) { + DeadHostState(DeadHostState previousDeadHostState) { long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1), MAX_CONNECTION_TIMEOUT_NANOS); - this.deadUntilNanos = timeSupplier.nanoTime() + timeoutNanos; + this.deadUntilNanos = previousDeadHostState.timeSupplier.nanoTime() + timeoutNanos; this.failedAttempts = previousDeadHostState.failedAttempts + 1; - this.timeSupplier = timeSupplier; + this.timeSupplier = previousDeadHostState.timeSupplier; } /** @@ -86,6 +86,10 @@ int getFailedAttempts() { @Override public int compareTo(DeadHostState other) { + if (timeSupplier != other.timeSupplier) { + throw new IllegalArgumentException("can't compare DeadHostStates with different clocks [" + + timeSupplier + " != " + other.timeSupplier + "]"); + } return Long.compare(deadUntilNanos, other.deadUntilNanos); } @@ -94,6 +98,7 @@ public String toString() { return "DeadHostState{" + "failedAttempts=" + failedAttempts + ", deadUntilNanos=" + deadUntilNanos + + ", timeSupplier=" + timeSupplier + '}'; } @@ -101,12 +106,16 @@ public String toString() { * Time supplier that makes timing aspects pluggable to ease testing */ interface TimeSupplier { - TimeSupplier DEFAULT = new TimeSupplier() { @Override public long nanoTime() { return System.nanoTime(); } + + @Override + public String toString() { + return "nanoTime"; + } }; long nanoTime(); diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java new file mode 100644 index 0000000000000..d66d0773016e6 --- /dev/null +++ 
b/client/rest/src/main/java/org/elasticsearch/client/Node.java @@ -0,0 +1,213 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.util.Objects; +import java.util.Set; + +import org.apache.http.HttpHost; + +/** + * Metadata about an {@link HttpHost} running Elasticsearch. + */ +public class Node { + /** + * Address that this host claims is its primary contact point. + */ + private final HttpHost host; + /** + * Addresses on which the host is listening. These are useful to have + * around because they allow you to find a host based on any address it + * is listening on. + */ + private final Set<HttpHost> boundHosts; + /** + * Name of the node as configured by the {@code node.name} attribute. + */ + private final String name; + /** + * Version of Elasticsearch that the node is running or {@code null} + * if we don't know the version. + */ + private final String version; + /** + * Roles that the Elasticsearch process on the host has or {@code null} + * if we don't know what roles the node has. + */ + private final Roles roles; + + /** + * Create a {@linkplain Node} with metadata. All parameters except + * {@code host} are nullable and implementations of {@link NodeSelector} + * need to decide what to do in their absence. + */ + public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version, Roles roles) { + if (host == null) { + throw new IllegalArgumentException("host cannot be null"); + } + this.host = host; + this.boundHosts = boundHosts; + this.name = name; + this.version = version; + this.roles = roles; + } + + /** + * Create a {@linkplain Node} without any metadata. + */ + public Node(HttpHost host) { + this(host, null, null, null, null); + } + + /** + * Contact information for the host. + */ + public HttpHost getHost() { + return host; + } + + /** + * Addresses on which the host is listening. These are useful to have + * around because they allow you to find a host based on any address it + * is listening on. + */ + public Set<HttpHost> getBoundHosts() { + return boundHosts; + } + + /** + * The {@code node.name} of the node. + */ + public String getName() { + return name; + } + + /** + * Version of Elasticsearch that the node is running or {@code null} + * if we don't know the version. + */ + public String getVersion() { + return version; + } + + /** + * Roles that the Elasticsearch process on the host has or {@code null} + * if we don't know what roles the node has. 
+ */ + public Roles getRoles() { + return roles; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + b.append("[host=").append(host); + if (boundHosts != null) { + b.append(", bound=").append(boundHosts); + } + if (name != null) { + b.append(", name=").append(name); + } + if (version != null) { + b.append(", version=").append(version); + } + if (roles != null) { + b.append(", roles=").append(roles); + } + return b.append(']').toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Node other = (Node) obj; + return host.equals(other.host) + && Objects.equals(boundHosts, other.boundHosts) + && Objects.equals(name, other.name) + && Objects.equals(version, other.version) + && Objects.equals(roles, other.roles); + } + + @Override + public int hashCode() { + return Objects.hash(host, boundHosts, name, version, roles); + } + + /** + * Role information about an Elasticsearch process. + */ + public static final class Roles { + private final boolean masterEligible; + private final boolean data; + private final boolean ingest; + + public Roles(boolean masterEligible, boolean data, boolean ingest) { + this.masterEligible = masterEligible; + this.data = data; + this.ingest = ingest; + } + + /** + * Returns whether or not the node could be elected master. + */ + public boolean isMasterEligible() { + return masterEligible; + } + /** + * Returns whether or not the node stores data. + */ + public boolean isData() { + return data; + } + /** + * Returns whether or not the node runs ingest pipelines. + */ + public boolean isIngest() { + return ingest; + } + + @Override + public String toString() { + StringBuilder result = new StringBuilder(3); + if (masterEligible) result.append('m'); + if (data) result.append('d'); + if (ingest) result.append('i'); + return result.toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Roles other = (Roles) obj; + return masterEligible == other.masterEligible + && data == other.data + && ingest == other.ingest; + } + + @Override + public int hashCode() { + return Objects.hash(masterEligible, data, ingest); + } + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java new file mode 100644 index 0000000000000..5f5296fe16b13 --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.util.Iterator; + +/** + * Selects nodes that can receive requests. 
Used to keep requests away + * from master nodes or to send them to nodes with a particular attribute. + * Use with {@link RequestOptions.Builder#setNodeSelector(NodeSelector)}. + */ +public interface NodeSelector { + /** + * Select the {@link Node}s to which to send requests. This is called with + * a mutable {@link Iterable} of {@linkplain Node}s in the order that the + * rest client would prefer to use them and implementers should remove + * nodes from the list that should not receive the request. Implementers may + * iterate the nodes as many times as they need. + *
<p>
+ * This may be called twice per request: first for "living" nodes that + * have not been blacklisted by previous errors. If the selector removes + * all nodes from the list or if there aren't any living nodes then the + * {@link RestClient} will call this method with a list of "dead" nodes. + *
<p>
+ * Implementers should not rely on the ordering of the nodes. + */ + void select(Iterable<Node> nodes); + /* + * We were fairly careful with our choice of Iterable here. The caller has + * a List but reordering the list is likely to break round robin. Luckily + * Iterable doesn't allow any reordering. + */ + + /** + * Selector that matches any node. + */ + NodeSelector ANY = new NodeSelector() { + @Override + public void select(Iterable<Node> nodes) { + // Intentionally does nothing + } + + @Override + public String toString() { + return "ANY"; + } + }; + + /** + * Selector that matches any node that has metadata and doesn't + * have the {@code master} role OR it has the {@code data} + * role. + */ + NodeSelector NOT_MASTER_ONLY = new NodeSelector() { + @Override + public void select(Iterable<Node> nodes) { + for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) { + Node node = itr.next(); + if (node.getRoles() == null) continue; + if (node.getRoles().isMasterEligible() + && false == node.getRoles().isData() + && false == node.getRoles().isIngest()) { + itr.remove(); + } + } + } + + @Override + public String toString() { + return "NOT_MASTER_ONLY"; + } + }; +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java index 07ff89b7e3fb0..7c56a7edf97a9 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -87,14 +87,14 @@ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpR /** * Logs a request that failed */ - static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, Exception e) { + static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) { if (logger.isDebugEnabled()) { - logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e); + logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e); } if (tracer.isTraceEnabled()) { String traceRequest; try { - traceRequest = buildTraceRequest(request, host); + traceRequest = buildTraceRequest(request, node.getHost()); } catch (IOException e1) { tracer.trace("error while reading request for trace purposes", e); traceRequest = ""; diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java index e31db17a336b0..97d150da3d3ff 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java @@ -37,18 +37,21 @@ */ public final class RequestOptions { public static final RequestOptions DEFAULT = new Builder( - Collections.
<Header>
emptyList(), HeapBufferedResponseConsumerFactory.DEFAULT).build(); + Collections.
<Header>emptyList(), NodeSelector.ANY, + HeapBufferedResponseConsumerFactory.DEFAULT).build(); private final List<Header>
headers; + private final NodeSelector nodeSelector; private final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; private RequestOptions(Builder builder) { this.headers = Collections.unmodifiableList(new ArrayList<>(builder.headers)); + this.nodeSelector = builder.nodeSelector; this.httpAsyncResponseConsumerFactory = builder.httpAsyncResponseConsumerFactory; } public Builder toBuilder() { - Builder builder = new Builder(headers, httpAsyncResponseConsumerFactory); + Builder builder = new Builder(headers, nodeSelector, httpAsyncResponseConsumerFactory); return builder; } @@ -59,6 +62,14 @@ public List<Header>
getHeaders() { return headers; } + /** + * The selector that chooses which nodes are valid destinations for + * {@link Request}s with these options. + */ + public NodeSelector getNodeSelector() { + return nodeSelector; + } + /** * The {@link HttpAsyncResponseConsumerFactory} used to create one * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the @@ -82,6 +93,9 @@ public String toString() { b.append(headers.get(h).toString()); } } + if (nodeSelector != NodeSelector.ANY) { + b.append(", nodeSelector=").append(nodeSelector); + } if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory); } @@ -99,20 +113,24 @@ public boolean equals(Object obj) { RequestOptions other = (RequestOptions) obj; return headers.equals(other.headers) + && nodeSelector.equals(other.nodeSelector) && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory); } @Override public int hashCode() { - return Objects.hash(headers, httpAsyncResponseConsumerFactory); + return Objects.hash(headers, nodeSelector, httpAsyncResponseConsumerFactory); } public static class Builder { private final List<Header>
headers; + private NodeSelector nodeSelector; private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; - private Builder(List<Header>
headers, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { + private Builder(List<Header>
headers, NodeSelector nodeSelector, + HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { this.headers = new ArrayList<>(headers); + this.nodeSelector = nodeSelector; this.httpAsyncResponseConsumerFactory = httpAsyncResponseConsumerFactory; } @@ -133,7 +151,15 @@ public void addHeader(String name, String value) { } /** - * set the {@link HttpAsyncResponseConsumerFactory} used to create one + * Configure the selector that chooses which nodes are valid + * destinations for {@link Request}s with these options + */ + public void setNodeSelector(NodeSelector nodeSelector) { + this.nodeSelector = Objects.requireNonNull(nodeSelector, "nodeSelector cannot be null"); + } + + /** + * Set the {@link HttpAsyncResponseConsumerFactory} used to create one * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the * response body gets streamed from a non-blocking HTTP connection on the * client side. diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java index 02aedb4765abe..39bbf769713b2 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Response.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java @@ -40,7 +40,7 @@ public class Response { Response(RequestLine requestLine, HttpHost host, HttpResponse response) { Objects.requireNonNull(requestLine, "requestLine cannot be null"); - Objects.requireNonNull(host, "node cannot be null"); + Objects.requireNonNull(host, "host cannot be null"); Objects.requireNonNull(response, "response cannot be null"); this.requestLine = requestLine; this.host = host; diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 0e603c4069ae4..82039cab5d04c 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -46,10 +46,11 @@ import org.apache.http.nio.client.methods.HttpAsyncMethods; import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.elasticsearch.client.DeadHostState.TimeSupplier; -import javax.net.ssl.SSLHandshakeException; import java.io.Closeable; import java.io.IOException; +import java.net.ConnectException; import java.net.SocketTimeoutException; import java.net.URI; import java.net.URISyntaxException; @@ -57,11 +58,10 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedHashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -74,13 +74,16 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import javax.net.ssl.SSLHandshakeException; + +import static java.util.Collections.singletonList; /** * Client that connects to an Elasticsearch cluster through HTTP. *
<p>
* Must be created using {@link RestClientBuilder}, which allows to set all the different options or just rely on defaults. * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later - * by calling {@link #setHosts(HttpHost...)}. + * by calling {@link #setNodes(Collection)}. *
<p>
* The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and @@ -102,53 +105,93 @@ public class RestClient implements Closeable { final List
<Header>
defaultHeaders; private final long maxRetryTimeoutMillis; private final String pathPrefix; - private final AtomicInteger lastHostIndex = new AtomicInteger(0); - private volatile HostTuple> hostTuple; + private final AtomicInteger lastNodeIndex = new AtomicInteger(0); private final ConcurrentMap blacklist = new ConcurrentHashMap<>(); private final FailureListener failureListener; + private volatile NodeTuple> nodeTuple; RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders, - HttpHost[] hosts, String pathPrefix, FailureListener failureListener) { + List nodes, String pathPrefix, FailureListener failureListener) { this.client = client; this.maxRetryTimeoutMillis = maxRetryTimeoutMillis; this.defaultHeaders = Collections.unmodifiableList(Arrays.asList(defaultHeaders)); this.failureListener = failureListener; this.pathPrefix = pathPrefix; - setHosts(hosts); + setNodes(nodes); } /** * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation. * Creates a new builder instance and sets the hosts that the client will send requests to. + *
<p>
+ * Prefer this to {@link #builder(HttpHost...)} if you have metadata up front about the nodes. + * If you don't, either one is fine. + */ + public static RestClientBuilder builder(Node... nodes) { + return new RestClientBuilder(nodes == null ? null : Arrays.asList(nodes)); + } + + /** + * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation. + * Creates a new builder instance and sets the nodes that the client will send requests to. + *
<p>
+ * You can use this if you do not have metadata up front about the nodes. If you do, prefer + * {@link #builder(Node...)}. + * @see Node#Node(HttpHost) */ public static RestClientBuilder builder(HttpHost... hosts) { - return new RestClientBuilder(hosts); + return new RestClientBuilder(hostsToNodes(hosts)); } /** - * Replaces the hosts that the client communicates with. - * @see HttpHost + * Replaces the hosts with which the client communicates. + * + * @deprecated prefer {@link #setNodes(Collection)} because it allows you + * to set metadata for use with {@link NodeSelector}s */ - public synchronized void setHosts(HttpHost... hosts) { - if (hosts == null || hosts.length == 0) { - throw new IllegalArgumentException("hosts must not be null nor empty"); + @Deprecated + public void setHosts(HttpHost... hosts) { + setNodes(hostsToNodes(hosts)); + } + + /** + * Replaces the nodes with which the client communicates. + */ + public synchronized void setNodes(Collection<Node> nodes) { + if (nodes == null || nodes.isEmpty()) { + throw new IllegalArgumentException("nodes must not be null or empty"); } - Set<HttpHost> httpHosts = new LinkedHashSet<>(); AuthCache authCache = new BasicAuthCache(); - for (HttpHost host : hosts) { - Objects.requireNonNull(host, "host cannot be null"); - httpHosts.add(host); - authCache.put(host, new BasicScheme()); + + Map<HttpHost, Node> nodesByHost = new LinkedHashMap<>(); + for (Node node : nodes) { + Objects.requireNonNull(node, "node cannot be null"); + // TODO should we throw an IAE if we have two nodes with the same host? + nodesByHost.put(node.getHost(), node); + authCache.put(node.getHost(), new BasicScheme()); } - this.hostTuple = new HostTuple<>(Collections.unmodifiableSet(httpHosts), authCache); + this.nodeTuple = new NodeTuple<>( + Collections.unmodifiableList(new ArrayList<>(nodesByHost.values())), authCache); this.blacklist.clear(); } + private static List<Node> hostsToNodes(HttpHost[] hosts) { + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("hosts must not be null nor empty"); + } + List<Node> nodes = new ArrayList<>(hosts.length); + for (int i = 0; i < hosts.length; i++) { + nodes.add(new Node(hosts[i])); + } + return nodes; + } + /** - * Returns the configured hosts + * Get the list of nodes that the client knows about. The list is + * unmodifiable. 
*/ - public List getHosts() { - return new ArrayList<>(hostTuple.hosts); + public List getNodes() { + return nodeTuple.nodes; } /** @@ -434,7 +477,7 @@ public void performRequestAsync(String method, String endpoint, Map requestParams = new HashMap<>(request.getParameters()); //ignore is a special parameter supported by the clients, shouldn't be sent to es String ignoreString = requestParams.remove("ignore"); @@ -466,40 +509,40 @@ void performRequestAsyncNoCatch(Request request, ResponseListener listener) { setHeaders(httpRequest, request.getOptions().getHeaders()); FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(listener); long startTime = System.nanoTime(); - performRequestAsync(startTime, nextHost(), httpRequest, ignoreErrorCodes, + performRequestAsync(startTime, nextNode(request.getOptions().getNodeSelector()), httpRequest, ignoreErrorCodes, request.getOptions().getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener); } - private void performRequestAsync(final long startTime, final HostTuple> hostTuple, final HttpRequestBase request, + private void performRequestAsync(final long startTime, final NodeTuple> nodeTuple, final HttpRequestBase request, final Set ignoreErrorCodes, final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, final FailureTrackingResponseListener listener) { - final HttpHost host = hostTuple.hosts.next(); + final Node node = nodeTuple.nodes.next(); //we stream the request body if the entity allows for it - final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request); + final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(node.getHost(), request); final HttpAsyncResponseConsumer asyncResponseConsumer = httpAsyncResponseConsumerFactory.createHttpAsyncResponseConsumer(); final HttpClientContext context = HttpClientContext.create(); - context.setAuthCache(hostTuple.authCache); + context.setAuthCache(nodeTuple.authCache); client.execute(requestProducer, asyncResponseConsumer, context, new FutureCallback() { @Override public void completed(HttpResponse httpResponse) { try { - RequestLogger.logResponse(logger, request, host, httpResponse); + RequestLogger.logResponse(logger, request, node.getHost(), httpResponse); int statusCode = httpResponse.getStatusLine().getStatusCode(); - Response response = new Response(request.getRequestLine(), host, httpResponse); + Response response = new Response(request.getRequestLine(), node.getHost(), httpResponse); if (isSuccessfulResponse(statusCode) || ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { - onResponse(host); + onResponse(node); listener.onSuccess(response); } else { ResponseException responseException = new ResponseException(response); if (isRetryStatus(statusCode)) { //mark host dead and retry against next one - onFailure(host); + onFailure(node); retryIfPossible(responseException); } else { //mark host alive and don't retry, as the error should be a request problem - onResponse(host); + onResponse(node); listener.onDefinitiveFailure(responseException); } } @@ -511,8 +554,8 @@ public void completed(HttpResponse httpResponse) { @Override public void failed(Exception failure) { try { - RequestLogger.logFailedRequest(logger, request, host, failure); - onFailure(host); + RequestLogger.logFailedRequest(logger, request, node, failure); + onFailure(node); retryIfPossible(failure); } catch(Exception e) { listener.onDefinitiveFailure(e); @@ -520,7 +563,7 @@ public void 
failed(Exception failure) { } private void retryIfPossible(Exception exception) { - if (hostTuple.hosts.hasNext()) { + if (nodeTuple.nodes.hasNext()) { //in case we are retrying, check whether maxRetryTimeout has been reached long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime); long timeout = maxRetryTimeoutMillis - timeElapsedMillis; @@ -531,7 +574,7 @@ private void retryIfPossible(Exception exception) { } else { listener.trackFailure(exception); request.reset(); - performRequestAsync(startTime, hostTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener); + performRequestAsync(startTime, nodeTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener); } } else { listener.onDefinitiveFailure(exception); @@ -560,54 +603,103 @@ private void setHeaders(HttpRequest httpRequest, Collection
<Header>
requestHeade } /** - * Returns an {@link Iterable} of hosts to be used for a request call. - * Ideally, the first host is retrieved from the iterable and used successfully for the request. - * Otherwise, after each failure the next host has to be retrieved from the iterator so that the request can be retried until - * there are no more hosts available to retry against. The maximum total of attempts is equal to the number of hosts in the iterable. - * The iterator returned will never be empty. In case there are no healthy hosts available, or dead ones to be be retried, - * one dead host gets returned so that it can be retried. + * Returns a non-empty {@link Iterator} of nodes to be used for a request + * that match the {@link NodeSelector}. + *
<p>
+ * If there are no living nodes that match the {@link NodeSelector} + * this will return the dead node that matches the {@link NodeSelector} + * that is closest to being revived. + * @throws IOException if no nodes are available */ - private HostTuple> nextHost() { - final HostTuple> hostTuple = this.hostTuple; - Collection nextHosts = Collections.emptySet(); - do { - Set filteredHosts = new HashSet<>(hostTuple.hosts); - for (Map.Entry entry : blacklist.entrySet()) { - if (entry.getValue().shallBeRetried() == false) { - filteredHosts.remove(entry.getKey()); - } + private NodeTuple> nextNode(NodeSelector nodeSelector) throws IOException { + NodeTuple> nodeTuple = this.nodeTuple; + List hosts = selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector); + return new NodeTuple<>(hosts.iterator(), nodeTuple.authCache); + } + + /** + * Select hosts to try. Package private for testing. + */ + static List selectHosts(NodeTuple> nodeTuple, + Map blacklist, AtomicInteger lastNodeIndex, + NodeSelector nodeSelector) throws IOException { + /* + * Sort the nodes into living and dead lists. + */ + List livingNodes = new ArrayList<>(nodeTuple.nodes.size() - blacklist.size()); + List deadNodes = new ArrayList<>(blacklist.size()); + for (Node node : nodeTuple.nodes) { + DeadHostState deadness = blacklist.get(node.getHost()); + if (deadness == null) { + livingNodes.add(node); + continue; } - if (filteredHosts.isEmpty()) { - //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried - List> sortedHosts = new ArrayList<>(blacklist.entrySet()); - if (sortedHosts.size() > 0) { - Collections.sort(sortedHosts, new Comparator>() { - @Override - public int compare(Map.Entry o1, Map.Entry o2) { - return o1.getValue().compareTo(o2.getValue()); - } - }); - HttpHost deadHost = sortedHosts.get(0).getKey(); - logger.trace("resurrecting host [" + deadHost + "]"); - nextHosts = Collections.singleton(deadHost); + if (deadness.shallBeRetried()) { + livingNodes.add(node); + continue; + } + deadNodes.add(new DeadNode(node, deadness)); + } + + if (false == livingNodes.isEmpty()) { + /* + * Normal state: there is at least one living node. If the + * selector is ok with any over the living nodes then use them + * for the request. + */ + List selectedLivingNodes = new ArrayList<>(livingNodes); + nodeSelector.select(selectedLivingNodes); + if (false == selectedLivingNodes.isEmpty()) { + /* + * Rotate the list so subsequent requests will prefer the + * nodes in a different order. + */ + Collections.rotate(selectedLivingNodes, lastNodeIndex.getAndIncrement()); + return selectedLivingNodes; + } + } + + /* + * Last resort: If there are no good nodes to use, either because + * the selector rejected all the living nodes or because there aren't + * any living ones. Either way, we want to revive a single dead node + * that the NodeSelectors are OK with. We do this by sorting the dead + * nodes by their revival time and passing them through the + * NodeSelector so it can have its say in which nodes are ok and their + * ordering. If the selector is ok with any of the nodes then use just + * the first one in the list because we only want to revive a single + * node. + */ + if (false == deadNodes.isEmpty()) { + final List selectedDeadNodes = new ArrayList<>(deadNodes); + /* + * We'd like NodeSelectors to remove items directly from deadNodes + * so we can find the minimum after it is filtered without having + * to compare many things. This saves us a sort on the unfiltered + * list. 
+ */ + nodeSelector.select(new Iterable() { + @Override + public Iterator iterator() { + return new DeadNodeIteratorAdapter(selectedDeadNodes.iterator()); } - } else { - List rotatedHosts = new ArrayList<>(filteredHosts); - Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement()); - nextHosts = rotatedHosts; + }); + if (false == selectedDeadNodes.isEmpty()) { + return singletonList(Collections.min(selectedDeadNodes).node); } - } while(nextHosts.isEmpty()); - return new HostTuple<>(nextHosts.iterator(), hostTuple.authCache); + } + throw new IOException("NodeSelector [" + nodeSelector + "] rejected all nodes, " + + "living " + livingNodes + " and dead " + deadNodes); } /** * Called after each successful request call. * Receives as an argument the host that was used for the successful request. */ - private void onResponse(HttpHost host) { - DeadHostState removedHost = this.blacklist.remove(host); + private void onResponse(Node node) { + DeadHostState removedHost = this.blacklist.remove(node.getHost()); if (logger.isDebugEnabled() && removedHost != null) { - logger.debug("removed host [" + host + "] from blacklist"); + logger.debug("removed [" + node + "] from blacklist"); } } @@ -615,20 +707,25 @@ private void onResponse(HttpHost host) { * Called after each failed attempt. * Receives as an argument the host that was used for the failed attempt. */ - private void onFailure(HttpHost host) { + private void onFailure(Node node) { while(true) { - DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, new DeadHostState(DeadHostState.TimeSupplier.DEFAULT)); + DeadHostState previousDeadHostState = + blacklist.putIfAbsent(node.getHost(), new DeadHostState(TimeSupplier.DEFAULT)); if (previousDeadHostState == null) { - logger.debug("added host [" + host + "] to blacklist"); + if (logger.isDebugEnabled()) { + logger.debug("added [" + node + "] to blacklist"); + } break; } - if (blacklist.replace(host, previousDeadHostState, - new DeadHostState(previousDeadHostState, DeadHostState.TimeSupplier.DEFAULT))) { - logger.debug("updated host [" + host + "] already in blacklist"); + if (blacklist.replace(node.getHost(), previousDeadHostState, + new DeadHostState(previousDeadHostState))) { + if (logger.isDebugEnabled()) { + logger.debug("updated [" + node + "] already in blacklist"); + } break; } } - failureListener.onFailure(host); + failureListener.onFailure(node); } @Override @@ -840,6 +937,11 @@ Response get() throws IOException { e.initCause(exception); throw e; } + if (exception instanceof ConnectException) { + ConnectException e = new ConnectException(exception.getMessage()); + e.initCause(exception); + throw e; + } if (exception instanceof IOException) { throw new IOException(exception.getMessage(), exception); } @@ -862,27 +964,76 @@ Response get() throws IOException { */ public static class FailureListener { /** - * Notifies that the host provided as argument has just failed + * Notifies that the node provided as argument has just failed */ - public void onFailure(HttpHost host) { - - } + public void onFailure(Node node) {} } /** - * {@code HostTuple} enables the {@linkplain HttpHost}s and {@linkplain AuthCache} to be set together in a thread - * safe, volatile way. + * {@link NodeTuple} enables the {@linkplain Node}s and {@linkplain AuthCache} + * to be set together in a thread safe, volatile way. 
*/ - private static class HostTuple { - final T hosts; + static class NodeTuple { + final T nodes; final AuthCache authCache; - HostTuple(final T hosts, final AuthCache authCache) { - this.hosts = hosts; + NodeTuple(final T nodes, final AuthCache authCache) { + this.nodes = nodes; this.authCache = authCache; } } + /** + * Contains a reference to a blacklisted node and the time until it is + * revived. We use this so we can do a single pass over the blacklist. + */ + private static class DeadNode implements Comparable { + final Node node; + final DeadHostState deadness; + + DeadNode(Node node, DeadHostState deadness) { + this.node = node; + this.deadness = deadness; + } + + @Override + public String toString() { + return node.toString(); + } + + @Override + public int compareTo(DeadNode rhs) { + return deadness.compareTo(rhs.deadness); + } + } + + /** + * Adapts an Iterator into an + * Iterator. + */ + private static class DeadNodeIteratorAdapter implements Iterator { + private final Iterator itr; + + private DeadNodeIteratorAdapter(Iterator itr) { + this.itr = itr; + } + + @Override + public boolean hasNext() { + return itr.hasNext(); + } + + @Override + public Node next() { + return itr.next().node; + } + + @Override + public void remove() { + itr.remove(); + } + } + /** * Add all headers from the provided varargs argument to a {@link Request}. This only exists * to support methods that exist for backwards compatibility. diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java index 5f7831c67fc28..17d27248dfea9 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java @@ -20,7 +20,6 @@ package org.elasticsearch.client; import org.apache.http.Header; -import org.apache.http.HttpHost; import org.apache.http.client.config.RequestConfig; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; @@ -32,6 +31,7 @@ import java.security.AccessController; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedAction; +import java.util.List; import java.util.Objects; /** @@ -48,7 +48,7 @@ public final class RestClientBuilder { private static final Header[] EMPTY_HEADERS = new Header[0]; - private final HttpHost[] hosts; + private final List nodes; private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS; private Header[] defaultHeaders = EMPTY_HEADERS; private RestClient.FailureListener failureListener; @@ -59,18 +59,18 @@ public final class RestClientBuilder { /** * Creates a new builder instance and sets the hosts that the client will send requests to. * - * @throws NullPointerException if {@code hosts} or any host is {@code null}. - * @throws IllegalArgumentException if {@code hosts} is empty. + * @throws IllegalArgumentException if {@code nodes} is {@code null} or empty. */ - RestClientBuilder(HttpHost... 
hosts) { - Objects.requireNonNull(hosts, "hosts must not be null"); - if (hosts.length == 0) { - throw new IllegalArgumentException("no hosts provided"); + RestClientBuilder(List nodes) { + if (nodes == null || nodes.isEmpty()) { + throw new IllegalArgumentException("nodes must not be null or empty"); } - for (HttpHost host : hosts) { - Objects.requireNonNull(host, "host cannot be null"); + for (Node node : nodes) { + if (node == null) { + throw new IllegalArgumentException("node cannot be null"); + } } - this.hosts = hosts; + this.nodes = nodes; } /** @@ -186,7 +186,7 @@ public CloseableHttpAsyncClient run() { return createHttpClient(); } }); - RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, pathPrefix, failureListener); + RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, nodes, pathPrefix, failureListener); httpClient.start(); return restClient; } diff --git a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java index 75fbafd88f83c..daea27f896328 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java @@ -21,11 +21,15 @@ import java.util.concurrent.TimeUnit; +import org.elasticsearch.client.DeadHostState.TimeSupplier; + import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; public class DeadHostStateTests extends RestClientTestCase { @@ -42,7 +46,7 @@ public void testDeadHostStateFromPreviousDefaultTimeSupplier() { DeadHostState previous = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); int iters = randomIntBetween(5, 30); for (int i = 0; i < iters; i++) { - DeadHostState deadHostState = new DeadHostState(previous, DeadHostState.TimeSupplier.DEFAULT); + DeadHostState deadHostState = new DeadHostState(previous); assertThat(deadHostState.getDeadUntilNanos(), greaterThan(previous.getDeadUntilNanos())); assertThat(deadHostState.getFailedAttempts(), equalTo(previous.getFailedAttempts() + 1)); previous = deadHostState; @@ -56,7 +60,7 @@ public void testCompareToDefaultTimeSupplier() { if (i == 0) { deadHostStates[i] = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); } else { - deadHostStates[i] = new DeadHostState(deadHostStates[i - 1], DeadHostState.TimeSupplier.DEFAULT); + deadHostStates[i] = new DeadHostState(deadHostStates[i - 1]); } } for (int k = 1; k < deadHostStates.length; k++) { @@ -65,6 +69,17 @@ public void testCompareToDefaultTimeSupplier() { } } + public void testCompareToDifferingTimeSupplier() { + try { + new DeadHostState(TimeSupplier.DEFAULT).compareTo( + new DeadHostState(new ConfigurableTimeSupplier())); + fail("expected failure"); + } catch (IllegalArgumentException e) { + assertEquals("can't compare DeadHostStates with different clocks [nanoTime != configured[0]]", + e.getMessage()); + } + } + public void testShallBeRetried() { ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier(); DeadHostState deadHostState = null; @@ -74,7 +89,7 @@ public void testShallBeRetried() { if (i == 0) { deadHostState = new DeadHostState(timeSupplier); } else { - deadHostState = new DeadHostState(deadHostState, 
timeSupplier); + deadHostState = new DeadHostState(deadHostState); } for (int j = 0; j < expectedTimeoutSecond; j++) { timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1); @@ -94,25 +109,29 @@ public void testDeadHostStateTimeouts() { DeadHostState previous = new DeadHostState(zeroTimeSupplier); for (long expectedTimeoutsSecond : EXPECTED_TIMEOUTS_SECONDS) { assertThat(TimeUnit.NANOSECONDS.toSeconds(previous.getDeadUntilNanos()), equalTo(expectedTimeoutsSecond)); - previous = new DeadHostState(previous, zeroTimeSupplier); + previous = new DeadHostState(previous); } //check that from here on the timeout does not increase int iters = randomIntBetween(5, 30); for (int i = 0; i < iters; i++) { - DeadHostState deadHostState = new DeadHostState(previous, zeroTimeSupplier); + DeadHostState deadHostState = new DeadHostState(previous); assertThat(TimeUnit.NANOSECONDS.toSeconds(deadHostState.getDeadUntilNanos()), equalTo(EXPECTED_TIMEOUTS_SECONDS[EXPECTED_TIMEOUTS_SECONDS.length - 1])); previous = deadHostState; } } - private static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier { - + static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier { long nanoTime; @Override public long nanoTime() { return nanoTime; } + + @Override + public String toString() { + return "configured[" + nanoTime + "]"; + } } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java b/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java index e2f0ba81f6ed7..6c952fcf94759 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java +++ b/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java @@ -22,6 +22,7 @@ import org.apache.http.HttpHost; import java.util.HashSet; +import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -29,14 +30,22 @@ import static org.junit.Assert.assertThat; /** - * {@link org.elasticsearch.client.RestClient.FailureListener} impl that allows to track when it gets called for which host. + * {@link RestClient.FailureListener} impl that allows to track when it gets called for which host. */ class HostsTrackingFailureListener extends RestClient.FailureListener { private volatile Set hosts = new HashSet<>(); @Override - public void onFailure(HttpHost host) { - hosts.add(host); + public void onFailure(Node node) { + hosts.add(node.getHost()); + } + + void assertCalled(List nodes) { + HttpHost[] hosts = new HttpHost[nodes.size()]; + for (int i = 0 ; i < nodes.size(); i++) { + hosts[i] = nodes.get(i).getHost(); + } + assertCalled(hosts); } void assertCalled(HttpHost... hosts) { @@ -48,4 +57,4 @@ void assertCalled(HttpHost... hosts) { void assertNotCalled() { assertEquals(0, hosts.size()); } -} \ No newline at end of file +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java new file mode 100644 index 0000000000000..d9df001ad437e --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node.Roles; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class NodeSelectorTests extends RestClientTestCase { + public void testAny() { + List nodes = new ArrayList<>(); + int size = between(2, 5); + for (int i = 0; i < size; i++) { + nodes.add(dummyNode(randomBoolean(), randomBoolean(), randomBoolean())); + } + List expected = new ArrayList<>(nodes); + NodeSelector.ANY.select(nodes); + assertEquals(expected, nodes); + } + + public void testNotMasterOnly() { + Node masterOnly = dummyNode(true, false, false); + Node all = dummyNode(true, true, true); + Node masterAndData = dummyNode(true, true, false); + Node masterAndIngest = dummyNode(true, false, true); + Node coordinatingOnly = dummyNode(false, false, false); + Node ingestOnly = dummyNode(false, false, true); + Node data = dummyNode(false, true, randomBoolean()); + List nodes = new ArrayList<>(); + nodes.add(masterOnly); + nodes.add(all); + nodes.add(masterAndData); + nodes.add(masterAndIngest); + nodes.add(coordinatingOnly); + nodes.add(ingestOnly); + nodes.add(data); + Collections.shuffle(nodes, getRandom()); + List expected = new ArrayList<>(nodes); + expected.remove(masterOnly); + NodeSelector.NOT_MASTER_ONLY.select(nodes); + assertEquals(expected, nodes); + } + + private Node dummyNode(boolean master, boolean data, boolean ingest) { + return new Node(new HttpHost("dummy"), Collections.emptySet(), + randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), + new Roles(master, data, ingest)); + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java new file mode 100644 index 0000000000000..c6d60415b88dc --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node.Roles; + +import java.util.Arrays; +import java.util.HashSet; + +import static java.util.Collections.singleton; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class NodeTests extends RestClientTestCase { + public void testToString() { + assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString()); + assertEquals("[host=http://1, roles=mdi]", new Node(new HttpHost("1"), + null, null, null, new Roles(true, true, true)).toString()); + assertEquals("[host=http://1, version=ver]", new Node(new HttpHost("1"), + null, null, "ver", null).toString()); + assertEquals("[host=http://1, name=nam]", new Node(new HttpHost("1"), + null, "nam", null, null).toString()); + assertEquals("[host=http://1, bound=[http://1, http://2]]", new Node(new HttpHost("1"), + new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null).toString()); + assertEquals("[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m]", + new Node(new HttpHost("1"), new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), + "nam", "ver", new Roles(true, false, false)).toString()); + + } + + public void testEqualsAndHashCode() { + HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5)); + Node node = new Node(host, + randomBoolean() ? null : singleton(host), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : new Roles(true, true, true)); + assertFalse(node.equals(null)); + assertTrue(node.equals(node)); + assertEquals(node.hashCode(), node.hashCode()); + Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles()); + assertTrue(node.equals(copy)); + assertEquals(node.hashCode(), copy.hashCode()); + assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(), + node.getName(), node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))), + node.getName(), node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false)))); + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java index 19106792228d9..a78be6c126bae 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java @@ -114,6 +114,10 @@ static RequestOptions.Builder randomBuilder() { } } + if (randomBoolean()) { + builder.setNodeSelector(mock(NodeSelector.class)); + } + if (randomBoolean()) { builder.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); } @@ -127,12 +131,15 @@ private static RequestOptions copy(RequestOptions options) { private static RequestOptions mutate(RequestOptions options) { RequestOptions.Builder mutant 
= options.toBuilder(); - int mutationType = between(0, 1); + int mutationType = between(0, 2); switch (mutationType) { case 0: mutant.addHeader("extra", "m"); return mutant.build(); case 1: + mutant.setNodeSelector(mock(NodeSelector.class)); + return mutant.build(); + case 2: mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); return mutant.build(); default: diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 9657e782bda04..9fcb4978e28a7 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -39,21 +39,42 @@ public void testBuild() throws IOException { try { RestClient.builder((HttpHost[])null); fail("should have failed"); - } catch(NullPointerException e) { - assertEquals("hosts must not be null", e.getMessage()); + } catch(IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + + try { + RestClient.builder(new HttpHost[] {}); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); } try { - RestClient.builder(); + RestClient.builder((Node[])null); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals("no hosts provided", e.getMessage()); + assertEquals("nodes must not be null or empty", e.getMessage()); + } + + try { + RestClient.builder(new Node[] {}); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + + try { + RestClient.builder(new Node(new HttpHost("localhost", 9200)), null); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("node cannot be null", e.getMessage()); } try { RestClient.builder(new HttpHost("localhost", 9200), null); fail("should have failed"); - } catch(NullPointerException e) { + } catch(IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index aa323276404cf..92a960090ad6a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -29,9 +29,11 @@ import org.junit.BeforeClass; import java.io.IOException; +import java.net.ConnectException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -42,6 +44,7 @@ import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. 
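The integration test in the next hunks exercises the new `NodeSelector` hook end to end. For orientation, here is a rough caller-side sketch of the API it relies on; the class name and the version string are invented for illustration, but `select(Iterable<Node>)`, `Node#getVersion()`, and the remove-to-exclude contract are exactly what this change introduces:

```java
import java.util.Iterator;

import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;

/**
 * Illustrative selector: keep only nodes whose sniffed version
 * matches exactly. Removing a node from the mutable view the
 * client passes in excludes it from the current request.
 */
class ExactVersionSelector implements NodeSelector {
    private final String version;

    ExactVersionSelector(String version) {
        this.version = version;
    }

    @Override
    public void select(Iterable<Node> nodes) {
        for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
            if (version.equals(itr.next().getVersion()) == false) {
                itr.remove();
            }
        }
    }
}
```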
@@ -50,31 +53,37 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { private static HttpServer[] httpServers; - private static RestClient restClient; + private static HttpHost[] httpHosts; + private static boolean stoppedFirstHost = false; + private static String pathPrefixWithoutLeadingSlash; private static String pathPrefix; + private static RestClient restClient; @BeforeClass public static void startHttpServer() throws Exception { - String pathPrefixWithoutLeadingSlash; if (randomBoolean()) { - pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiOfLengthBetween(1, 5); + pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiLettersOfLengthBetween(1, 5); pathPrefix = "/" + pathPrefixWithoutLeadingSlash; } else { pathPrefix = pathPrefixWithoutLeadingSlash = ""; } int numHttpServers = randomIntBetween(2, 4); httpServers = new HttpServer[numHttpServers]; - HttpHost[] httpHosts = new HttpHost[numHttpServers]; + httpHosts = new HttpHost[numHttpServers]; for (int i = 0; i < numHttpServers; i++) { HttpServer httpServer = createHttpServer(); httpServers[i] = httpServer; httpHosts[i] = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); } + restClient = buildRestClient(); + } + + private static RestClient buildRestClient() { RestClientBuilder restClientBuilder = RestClient.builder(httpHosts); if (pathPrefix.length() > 0) { restClientBuilder.setPathPrefix((randomBoolean() ? "/" : "") + pathPrefixWithoutLeadingSlash); } - restClient = restClientBuilder.build(); + return restClientBuilder.build(); } private static HttpServer createHttpServer() throws Exception { @@ -118,6 +127,9 @@ public void stopRandomHost() { if (httpServers.length > 1 && randomBoolean()) { List<HttpServer> updatedHttpServers = new ArrayList<>(httpServers.length - 1); int nodeIndex = randomInt(httpServers.length - 1); + if (0 == nodeIndex) { + stoppedFirstHost = true; + } for (int i = 0; i < httpServers.length; i++) { HttpServer httpServer = httpServers[i]; if (i == nodeIndex) { @@ -182,6 +194,35 @@ public void onFailure(Exception exception) { } } + /** + * Test host selector against a real server and + * test what happens after one of the hosts is stopped. + */ + public void testNodeSelector() throws IOException { + Request request = new Request("GET", "/200"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setNodeSelector(firstPositionNodeSelector()); + request.setOptions(options); + int rounds = between(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior.
+ */ + if (stoppedFirstHost) { + try { + restClient.performRequest(request); + fail("expected to fail to connect"); + } catch (ConnectException e) { + assertEquals("Connection refused", e.getMessage()); + } + } else { + Response response = restClient.performRequest(request); + assertEquals(httpHosts[0], response.getHost()); + } + } + } + private static class TestResponse { private final String method; private final int statusCode; @@ -203,4 +244,17 @@ Response getResponse() { throw new AssertionError("unexpected response " + response.getClass()); } } + + private NodeSelector firstPositionNodeSelector() { + return new NodeSelector() { + @Override + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + if (httpHosts[0] != itr.next().getHost()) { + itr.remove(); + } + } + } + }; + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index a3a834ff3204b..eb591f4ccff3a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -35,6 +35,7 @@ import org.apache.http.message.BasicStatusLine; import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.elasticsearch.client.Node.Roles; import org.junit.After; import org.junit.Before; import org.mockito.invocation.InvocationOnMock; @@ -42,8 +43,11 @@ import java.io.IOException; import java.net.SocketTimeoutException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; +import java.util.Iterator; +import java.util.List; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -71,7 +75,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { private ExecutorService exec = Executors.newFixedThreadPool(1); private RestClient restClient; - private HttpHost[] httpHosts; + private List nodes; private HostsTrackingFailureListener failureListener; @Before @@ -108,13 +112,14 @@ public void run() { return null; } }); - int numHosts = RandomNumbers.randomIntBetween(getRandom(), 2, 5); - httpHosts = new HttpHost[numHosts]; - for (int i = 0; i < numHosts; i++) { - httpHosts[i] = new HttpHost("localhost", 9200 + i); + int numNodes = RandomNumbers.randomIntBetween(getRandom(), 2, 5); + nodes = new ArrayList<>(numNodes); + for (int i = 0; i < numNodes; i++) { + nodes.add(new Node(new HttpHost("localhost", 9200 + i))); } + nodes = Collections.unmodifiableList(nodes); failureListener = new HostsTrackingFailureListener(); - restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, null, failureListener); + restClient = new RestClient(httpClient, 10000, new Header[0], nodes, null, failureListener); } /** @@ -128,9 +133,8 @@ public void shutdownExec() { public void testRoundRobinOkStatusCodes() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); - for (int j = 0; j < httpHosts.length; j++) { + Set hostsSet = hostsSet(); + for (int j = 0; j < nodes.size(); j++) { int statusCode = randomOkStatusCode(getRandom()); Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode); 
assertEquals(statusCode, response.getStatusLine().getStatusCode()); @@ -144,9 +148,8 @@ public void testRoundRobinOkStatusCodes() throws IOException { public void testRoundRobinNoRetryErrors() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); - for (int j = 0; j < httpHosts.length; j++) { + Set hostsSet = hostsSet(); + for (int j = 0; j < nodes.size(); j++) { String method = randomHttpMethod(getRandom()); int statusCode = randomErrorNoRetryStatusCode(getRandom()); try { @@ -185,10 +188,9 @@ public void testRoundRobinRetryErrors() throws IOException { * the caller. It wraps the exception that contains the failed hosts. */ e = (ResponseException) e.getCause(); - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); + Set hostsSet = hostsSet(); //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each - failureListener.assertCalled(httpHosts); + failureListener.assertCalled(nodes); do { Response response = e.getResponse(); assertEquals(Integer.parseInt(retryEndpoint.substring(1)), response.getStatusLine().getStatusCode()); @@ -210,10 +212,9 @@ public void testRoundRobinRetryErrors() throws IOException { * the caller. It wraps the exception that contains the failed hosts. */ e = (IOException) e.getCause(); - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); + Set hostsSet = hostsSet(); //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each - failureListener.assertCalled(httpHosts); + failureListener.assertCalled(nodes); do { HttpHost httpHost = HttpHost.create(e.getMessage()); assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost)); @@ -232,9 +233,8 @@ public void testRoundRobinRetryErrors() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 2, 5); for (int i = 1; i <= numIters; i++) { //check that one different host is resurrected at each new attempt - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); - for (int j = 0; j < httpHosts.length; j++) { + Set hostsSet = hostsSet(); + for (int j = 0; j < nodes.size(); j++) { retryEndpoint = randomErrorRetryEndpoint(); try { restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); @@ -308,6 +308,58 @@ public void testRoundRobinRetryErrors() throws IOException { } } + public void testNodeSelector() throws IOException { + NodeSelector firstPositionOnly = new NodeSelector() { + @Override + public void select(Iterable restClientNodes) { + boolean found = false; + for (Iterator itr = restClientNodes.iterator(); itr.hasNext();) { + if (nodes.get(0) == itr.next()) { + found = true; + } else { + itr.remove(); + } + } + assertTrue(found); + } + }; + int rounds = between(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. 
+ */ + Request request = new Request("GET", "/200"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setNodeSelector(firstPositionOnly); + request.setOptions(options); + Response response = restClient.performRequest(request); + assertEquals(nodes.get(0).getHost(), response.getHost()); + } + } + + public void testSetNodes() throws IOException { + List newNodes = new ArrayList<>(nodes.size()); + for (int i = 0; i < nodes.size(); i++) { + Roles roles = i == 0 ? new Roles(false, true, true) : new Roles(true, false, false); + newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles)); + } + restClient.setNodes(newNodes); + int rounds = between(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. + */ + Request request = new Request("GET", "/200"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + request.setOptions(options); + Response response = restClient.performRequest(request); + assertEquals(newNodes.get(0).getHost(), response.getHost()); + } + } + private static String randomErrorRetryEndpoint() { switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) { case 0: @@ -321,4 +373,16 @@ private static String randomErrorRetryEndpoint() { } throw new UnsupportedOperationException(); } + + /** + * Build a mutable {@link Set} containing all the {@link Node#getHost() hosts} + * in use by the test. + */ + private Set hostsSet() { + Set hosts = new HashSet<>(); + for (Node node : nodes) { + hosts.add(node.getHost()); + } + return hosts; + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 634929c5de156..5987fe7dd9849 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -65,6 +65,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; +import static java.util.Collections.singletonList; import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes; @@ -94,7 +95,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { private ExecutorService exec = Executors.newFixedThreadPool(1); private RestClient restClient; private Header[] defaultHeaders; - private HttpHost httpHost; + private Node node; private CloseableHttpAsyncClient httpClient; private HostsTrackingFailureListener failureListener; @@ -108,7 +109,7 @@ public void createRestClient() { public Future answer(InvocationOnMock invocationOnMock) throws Throwable { HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; HttpClientContext context = (HttpClientContext) invocationOnMock.getArguments()[2]; - assertThat(context.getAuthCache().get(httpHost), instanceOf(BasicScheme.class)); + assertThat(context.getAuthCache().get(node.getHost()), instanceOf(BasicScheme.class)); final FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest(); @@ -146,9 +147,10 @@ public void run() { }); defaultHeaders = 
RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); - httpHost = new HttpHost("localhost", 9200); + node = new Node(new HttpHost("localhost", 9200)); failureListener = new HostsTrackingFailureListener(); - restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener); + restClient = new RestClient(httpClient, 10000, defaultHeaders, + singletonList(node), null, failureListener); } /** @@ -244,7 +246,7 @@ public void testErrorStatusCodes() throws IOException { if (errorStatusCode <= 500 || expectedIgnores.contains(errorStatusCode)) { failureListener.assertNotCalled(); } else { - failureListener.assertCalled(httpHost); + failureListener.assertCalled(singletonList(node)); } } } @@ -259,14 +261,14 @@ public void testIOExceptions() { } catch(IOException e) { assertThat(e, instanceOf(ConnectTimeoutException.class)); } - failureListener.assertCalled(httpHost); + failureListener.assertCalled(singletonList(node)); try { performRequest(method, "/soe"); fail("request should have failed"); } catch(IOException e) { assertThat(e, instanceOf(SocketTimeoutException.class)); } - failureListener.assertCalled(httpHost); + failureListener.assertCalled(singletonList(node)); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 5fe5fcae78fee..01f6f308f6227 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -22,14 +22,23 @@ import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import org.elasticsearch.client.DeadHostStateTests.ConfigurableTimeSupplier; +import org.elasticsearch.client.RestClient.NodeTuple; import java.io.IOException; import java.net.URI; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.TimeUnit; +import static java.util.Collections.singletonList; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; @@ -43,9 +52,9 @@ public class RestClientTests extends RestClientTestCase { public void testCloseIsIdempotent() throws IOException { - HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)}; + List nodes = singletonList(new Node(new HttpHost("localhost", 9200))); CloseableHttpAsyncClient closeableHttpAsyncClient = mock(CloseableHttpAsyncClient.class); - RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], hosts, null, null); + RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], nodes, null, null); restClient.close(); verify(closeableHttpAsyncClient, times(1)).close(); restClient.close(); @@ -225,6 +234,7 @@ public void testBuildUriLeavesPathUntouched() { } } + @Deprecated public void testSetHostsWrongArguments() throws IOException { try (RestClient restClient = createRestClient()) { restClient.setHosts((HttpHost[]) null); @@ -241,45 +251,75 @@ public void testSetHostsWrongArguments() throws IOException { try (RestClient restClient = createRestClient()) { restClient.setHosts((HttpHost) null); 
fail("setHosts should have failed"); - } catch (NullPointerException e) { + } catch (IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } try (RestClient restClient = createRestClient()) { restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); fail("setHosts should have failed"); - } catch (NullPointerException e) { + } catch (IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } } - public void testSetHostsPreservesOrdering() throws Exception { + public void testSetNodesWrongArguments() throws IOException { + try (RestClient restClient = createRestClient()) { + restClient.setNodes(null); + fail("setNodes should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setNodes(Collections.emptyList()); + fail("setNodes should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setNodes(Collections.singletonList((Node) null)); + fail("setNodes should have failed"); + } catch (NullPointerException e) { + assertEquals("node cannot be null", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setNodes(Arrays.asList( + new Node(new HttpHost("localhost", 9200)), + null, + new Node(new HttpHost("localhost", 9201)))); + fail("setNodes should have failed"); + } catch (NullPointerException e) { + assertEquals("node cannot be null", e.getMessage()); + } + } + + public void testSetNodesPreservesOrdering() throws Exception { try (RestClient restClient = createRestClient()) { - HttpHost[] hosts = randomHosts(); - restClient.setHosts(hosts); - assertEquals(Arrays.asList(hosts), restClient.getHosts()); + List nodes = randomNodes(); + restClient.setNodes(nodes); + assertEquals(nodes, restClient.getNodes()); } } - private static HttpHost[] randomHosts() { - int numHosts = randomIntBetween(1, 10); - HttpHost[] hosts = new HttpHost[numHosts]; - for (int i = 0; i < hosts.length; i++) { - hosts[i] = new HttpHost("host-" + i, 9200); + private static List randomNodes() { + int numNodes = randomIntBetween(1, 10); + List nodes = new ArrayList<>(numNodes); + for (int i = 0; i < numNodes; i++) { + nodes.add(new Node(new HttpHost("host-" + i, 9200))); } - return hosts; + return nodes; } - public void testSetHostsDuplicatedHosts() throws Exception { + public void testSetNodesDuplicatedHosts() throws Exception { try (RestClient restClient = createRestClient()) { - int numHosts = randomIntBetween(1, 10); - HttpHost[] hosts = new HttpHost[numHosts]; - HttpHost host = new HttpHost("host", 9200); - for (int i = 0; i < hosts.length; i++) { - hosts[i] = host; + int numNodes = randomIntBetween(1, 10); + List nodes = new ArrayList<>(numNodes); + Node node = new Node(new HttpHost("host", 9200)); + for (int i = 0; i < numNodes; i++) { + nodes.add(node); } - restClient.setHosts(hosts); - assertEquals(1, restClient.getHosts().size()); - assertEquals(host, restClient.getHosts().get(0)); + restClient.setNodes(nodes); + assertEquals(1, restClient.getNodes().size()); + assertEquals(node, restClient.getNodes().get(0)); } } @@ -300,8 +340,143 @@ public void testNullPath() throws IOException { } } + public void testSelectHosts() throws IOException { + Node n1 = new Node(new HttpHost("1"), null, null, "1", null); + Node n2 = new 
Node(new HttpHost("2"), null, null, "2", null); + Node n3 = new Node(new HttpHost("3"), null, null, "3", null); + + NodeSelector not1 = new NodeSelector() { + @Override + public void select(Iterable<Node> nodes) { + for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) { + if ("1".equals(itr.next().getVersion())) { + itr.remove(); + } + } + } + + @Override + public String toString() { + return "NOT 1"; + } + }; + NodeSelector noNodes = new NodeSelector() { + @Override + public void select(Iterable<Node> nodes) { + for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) { + itr.next(); + itr.remove(); + } + } + + @Override + public String toString() { + return "NONE"; + } + }; + + NodeTuple<List<Node>> nodeTuple = new NodeTuple<>(Arrays.asList(n1, n2, n3), null); + + Map<HttpHost, DeadHostState> emptyBlacklist = Collections.emptyMap(); + + // Normal cases where the node selector doesn't reject all living nodes + assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyBlacklist, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyBlacklist, not1); + + /* + * Try a NodeSelector that excludes all nodes. This should + * throw an exception. + */ + { + String message = "NodeSelector [NONE] rejected all nodes, living [" + + "[host=http://1, version=1], [host=http://2, version=2], " + + "[host=http://3, version=3]] and dead []"; + assertEquals(message, assertSelectAllRejected(nodeTuple, emptyBlacklist, noNodes)); + } + + // Mark all the nodes dead for a few test cases + { + ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier(); + Map<HttpHost, DeadHostState> blacklist = new HashMap<>(); + blacklist.put(n1.getHost(), new DeadHostState(timeSupplier)); + blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier))); + blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier)))); + + /* + * selectHosts will revive a single host regardless of + * blacklist time. It'll revive the node that is closest + * to being revived that the NodeSelector is ok with. + */ + assertEquals(singletonList(n1), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), NodeSelector.ANY)); + assertEquals(singletonList(n2), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), not1)); + + /* + * Try a NodeSelector that excludes all nodes. This should + * return a failure, but a different failure than when the + * blacklist is empty so that the caller knows that all of + * their nodes are blacklisted AND blocked. + */ + String message = "NodeSelector [NONE] rejected all nodes, living [] and dead [" + + "[host=http://1, version=1], [host=http://2, version=2], " + + "[host=http://3, version=3]]"; + assertEquals(message, assertSelectAllRejected(nodeTuple, blacklist, noNodes)); + + /* + * Now let's wind the clock forward, past the timeout for one of + * the dead nodes. We should return it. + */ + timeSupplier.nanoTime = new DeadHostState(timeSupplier).getDeadUntilNanos(); + assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, blacklist, NodeSelector.ANY); + + /* + * But if the NodeSelector rejects that node then we'll pick the + * first one that the NodeSelector doesn't reject. + */ + assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, blacklist, not1); + + /* + * If we wind the clock way into the future, past any of the + * blacklist timeouts then we function as though the nodes aren't + * in the blacklist at all.
+ */ + timeSupplier.nanoTime += DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS; + assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, blacklist, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, blacklist, not1); + } + } + + private void assertSelectLivingHosts(List<Node> expectedNodes, NodeTuple<List<Node>> nodeTuple, + Map<HttpHost, DeadHostState> blacklist, NodeSelector nodeSelector) throws IOException { + int iterations = 1000; + AtomicInteger lastNodeIndex = new AtomicInteger(0); + assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector)); + // Calling it again rotates the set of results + for (int i = 1; i < iterations; i++) { + Collections.rotate(expectedNodes, 1); + assertEquals("iteration " + i, expectedNodes, + RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector)); + } + } + + /** + * Assert that {@link RestClient#selectHosts} fails on the provided arguments. + * @return the message in the exception thrown by the failure + */ + private String assertSelectAllRejected(NodeTuple<List<Node>> nodeTuple, + Map<HttpHost, DeadHostState> blacklist, NodeSelector nodeSelector) { + try { + RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(0), nodeSelector); + throw new AssertionError("expected selectHosts to fail"); + } catch (IOException e) { + return e.getMessage(); + } + } + private static RestClient createRestClient() { - HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)}; - return new RestClient(mock(CloseableHttpAsyncClient.class), randomIntBetween(1_000, 30_000), new Header[]{}, hosts, null, null); + List<Node> nodes = Collections.singletonList(new Node(new HttpHost("localhost", 9200))); + return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000), + new Header[] {}, nodes, null, null); } + + } diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index d73c29bd91bc4..0cc41b078b8d6 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -36,7 +36,9 @@ import org.apache.http.ssl.SSLContextBuilder; import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; -import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; +import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -72,6 +74,19 @@ */ @SuppressWarnings("unused") public class RestClientDocumentation { + private static final String TOKEN = "DUMMY"; + + // tag::rest-client-options-singleton + private static final RequestOptions COMMON_OPTIONS; + static { + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + builder.addHeader("Authorization", "Bearer " + TOKEN); // <1> + builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); // <2> + builder.setHttpAsyncResponseConsumerFactory( // <3> + new HeapBufferedResponseConsumerFactory(30 * 1024 * 1024)); + COMMON_OPTIONS = builder.build(); + } + // end::rest-client-options-singleton @SuppressWarnings("unused") public void testUsage() throws IOException, InterruptedException { @@ -104,7 +119,7 @@ public
void testUsage() throws IOException, InterruptedException { RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http")); builder.setFailureListener(new RestClient.FailureListener() { @Override - public void onFailure(HttpHost host) { + public void onFailure(Node node) { // <1> } }); @@ -172,22 +187,14 @@ public void onFailure(Exception exception) { //tag::rest-client-body-shorter request.setJsonEntity("{\"json\":\"text\"}"); //end::rest-client-body-shorter - { - //tag::rest-client-headers - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.addHeader("Accept", "text/plain"); - options.addHeader("Cache-Control", "no-cache"); - request.setOptions(options); - //end::rest-client-headers - } - { - //tag::rest-client-response-consumer - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.setHttpAsyncResponseConsumerFactory( - new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024)); - request.setOptions(options); - //end::rest-client-response-consumer - } + //tag::rest-client-options-set-singleton + request.setOptions(COMMON_OPTIONS); + //end::rest-client-options-set-singleton + //tag::rest-client-options-customize + RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); + options.addHeader("cats", "knock things off of other things"); + request.setOptions(options); + //end::rest-client-options-customize } { HttpEntity[] documents = new HttpEntity[10]; diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchHostsSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java similarity index 50% rename from client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchHostsSniffer.java rename to client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java index 34a4988358653..da7ef4700fd2f 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchHostsSniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java @@ -26,31 +26,34 @@ import org.apache.commons.logging.LogFactory; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.Node.Roles; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.util.ArrayList; -import java.util.Collections; +import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.TimeUnit; /** * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back. - * Compatible with elasticsearch 5.x and 2.x. + * Compatible with elasticsearch 2.x+. 
*/ -public final class ElasticsearchHostsSniffer implements HostsSniffer { +public final class ElasticsearchNodesSniffer implements NodesSniffer { - private static final Log logger = LogFactory.getLog(ElasticsearchHostsSniffer.class); + private static final Log logger = LogFactory.getLog(ElasticsearchNodesSniffer.class); public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1); private final RestClient restClient; - private final Map sniffRequestParams; + private final Request request; private final Scheme scheme; private final JsonFactory jsonFactory = new JsonFactory(); @@ -62,8 +65,8 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer { * that is also provided to {@link Sniffer#builder(RestClient)}, so that the hosts are set to the same * client that was used to fetch them. */ - public ElasticsearchHostsSniffer(RestClient restClient) { - this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchHostsSniffer.Scheme.HTTP); + public ElasticsearchNodesSniffer(RestClient restClient) { + this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchNodesSniffer.Scheme.HTTP); } /** @@ -77,30 +80,32 @@ public ElasticsearchHostsSniffer(RestClient restClient) { * that have responded within this timeout will be returned. * @param scheme the scheme to associate sniffed nodes with (as it is not returned by elasticsearch) */ - public ElasticsearchHostsSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) { + public ElasticsearchNodesSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) { this.restClient = Objects.requireNonNull(restClient, "restClient cannot be null"); if (sniffRequestTimeoutMillis < 0) { throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0"); } - this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeoutMillis + "ms"); + this.request = new Request("GET", "/_nodes/http"); + request.addParameter("timeout", sniffRequestTimeoutMillis + "ms"); this.scheme = Objects.requireNonNull(scheme, "scheme cannot be null"); } /** * Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts */ - public List sniffHosts() throws IOException { - Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams); - return readHosts(response.getEntity()); + @Override + public List sniff() throws IOException { + Response response = restClient.performRequest(request); + return readHosts(response.getEntity(), scheme, jsonFactory); } - private List readHosts(HttpEntity entity) throws IOException { + static List readHosts(HttpEntity entity, Scheme scheme, JsonFactory jsonFactory) throws IOException { try (InputStream inputStream = entity.getContent()) { JsonParser parser = jsonFactory.createParser(inputStream); if (parser.nextToken() != JsonToken.START_OBJECT) { throw new IOException("expected data to start with an object"); } - List hosts = new ArrayList<>(); + List nodes = new ArrayList<>(); while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.START_OBJECT) { if ("nodes".equals(parser.getCurrentName())) { @@ -108,10 +113,9 @@ private List readHosts(HttpEntity entity) throws IOException { JsonToken token = parser.nextToken(); assert token == JsonToken.START_OBJECT; String nodeId = parser.getCurrentName(); - HttpHost sniffedHost = readHost(nodeId, parser, this.scheme); - if (sniffedHost != null) { - logger.trace("adding node [" + nodeId + "]"); - 
hosts.add(sniffedHost); + Node node = readNode(nodeId, parser, scheme); + if (node != null) { + nodes.add(node); } } } else { @@ -119,13 +123,31 @@ private List readHosts(HttpEntity entity) throws IOException { } } } - return hosts; + return nodes; } } - private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException { - HttpHost httpHost = null; + private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) throws IOException { + HttpHost publishedHost = null; + /* + * We sniff the bound hosts so we can look up the node based on any + * address on which it is listening. This is useful in Elasticsearch's + * test framework where we sometimes publish ipv6 addresses but the + * tests contact the node on ipv4. + */ + Set boundHosts = new HashSet<>(); + String name = null; + String version = null; String fieldName = null; + // Used to read roles from 5.0+ + boolean sawRoles = false; + boolean master = false; + boolean data = false; + boolean ingest = false; + // Used to read roles from 2.x + Boolean masterAttribute = null; + Boolean dataAttribute = null; + boolean clientAttribute = false; while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.FIELD_NAME) { fieldName = parser.getCurrentName(); @@ -133,9 +155,27 @@ private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme if ("http".equals(fieldName)) { while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) { - URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); - httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), - boundAddressAsURI.getScheme()); + URI publishAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); + publishedHost = new HttpHost(publishAddressAsURI.getHost(), publishAddressAsURI.getPort(), + publishAddressAsURI.getScheme()); + } else if (parser.currentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) { + while (parser.nextToken() != JsonToken.END_ARRAY) { + URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); + boundHosts.add(new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), + boundAddressAsURI.getScheme())); + } + } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + parser.skipChildren(); + } + } + } else if ("attributes".equals(fieldName)) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "master".equals(parser.getCurrentName())) { + masterAttribute = toBoolean(parser.getValueAsString()); + } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "data".equals(parser.getCurrentName())) { + dataAttribute = toBoolean(parser.getValueAsString()); + } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "client".equals(parser.getCurrentName())) { + clientAttribute = toBoolean(parser.getValueAsString()); } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { parser.skipChildren(); } @@ -143,14 +183,55 @@ private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme } else { parser.skipChildren(); } + } else if (parser.currentToken() == JsonToken.START_ARRAY) { + if ("roles".equals(fieldName)) { + sawRoles = true; + while (parser.nextToken() != JsonToken.END_ARRAY) { + switch (parser.getText()) { + case "master": + master = true; + 
break; + case "data": + data = true; + break; + case "ingest": + ingest = true; + break; + default: + logger.warn("unknown role [" + parser.getText() + "] on node [" + nodeId + "]"); + } + } + } else { + parser.skipChildren(); + } + } else if (parser.currentToken().isScalarValue()) { + if ("version".equals(fieldName)) { + version = parser.getText(); + } else if ("name".equals(fieldName)) { + name = parser.getText(); + } } } //http section is not present if http is not enabled on the node, ignore such nodes - if (httpHost == null) { + if (publishedHost == null) { logger.debug("skipping node [" + nodeId + "] with http disabled"); return null; + } else { + logger.trace("adding node [" + nodeId + "]"); + if (version.startsWith("2.")) { + /* + * 2.x doesn't send roles, instead we try to read them from + * attributes. + */ + master = masterAttribute == null ? false == clientAttribute : masterAttribute; + data = dataAttribute == null ? false == clientAttribute : dataAttribute; + } else { + assert sawRoles : "didn't see roles for [" + nodeId + "]"; + } + assert boundHosts.contains(publishedHost) : + "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts"; + return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest)); } - return httpHost; } public enum Scheme { @@ -167,4 +248,15 @@ public String toString() { return name; } } + + private static boolean toBoolean(String string) { + switch (string) { + case "true": + return true; + case "false": + return false; + default: + throw new IllegalArgumentException("[" + string + "] is not a valid boolean"); + } + } } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/NodesSniffer.java similarity index 85% rename from client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java rename to client/sniffer/src/main/java/org/elasticsearch/client/sniff/NodesSniffer.java index 9eb7b34425944..c22c18f6eae32 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/NodesSniffer.java @@ -19,7 +19,7 @@ package org.elasticsearch.client.sniff; -import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import java.io.IOException; import java.util.List; @@ -27,9 +27,9 @@ /** * Responsible for sniffing the http hosts */ -public interface HostsSniffer { +public interface NodesSniffer { /** - * Returns the sniffed http hosts + * Returns the sniffed Elasticsearch nodes. 
*/ - List sniffHosts() throws IOException; + List sniff() throws IOException; } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java index 41051555bae2c..9d5627922823d 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java @@ -19,7 +19,7 @@ package org.elasticsearch.client.sniff; -import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import java.util.Objects; @@ -54,7 +54,7 @@ public void setSniffer(Sniffer sniffer) { } @Override - public void onFailure(HttpHost host) { + public void onFailure(Node node) { if (sniffer == null) { throw new IllegalStateException("sniffer was not set, unable to sniff on failure"); } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index dc873ccd44e10..73780586e7617 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -21,7 +21,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; @@ -29,6 +29,7 @@ import java.io.IOException; import java.security.AccessController; import java.security.PrivilegedAction; +import java.util.Collection; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.Future; @@ -43,7 +44,7 @@ /** * Class responsible for sniffing nodes from some source (default is elasticsearch itself) and setting them to a provided instance of * {@link RestClient}. Must be created via {@link SnifferBuilder}, which allows to set all of the different options or rely on defaults. - * A background task fetches the nodes through the {@link HostsSniffer} and sets them to the {@link RestClient} instance. + * A background task fetches the nodes through the {@link NodesSniffer} and sets them to the {@link RestClient} instance. * It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to * {@link RestClientBuilder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation needs to be lazily set to the * previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}. 
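The class-level javadoc above spells out the sniff-on-failure contract: register the listener on the builder before the client is built, then hand the finished `Sniffer` back to the listener. A minimal wiring sketch of that sequence, with the host and the delay chosen arbitrarily for illustration:

```java
import org.apache.http.HttpHost;

import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.SniffOnFailureListener;
import org.elasticsearch.client.sniff.Sniffer;

class SniffOnFailureSetup {
    static Sniffer wire() {
        SniffOnFailureListener listener = new SniffOnFailureListener();
        // The failure listener must be registered before build().
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
                .setFailureListener(listener)
                .build();
        // Sniff sooner than the regular interval once a node has failed;
        // 30 seconds is an example value, not a recommendation.
        Sniffer sniffer = Sniffer.builder(restClient)
                .setSniffAfterFailureDelayMillis(30000)
                .build();
        // Close the loop lazily, as the javadoc above requires.
        listener.setSniffer(sniffer);
        return sniffer;
    }
}
```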
@@ -53,7 +54,7 @@ public class Sniffer implements Closeable { private static final Log logger = LogFactory.getLog(Sniffer.class); private static final String SNIFFER_THREAD_NAME = "es_rest_client_sniffer"; - private final HostsSniffer hostsSniffer; + private final NodesSniffer nodesSniffer; private final RestClient restClient; private final long sniffIntervalMillis; private final long sniffAfterFailureDelayMillis; @@ -61,12 +62,12 @@ public class Sniffer implements Closeable { private final AtomicBoolean initialized = new AtomicBoolean(false); private volatile ScheduledTask nextScheduledTask; - Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) { - this(restClient, hostsSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay); + Sniffer(RestClient restClient, NodesSniffer nodesSniffer, long sniffInterval, long sniffAfterFailureDelay) { + this(restClient, nodesSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay); } - Sniffer(RestClient restClient, HostsSniffer hostsSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) { - this.hostsSniffer = hostsSniffer; + Sniffer(RestClient restClient, NodesSniffer nodesSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) { + this.nodesSniffer = nodesSniffer; this.restClient = restClient; this.sniffIntervalMillis = sniffInterval; this.sniffAfterFailureDelayMillis = sniffAfterFailureDelay; @@ -205,14 +206,14 @@ boolean skip() { } final void sniff() throws IOException { - List sniffedHosts = hostsSniffer.sniffHosts(); + List sniffedNodes = nodesSniffer.sniff(); if (logger.isDebugEnabled()) { - logger.debug("sniffed hosts: " + sniffedHosts); + logger.debug("sniffed nodes: " + sniffedNodes); } - if (sniffedHosts.isEmpty()) { - logger.warn("no hosts to set, hosts will be updated at the next sniffing round"); + if (sniffedNodes.isEmpty()) { + logger.warn("no nodes to set, nodes will be updated at the next sniffing round"); } else { - restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()])); + restClient.setNodes(sniffedNodes); } } @@ -227,7 +228,8 @@ public void close() { /** * Returns a new {@link SnifferBuilder} to help with {@link Sniffer} creation. 
* - * @param restClient the client that gets its hosts set (via {@link RestClient#setHosts(HttpHost...)}) once they are fetched + * @param restClient the client that gets its hosts set (via + * {@link RestClient#setNodes(Collection)}) once they are fetched * @return a new instance of {@link SnifferBuilder} */ public static SnifferBuilder builder(RestClient restClient) { diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java index 010a8a4a78d20..48ca52d423012 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java @@ -34,7 +34,7 @@ public final class SnifferBuilder { private final RestClient restClient; private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL; private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; - private HostsSniffer hostsSniffer; + private NodesSniffer nodesSniffer; /** * Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch @@ -69,13 +69,13 @@ public SnifferBuilder setSniffAfterFailureDelayMillis(int sniffAfterFailureDelay } /** - * Sets the {@link HostsSniffer} to be used to read hosts. A default instance of {@link ElasticsearchHostsSniffer} - * is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchHostsSniffer}, + * Sets the {@link NodesSniffer} to be used to read hosts. A default instance of {@link ElasticsearchNodesSniffer} + * is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchNodesSniffer}, * or to provide a different implementation (e.g. in case hosts need to taken from a different source). */ - public SnifferBuilder setHostsSniffer(HostsSniffer hostsSniffer) { - Objects.requireNonNull(hostsSniffer, "hostsSniffer cannot be null"); - this.hostsSniffer = hostsSniffer; + public SnifferBuilder setNodesSniffer(NodesSniffer nodesSniffer) { + Objects.requireNonNull(nodesSniffer, "nodesSniffer cannot be null"); + this.nodesSniffer = nodesSniffer; return this; } @@ -83,9 +83,9 @@ public SnifferBuilder setHostsSniffer(HostsSniffer hostsSniffer) { * Creates the {@link Sniffer} based on the provided configuration. */ public Sniffer build() { - if (hostsSniffer == null) { - this.hostsSniffer = new ElasticsearchHostsSniffer(restClient); + if (nodesSniffer == null) { + this.nodesSniffer = new ElasticsearchNodesSniffer(restClient); } - return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis); + return new Sniffer(restClient, nodesSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java new file mode 100644 index 0000000000000..712a836a17b8a --- /dev/null +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.sniff; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.InputStreamEntity; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.RestClientTestCase; +import org.elasticsearch.client.Node.Roles; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer.Scheme; + +import java.io.IOException; +import java.io.InputStream; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.fasterxml.jackson.core.JsonFactory; + +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.junit.Assert.assertThat; + +/** + * Test parsing the response from the {@code /_nodes/http} API from fixed + * versions of Elasticsearch. + */ +public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase { + private void checkFile(String file, Node... expected) throws IOException { + InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream(file); + if (in == null) { + throw new IllegalArgumentException("Couldn't find [" + file + "]"); + } + try { + HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON); + List nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory()); + // Use these assertions because the error messages are nicer than hasItems. 
+ assertThat(nodes, hasSize(expected.length)); + for (Node expectedNode : expected) { + assertThat(nodes, hasItem(expectedNode)); + } + } finally { + in.close(); + } + } + + public void test2x() throws IOException { + checkFile("2.0.0_nodes_http.json", + node(9200, "m1", "2.0.0", true, false, false), + node(9202, "m2", "2.0.0", true, true, false), + node(9201, "m3", "2.0.0", true, false, false), + node(9205, "d1", "2.0.0", false, true, false), + node(9204, "d2", "2.0.0", false, true, false), + node(9203, "d3", "2.0.0", false, true, false), + node(9207, "c1", "2.0.0", false, false, false), + node(9206, "c2", "2.0.0", false, false, false)); + } + + public void test5x() throws IOException { + checkFile("5.0.0_nodes_http.json", + node(9200, "m1", "5.0.0", true, false, true), + node(9201, "m2", "5.0.0", true, true, true), + node(9202, "m3", "5.0.0", true, false, true), + node(9203, "d1", "5.0.0", false, true, true), + node(9204, "d2", "5.0.0", false, true, true), + node(9205, "d3", "5.0.0", false, true, true), + node(9206, "c1", "5.0.0", false, false, true), + node(9207, "c2", "5.0.0", false, false, true)); + } + + public void test6x() throws IOException { + checkFile("6.0.0_nodes_http.json", + node(9200, "m1", "6.0.0", true, false, true), + node(9201, "m2", "6.0.0", true, true, true), + node(9202, "m3", "6.0.0", true, false, true), + node(9203, "d1", "6.0.0", false, true, true), + node(9204, "d2", "6.0.0", false, true, true), + node(9205, "d3", "6.0.0", false, true, true), + node(9206, "c1", "6.0.0", false, false, true), + node(9207, "c2", "6.0.0", false, false, true)); + } + + private Node node(int port, String name, String version, boolean master, boolean data, boolean ingest) { + HttpHost host = new HttpHost("127.0.0.1", port); + Set boundHosts = new HashSet<>(2); + boundHosts.add(host); + boundHosts.add(new HttpHost("[::1]", port)); + return new Node(host, boundHosts, name, version, new Roles(master, data, ingest)); + } +} diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java similarity index 76% rename from client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java rename to client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java index ed2744df31c61..260832ca90e17 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java @@ -30,6 +30,7 @@ import org.apache.http.Consts; import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Node; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -44,10 +45,10 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -59,17 +60,17 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; -public class ElasticsearchHostsSnifferTests extends RestClientTestCase { +public class ElasticsearchNodesSnifferTests extends RestClientTestCase { private int sniffRequestTimeout; - 
private ElasticsearchHostsSniffer.Scheme scheme; + private ElasticsearchNodesSniffer.Scheme scheme; private SniffResponse sniffResponse; private HttpServer httpServer; @Before public void startHttpServer() throws IOException { this.sniffRequestTimeout = RandomNumbers.randomIntBetween(getRandom(), 1000, 10000); - this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchHostsSniffer.Scheme.values()); + this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchNodesSniffer.Scheme.values()); if (rarely()) { this.sniffResponse = SniffResponse.buildFailure(); } else { @@ -86,7 +87,7 @@ public void stopHttpServer() throws IOException { public void testConstructorValidation() throws IOException { try { - new ElasticsearchHostsSniffer(null, 1, ElasticsearchHostsSniffer.Scheme.HTTP); + new ElasticsearchNodesSniffer(null, 1, ElasticsearchNodesSniffer.Scheme.HTTP); fail("should have failed"); } catch(NullPointerException e) { assertEquals("restClient cannot be null", e.getMessage()); @@ -94,14 +95,14 @@ public void testConstructorValidation() throws IOException { HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); try (RestClient restClient = RestClient.builder(httpHost).build()) { try { - new ElasticsearchHostsSniffer(restClient, 1, null); + new ElasticsearchNodesSniffer(restClient, 1, null); fail("should have failed"); } catch (NullPointerException e) { assertEquals(e.getMessage(), "scheme cannot be null"); } try { - new ElasticsearchHostsSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0), - ElasticsearchHostsSniffer.Scheme.HTTP); + new ElasticsearchNodesSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0), + ElasticsearchNodesSniffer.Scheme.HTTP); fail("should have failed"); } catch (IllegalArgumentException e) { assertEquals(e.getMessage(), "sniffRequestTimeoutMillis must be greater than 0"); @@ -112,17 +113,13 @@ public void testConstructorValidation() throws IOException { public void testSniffNodes() throws IOException { HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); try (RestClient restClient = RestClient.builder(httpHost).build()) { - ElasticsearchHostsSniffer sniffer = new ElasticsearchHostsSniffer(restClient, sniffRequestTimeout, scheme); + ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer(restClient, sniffRequestTimeout, scheme); try { - List<HttpHost> sniffedHosts = sniffer.sniffHosts(); + List<Node> sniffedNodes = sniffer.sniff(); if (sniffResponse.isFailure) { fail("sniffNodes should have failed"); } - assertThat(sniffedHosts.size(), equalTo(sniffResponse.hosts.size())); - Iterator<HttpHost> responseHostsIterator = sniffResponse.hosts.iterator(); - for (HttpHost sniffedHost : sniffedHosts) { - assertEquals(sniffedHost, responseHostsIterator.next()); - } + assertEquals(sniffResponse.result, sniffedNodes); } catch(ResponseException e) { Response response = e.getResponse(); if (sniffResponse.isFailure) { @@ -173,9 +170,9 @@ public void handle(HttpExchange httpExchange) throws IOException { } } - private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme scheme) throws IOException { + private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme scheme) throws IOException { int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5); - List<HttpHost> hosts = new ArrayList<>(numNodes); + List<Node> nodes = new ArrayList<>(numNodes); JsonFactory jsonFactory = new JsonFactory();
StringWriter writer = new StringWriter(); JsonGenerator generator = jsonFactory.createGenerator(writer); @@ -190,6 +187,23 @@ private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme generator.writeObjectFieldStart("nodes"); for (int i = 0; i < numNodes; i++) { String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10); + String host = "host" + i; + int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); + HttpHost publishHost = new HttpHost(host, port, scheme.toString()); + Set<HttpHost> boundHosts = new HashSet<>(); + boundHosts.add(publishHost); + + if (randomBoolean()) { + int bound = between(1, 5); + for (int b = 0; b < bound; b++) { + boundHosts.add(new HttpHost(host + b, port, scheme.toString())); + } + } + + Node node = new Node(publishHost, boundHosts, randomAsciiAlphanumOfLength(5), + randomAsciiAlphanumOfLength(5), + new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean())); + generator.writeObjectFieldStart(nodeId); if (getRandom().nextBoolean()) { generator.writeObjectFieldStart("bogus_object"); generator.writeEndObject(); @@ -203,44 +217,45 @@ private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme } boolean isHttpEnabled = rarely() == false; if (isHttpEnabled) { - String host = "host" + i; - int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); - HttpHost httpHost = new HttpHost(host, port, scheme.toString()); - hosts.add(httpHost); + nodes.add(node); generator.writeObjectFieldStart("http"); - if (getRandom().nextBoolean()) { - generator.writeArrayFieldStart("bound_address"); - generator.writeString("[fe80::1]:" + port); - generator.writeString("[::1]:" + port); - generator.writeString("127.0.0.1:" + port); - generator.writeEndArray(); + generator.writeArrayFieldStart("bound_address"); + for (HttpHost bound : boundHosts) { + generator.writeString(bound.toHostString()); } + generator.writeEndArray(); if (getRandom().nextBoolean()) { generator.writeObjectFieldStart("bogus_object"); generator.writeEndObject(); } - generator.writeStringField("publish_address", httpHost.toHostString()); + generator.writeStringField("publish_address", publishHost.toHostString()); if (getRandom().nextBoolean()) { generator.writeNumberField("max_content_length_in_bytes", 104857600); } generator.writeEndObject(); } - if (getRandom().nextBoolean()) { - String[] roles = {"master", "data", "ingest"}; - int numRoles = RandomNumbers.randomIntBetween(getRandom(), 0, 3); - Set<String> nodeRoles = new HashSet<>(numRoles); - for (int j = 0; j < numRoles; j++) { - String role; - do { - role = RandomPicks.randomFrom(getRandom(), roles); - } while(nodeRoles.add(role) == false); + + List<String> roles = Arrays.asList(new String[] {"master", "data", "ingest"}); + Collections.shuffle(roles, getRandom()); + generator.writeArrayFieldStart("roles"); + for (String role : roles) { + if ("master".equals(role) && node.getRoles().isMasterEligible()) { + generator.writeString("master"); } - generator.writeArrayFieldStart("roles"); - for (String nodeRole : nodeRoles) { - generator.writeString(nodeRole); + if ("data".equals(role) && node.getRoles().isData()) { + generator.writeString("data"); + } + if ("ingest".equals(role) && node.getRoles().isIngest()) { + generator.writeString("ingest"); } - generator.writeEndArray(); } + generator.writeEndArray(); + + generator.writeFieldName("version"); + generator.writeString(node.getVersion()); + generator.writeFieldName("name"); + generator.writeString(node.getName()); + int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3); Map<String, String> attributes = new HashMap<>(numAttributes); for (int j = 0; j < numAttributes; j++) { @@ -260,18 +275,18 @@ private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme generator.writeEndObject(); generator.writeEndObject(); generator.close(); - return SniffResponse.buildResponse(writer.toString(), hosts); + return SniffResponse.buildResponse(writer.toString(), nodes); } private static class SniffResponse { private final String nodesInfoBody; private final int nodesInfoResponseCode; - private final List<HttpHost> hosts; + private final List<Node> result; private final boolean isFailure; - SniffResponse(String nodesInfoBody, List<HttpHost> hosts, boolean isFailure) { + SniffResponse(String nodesInfoBody, List<Node> result, boolean isFailure) { this.nodesInfoBody = nodesInfoBody; - this.hosts = hosts; + this.result = result; this.isFailure = isFailure; if (isFailure) { this.nodesInfoResponseCode = randomErrorResponseCode(); @@ -281,11 +296,11 @@ private static class SniffResponse { } static SniffResponse buildFailure() { - return new SniffResponse("", Collections.<HttpHost>emptyList(), true); + return new SniffResponse("", Collections.<Node>emptyList(), true); } - static SniffResponse buildResponse(String nodesInfoBody, List<HttpHost> hosts) { - return new SniffResponse(nodesInfoBody, hosts, false); + static SniffResponse buildResponse(String nodesInfoBody, List<Node> nodes) { + return new SniffResponse(nodesInfoBody, nodes, false); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockNodesSniffer.java similarity index 78% rename from client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java rename to client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockNodesSniffer.java index 7550459e9ea50..8acd929498e1b 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockNodesSniffer.java @@ -20,16 +20,17 @@ package org.elasticsearch.client.sniff; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import java.util.Collections; import java.util.List; /** - * Mock implementation of {@link HostsSniffer}. Useful to prevent any connection attempt while testing builders etc. + * Mock implementation of {@link NodesSniffer}. Useful to prevent any connection attempt while testing builders etc.
*/ -class MockHostsSniffer implements HostsSniffer { +class MockNodesSniffer implements NodesSniffer { @Override - public List<HttpHost> sniffHosts() { - return Collections.singletonList(new HttpHost("localhost", 9200)); + public List<Node> sniff() { + return Collections.singletonList(new Node(new HttpHost("localhost", 9200))); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java index 1fece270ffe0d..225bdb9a0097e 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.client.sniff; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientTestCase; @@ -46,7 +47,7 @@ public void testSetSniffer() throws Exception { } try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) { - try (Sniffer sniffer = Sniffer.builder(restClient).setHostsSniffer(new MockHostsSniffer()).build()) { + try (Sniffer sniffer = Sniffer.builder(restClient).setNodesSniffer(new MockNodesSniffer()).build()) { listener.setSniffer(sniffer); try { listener.setSniffer(sniffer); @@ -54,7 +55,7 @@ public void testSetSniffer() throws Exception { } catch(IllegalStateException e) { assertEquals("sniffer can only be set once", e.getMessage()); } - listener.onFailure(new HttpHost("localhost", 9200)); + listener.onFailure(new Node(new HttpHost("localhost", 9200))); } } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index 9a7359e9c7215..f924a9fbebc81 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -61,10 +61,10 @@ public void testBuild() throws Exception { try { - Sniffer.builder(client).setHostsSniffer(null); + Sniffer.builder(client).setNodesSniffer(null); fail("should have failed"); } catch(NullPointerException e) { - assertEquals("hostsSniffer cannot be null", e.getMessage()); + assertEquals("nodesSniffer cannot be null", e.getMessage()); } @@ -80,7 +80,7 @@ public void testBuild() throws Exception { builder.setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); } if (getRandom().nextBoolean()) { - builder.setHostsSniffer(new MockHostsSniffer()); + builder.setNodesSniffer(new MockNodesSniffer()); } try (Sniffer sniffer = builder.build()) { diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index 8172774a77d80..00c5eb31d17e8 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -20,11 +20,11 @@ package org.elasticsearch.client.sniff; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientTestCase; import org.elasticsearch.client.sniff.Sniffer.DefaultScheduler; import
org.elasticsearch.client.sniff.Sniffer.Scheduler; -import org.mockito.Matchers; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -62,6 +62,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyCollectionOf; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -71,12 +72,12 @@ public class SnifferTests extends RestClientTestCase { /** - * Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link HostsSniffer} implementation + * Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link NodesSniffer} implementation * to retrieve nodes and set them (when not empty) to the provided {@link RestClient} instance. */ public void testSniff() throws IOException { - HttpHost initialHost = new HttpHost("localhost", 9200); - try (RestClient restClient = RestClient.builder(initialHost).build()) { + Node initialNode = new Node(new HttpHost("localhost", 9200)); + try (RestClient restClient = RestClient.builder(initialNode).build()) { Scheduler noOpScheduler = new Scheduler() { @Override public Future<?> schedule(Sniffer.Task task, long delayMillis) { @@ -88,53 +89,53 @@ public void shutdown() { } }; - CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); int iters = randomIntBetween(5, 30); - try (Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 1000L, -1)){ + try (Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 1000L, -1)){ { - assertEquals(1, restClient.getHosts().size()); - HttpHost httpHost = restClient.getHosts().get(0); - assertEquals("localhost", httpHost.getHostName()); - assertEquals(9200, httpHost.getPort()); + assertEquals(1, restClient.getNodes().size()); + Node node = restClient.getNodes().get(0); + assertEquals("localhost", node.getHost().getHostName()); + assertEquals(9200, node.getHost().getPort()); } int emptyList = 0; int failures = 0; int runs = 0; - List<HttpHost> lastHosts = Collections.singletonList(initialHost); + List<Node> lastNodes = Collections.singletonList(initialNode); for (int i = 0; i < iters; i++) { try { runs++; sniffer.sniff(); - if (hostsSniffer.failures.get() > failures) { + if (nodesSniffer.failures.get() > failures) { failures++; - fail("should have failed given that hostsSniffer says it threw an exception"); - } else if (hostsSniffer.emptyList.get() > emptyList) { + fail("should have failed given that nodesSniffer says it threw an exception"); + } else if (nodesSniffer.emptyList.get() > emptyList) { emptyList++; - assertEquals(lastHosts, restClient.getHosts()); + assertEquals(lastNodes, restClient.getNodes()); } else { - assertNotEquals(lastHosts, restClient.getHosts()); - List<HttpHost> expectedHosts = CountingHostsSniffer.buildHosts(runs); - assertEquals(expectedHosts, restClient.getHosts()); - lastHosts = restClient.getHosts(); + assertNotEquals(lastNodes, restClient.getNodes()); + List<Node> expectedNodes = CountingNodesSniffer.buildNodes(runs); + assertEquals(expectedNodes, restClient.getNodes()); + lastNodes = restClient.getNodes(); } } catch(IOException e) { - if (hostsSniffer.failures.get() > failures) { + if (nodesSniffer.failures.get() > failures) { failures++; assertEquals("communication breakdown", e.getMessage()); } } } - assertEquals(hostsSniffer.emptyList.get(), emptyList); -
assertEquals(hostsSniffer.failures.get(), failures); - assertEquals(hostsSniffer.runs.get(), runs); + assertEquals(nodesSniffer.emptyList.get(), emptyList); + assertEquals(nodesSniffer.failures.get(), failures); + assertEquals(nodesSniffer.runs.get(), runs); } } } /** - * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}. + * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link NodesSniffer}. * Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled. - * The {@link CountingHostsSniffer} doesn't make any network connection but may throw exception or return no hosts, which makes + * The {@link CountingNodesSniffer} doesn't make any network connection but may throw an exception or return no nodes, which makes * it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling. * The {@link Scheduler} implementation submits rather than scheduling tasks, meaning that it doesn't respect the requested sniff * delays while allowing to assert that the requested delays for each requested run and the following one are the expected values. @@ -143,7 +144,7 @@ public void testOrdinarySniffRounds() throws Exception { final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); RestClient restClient = mock(RestClient.class); - CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); final int iters = randomIntBetween(30, 100); final Set<Future<?>> futures = new CopyOnWriteArraySet<>(); final CountDownLatch completionLatch = new CountDownLatch(1); @@ -185,7 +186,7 @@ public void shutdown() { } }; try { - new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS)); assertEquals(iters, futures.size()); //the last future is the only one that may not be completed yet, as the count down happens @@ -200,10 +201,10 @@ public void shutdown() { executor.shutdown(); assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS)); } - int totalRuns = hostsSniffer.runs.get(); + int totalRuns = nodesSniffer.runs.get(); assertEquals(iters, totalRuns); - int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get(); - verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg()); + int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); + verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class)); verifyNoMoreInteractions(restClient); } @@ -234,7 +235,7 @@ public void shutdown() { } }; - Sniffer sniffer = new Sniffer(restClient, new MockHostsSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay); + Sniffer sniffer = new Sniffer(restClient, new MockNodesSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay); assertEquals(0, shutdown.get()); int iters = randomIntBetween(3, 10); for (int i = 1; i <= iters; i++) { @@ -246,7 +247,7 @@ public void shutdown() { public void testSniffOnFailureNotInitialized() { RestClient restClient = mock(RestClient.class); - CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); long
sniffInterval = randomLongBetween(1, Long.MAX_VALUE); long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); final AtomicInteger scheduleCalls = new AtomicInteger(0); @@ -262,15 +263,15 @@ public void shutdown() { } }; - Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + Sniffer sniffer = new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); for (int i = 0; i < 10; i++) { sniffer.sniffOnFailure(); } assertEquals(1, scheduleCalls.get()); - int totalRuns = hostsSniffer.runs.get(); + int totalRuns = nodesSniffer.runs.get(); assertEquals(0, totalRuns); - int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get(); - verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg()); + int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); + verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class)); verifyNoMoreInteractions(restClient); } @@ -281,7 +282,7 @@ public void shutdown() { */ public void testSniffOnFailure() throws Exception { RestClient restClient = mock(RestClient.class); - CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); final AtomicBoolean initializing = new AtomicBoolean(true); final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); @@ -351,7 +352,7 @@ private Future<?> scheduleOrSubmit(Sniffer.Task task) { public void shutdown() { } }; - final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + final Sniffer sniffer = new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); assertTrue("timeout waiting for sniffer to get initialized", initializingLatch.await(1000, TimeUnit.MILLISECONDS)); ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20)); @@ -413,9 +414,9 @@ public void run() { } assertEquals(onFailureTasks.size(), cancelledTasks); - assertEquals(completedTasks, hostsSniffer.runs.get()); - int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get(); - verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg()); + assertEquals(completedTasks, nodesSniffer.runs.get()); + int setNodesRuns = nodesSniffer.runs.get() - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); + verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class)); verifyNoMoreInteractions(restClient); } finally { executor.shutdown(); @@ -446,7 +447,7 @@ private static boolean assertTaskCancelledOrCompleted(Sniffer.ScheduledTask task public void testTaskCancelling() throws Exception { RestClient restClient = mock(RestClient.class); - HostsSniffer hostsSniffer = mock(HostsSniffer.class); + NodesSniffer nodesSniffer = mock(NodesSniffer.class); Scheduler noOpScheduler = new Scheduler() { @Override public Future<?> schedule(Sniffer.Task task, long delayMillis) { @@ -457,7 +458,7 @@ public Future<?> schedule(Sniffer.Task task, long delayMillis) { public void shutdown() { } }; - Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L); + Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 0L, 0L); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); try { int numIters = randomIntBetween(50, 100); @@
-540,18 +541,18 @@ boolean await() throws InterruptedException { } /** - * Mock {@link HostsSniffer} implementation used for testing, which most of the times return a fixed host. - * It rarely throws exception or return an empty list of hosts, to make sure that such situations are properly handled. + * Mock {@link NodesSniffer} implementation used for testing, which most of the time returns a fixed node. + * It rarely throws an exception or returns an empty list of nodes, to make sure that such situations are properly handled. * It also asserts that it never gets called concurrently, based on the assumption that only one sniff run can be run * at a given point in time. */ - private static class CountingHostsSniffer implements HostsSniffer { + private static class CountingNodesSniffer implements NodesSniffer { private final AtomicInteger runs = new AtomicInteger(0); private final AtomicInteger failures = new AtomicInteger(0); private final AtomicInteger emptyList = new AtomicInteger(0); @Override - public List<HttpHost> sniffHosts() throws IOException { + public List<Node> sniff() throws IOException { int run = runs.incrementAndGet(); if (rarely()) { failures.incrementAndGet(); @@ -562,24 +563,23 @@ public List<HttpHost> sniffHosts() throws IOException { emptyList.incrementAndGet(); return Collections.emptyList(); } - return buildHosts(run); + return buildNodes(run); } - private static List<HttpHost> buildHosts(int run) { + private static List<Node> buildNodes(int run) { int size = run % 5 + 1; assert size > 0; - List<HttpHost> hosts = new ArrayList<>(size); + List<Node> nodes = new ArrayList<>(size); for (int i = 0; i < size; i++) { - hosts.add(new HttpHost("sniffed-" + run, 9200 + i)); + nodes.add(new Node(new HttpHost("sniffed-" + run, 9200 + i))); } - return hosts; + return nodes; } } - @SuppressWarnings("unchecked") public void testDefaultSchedulerSchedule() { RestClient restClient = mock(RestClient.class); - HostsSniffer hostsSniffer = mock(HostsSniffer.class); + NodesSniffer nodesSniffer = mock(NodesSniffer.class); Scheduler noOpScheduler = new Scheduler() { @Override public Future<?> schedule(Sniffer.Task task, long delayMillis) { @@ -591,7 +591,7 @@ public void shutdown() { } }; - Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L); + Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 0L, 0L); Sniffer.Task task = sniffer.new Task(randomLongBetween(1, Long.MAX_VALUE)); ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class); diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java index 199632d478f81..5f305024dba20 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java @@ -20,9 +20,10 @@ package org.elasticsearch.client.sniff.documentation; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer; -import org.elasticsearch.client.sniff.HostsSniffer; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; +import org.elasticsearch.client.sniff.NodesSniffer; import org.elasticsearch.client.sniff.SniffOnFailureListener; import org.elasticsearch.client.sniff.Sniffer; @@ -91,12 +92,12 @@ public void testUsage() throws
IOException { RestClient restClient = RestClient.builder( new HttpHost("localhost", 9200, "http")) .build(); - HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer( + NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer( restClient, - ElasticsearchHostsSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, - ElasticsearchHostsSniffer.Scheme.HTTPS); + ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, + ElasticsearchNodesSniffer.Scheme.HTTPS); Sniffer sniffer = Sniffer.builder(restClient) - .setHostsSniffer(hostsSniffer).build(); + .setNodesSniffer(nodesSniffer).build(); //end::sniffer-https } { @@ -104,28 +105,28 @@ public void testUsage() throws IOException { RestClient restClient = RestClient.builder( new HttpHost("localhost", 9200, "http")) .build(); - HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer( + NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer( restClient, TimeUnit.SECONDS.toMillis(5), - ElasticsearchHostsSniffer.Scheme.HTTP); + ElasticsearchNodesSniffer.Scheme.HTTP); Sniffer sniffer = Sniffer.builder(restClient) - .setHostsSniffer(hostsSniffer).build(); + .setNodesSniffer(nodesSniffer).build(); //end::sniff-request-timeout } { - //tag::custom-hosts-sniffer + //tag::custom-nodes-sniffer RestClient restClient = RestClient.builder( new HttpHost("localhost", 9200, "http")) .build(); - HostsSniffer hostsSniffer = new HostsSniffer() { + NodesSniffer nodesSniffer = new NodesSniffer() { @Override - public List<HttpHost> sniffHosts() throws IOException { + public List<Node> sniff() throws IOException { return null; // <1> } }; Sniffer sniffer = Sniffer.builder(restClient) - .setHostsSniffer(hostsSniffer).build(); - //end::custom-hosts-sniffer + .setNodesSniffer(nodesSniffer).build(); + //end::custom-nodes-sniffer } } } diff --git a/client/sniffer/src/test/resources/2.0.0_nodes_http.json b/client/sniffer/src/test/resources/2.0.0_nodes_http.json new file mode 100644 index 0000000000000..b370e78e16011 --- /dev/null +++ b/client/sniffer/src/test/resources/2.0.0_nodes_http.json @@ -0,0 +1,141 @@ +{ + "cluster_name" : "elasticsearch", + "nodes" : { + "qYUZ_8bTRwODPxukDlFw6Q" : { + "name" : "d2", + "transport_address" : "127.0.0.1:9304", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9204", + "attributes" : { + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9204", "[::1]:9204" ], + "publish_address" : "127.0.0.1:9204", + "max_content_length_in_bytes" : 104857600 + } + }, + "Yej5UVNgR2KgBjUFHOQpCw" : { + "name" : "c1", + "transport_address" : "127.0.0.1:9307", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9207", + "attributes" : { + "data" : "false", + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9207", "[::1]:9207" ], + "publish_address" : "127.0.0.1:9207", + "max_content_length_in_bytes" : 104857600 + } + }, + "mHttJwhwReangKEx9EGuAg" : { + "name" : "m3", + "transport_address" : "127.0.0.1:9301", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9201", + "attributes" : { + "data" : "false", + "master" : "true" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9201", "[::1]:9201" ], + "publish_address" : "127.0.0.1:9201", + "max_content_length_in_bytes" : 104857600 + } + }, + "6Erdptt_QRGLxMiLi9mTkg" : { + "name" : "c2", + "transport_address" : "127.0.0.1:9306", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" :
"2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9206", + "attributes" : { + "data" : "false", + "client" : "true" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9206", "[::1]:9206" ], + "publish_address" : "127.0.0.1:9206", + "max_content_length_in_bytes" : 104857600 + } + }, + "mLRCZBypTiys6e8KY5DMnA" : { + "name" : "m1", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9200", + "attributes" : { + "data" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9200", "[::1]:9200" ], + "publish_address" : "127.0.0.1:9200", + "max_content_length_in_bytes" : 104857600 + } + }, + "pVqOhytXQwetsZVzCBppYw" : { + "name" : "m2", + "transport_address" : "127.0.0.1:9302", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9202", + "http" : { + "bound_address" : [ "127.0.0.1:9202", "[::1]:9202" ], + "publish_address" : "127.0.0.1:9202", + "max_content_length_in_bytes" : 104857600 + } + }, + "ARyzVfpJSw2a9TOIUpbsBA" : { + "name" : "d1", + "transport_address" : "127.0.0.1:9305", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9205", + "attributes" : { + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9205", "[::1]:9205" ], + "publish_address" : "127.0.0.1:9205", + "max_content_length_in_bytes" : 104857600 + } + }, + "2Hpid-g5Sc2BKCevhN6VQw" : { + "name" : "d3", + "transport_address" : "127.0.0.1:9303", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9203", + "attributes" : { + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9203", "[::1]:9203" ], + "publish_address" : "127.0.0.1:9203", + "max_content_length_in_bytes" : 104857600 + } + } + } +} diff --git a/client/sniffer/src/test/resources/5.0.0_nodes_http.json b/client/sniffer/src/test/resources/5.0.0_nodes_http.json new file mode 100644 index 0000000000000..7a7d143ecaf43 --- /dev/null +++ b/client/sniffer/src/test/resources/5.0.0_nodes_http.json @@ -0,0 +1,169 @@ +{ + "_nodes" : { + "total" : 8, + "successful" : 8, + "failed" : 0 + }, + "cluster_name" : "test", + "nodes" : { + "DXz_rhcdSF2xJ96qyjaLVw" : { + "name" : "m1", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9200", + "127.0.0.1:9200" + ], + "publish_address" : "127.0.0.1:9200", + "max_content_length_in_bytes" : 104857600 + } + }, + "53Mi6jYdRgeR1cdyuoNfQQ" : { + "name" : "m2", + "transport_address" : "127.0.0.1:9301", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "master", + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9201", + "127.0.0.1:9201" + ], + "publish_address" : "127.0.0.1:9201", + "max_content_length_in_bytes" : 104857600 + } + }, + "XBIghcHiRlWP9c4vY6rETw" : { + "name" : "c2", + "transport_address" : "127.0.0.1:9307", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address" : "127.0.0.1:9207", + "max_content_length_in_bytes" : 104857600 + } + }, + 
"cFM30FlyS8K1njH_bovwwQ" : { + "name" : "d1", + "transport_address" : "127.0.0.1:9303", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address" : "127.0.0.1:9203", + "max_content_length_in_bytes" : 104857600 + } + }, + "eoVUVRGNRDyyOapqIcrsIA" : { + "name" : "d2", + "transport_address" : "127.0.0.1:9304", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address" : "127.0.0.1:9204", + "max_content_length_in_bytes" : 104857600 + } + }, + "xPN76uDcTP-DyXaRzPg2NQ" : { + "name" : "c1", + "transport_address" : "127.0.0.1:9306", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address" : "127.0.0.1:9206", + "max_content_length_in_bytes" : 104857600 + } + }, + "RY0oW2d7TISEqazk-U4Kcw" : { + "name" : "d3", + "transport_address" : "127.0.0.1:9305", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address" : "127.0.0.1:9205", + "max_content_length_in_bytes" : 104857600 + } + }, + "tU0rXEZmQ9GsWfn2TQ4kow" : { + "name" : "m3", + "transport_address" : "127.0.0.1:9302", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9202", + "127.0.0.1:9202" + ], + "publish_address" : "127.0.0.1:9202", + "max_content_length_in_bytes" : 104857600 + } + } + } +} diff --git a/client/sniffer/src/test/resources/6.0.0_nodes_http.json b/client/sniffer/src/test/resources/6.0.0_nodes_http.json new file mode 100644 index 0000000000000..5a8905da64c89 --- /dev/null +++ b/client/sniffer/src/test/resources/6.0.0_nodes_http.json @@ -0,0 +1,169 @@ +{ + "_nodes" : { + "total" : 8, + "successful" : 8, + "failed" : 0 + }, + "cluster_name" : "test", + "nodes" : { + "FX9npqGQSL2mOGF8Zkf3hw" : { + "name" : "m2", + "transport_address" : "127.0.0.1:9301", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "master", + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9201", + "127.0.0.1:9201" + ], + "publish_address" : "127.0.0.1:9201", + "max_content_length_in_bytes" : 104857600 + } + }, + "jmUqzYLGTbWCg127kve3Tg" : { + "name" : "d1", + "transport_address" : "127.0.0.1:9303", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address" : "127.0.0.1:9203", + "max_content_length_in_bytes" : 104857600 + } + }, + "soBU6bzvTOqdLxPstSbJ2g" : { + "name" : "d3", + "transport_address" : "127.0.0.1:9305", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address" : "127.0.0.1:9205", + "max_content_length_in_bytes" : 104857600 + } 
+ }, + "mtYDAhURTP6twdmNAkMnOg" : { + "name" : "m3", + "transport_address" : "127.0.0.1:9302", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9202", + "127.0.0.1:9202" + ], + "publish_address" : "127.0.0.1:9202", + "max_content_length_in_bytes" : 104857600 + } + }, + "URxHiUQPROOt1G22Ev6lXw" : { + "name" : "c2", + "transport_address" : "127.0.0.1:9307", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address" : "127.0.0.1:9207", + "max_content_length_in_bytes" : 104857600 + } + }, + "_06S_kWoRqqFR8Z8CS3JRw" : { + "name" : "c1", + "transport_address" : "127.0.0.1:9306", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address" : "127.0.0.1:9206", + "max_content_length_in_bytes" : 104857600 + } + }, + "QZE5Bd6DQJmnfVs2dglOvA" : { + "name" : "d2", + "transport_address" : "127.0.0.1:9304", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address" : "127.0.0.1:9204", + "max_content_length_in_bytes" : 104857600 + } + }, + "_3mTXg6dSweZn5ReB2fQqw" : { + "name" : "m1", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9200", + "127.0.0.1:9200" + ], + "publish_address" : "127.0.0.1:9200", + "max_content_length_in_bytes" : 104857600 + } + } + } +} diff --git a/client/sniffer/src/test/resources/readme.txt b/client/sniffer/src/test/resources/readme.txt new file mode 100644 index 0000000000000..ccb9bb15edb55 --- /dev/null +++ b/client/sniffer/src/test/resources/readme.txt @@ -0,0 +1,4 @@ +The `*_nodes_http.json` files were created by spinning up toy clusters with a +few nodes in different configurations locally at various versions. They are +for testing `ElasticsearchNodesSniffer` against different versions of +Elasticsearch. diff --git a/docs/java-rest/high-level/getting-started.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc index 14a5058eb7272..3e9b9fa7ea08f 100644 --- a/docs/java-rest/high-level/getting-started.asciidoc +++ b/docs/java-rest/high-level/getting-started.asciidoc @@ -144,3 +144,13 @@ include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[rest-high-level-cl In the rest of this documentation about the Java High Level Client, the `RestHighLevelClient` instance will be referenced as `client`. + +[[java-rest-high-getting-started-request-options]] +=== RequestOptions + +All APIs in the `RestHighLevelClient` accept a `RequestOptions` which you can +use to customize the request in ways that won't change how Elasticsearch +executes the request. For example, this is the place where you'd specify a +`NodeSelector` to control which node receives the request. See the +<<java-rest-low-usage-request-options,low level client documentation>> for +more examples of customizing the options.
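To make the `NodeSelector` mentioned above concrete, a minimal sketch of a custom selector follows. It assumes the `NodeSelector` contract is a single `select(Iterable<Node> nodes)` method whose implementations remove unwanted nodes in place, and that `Node#getRoles()` may return `null` when the client was not populated by the sniffer; neither detail is spelled out in this hunk, so treat the snippet as illustrative rather than as the shipped API:

```java
import java.util.Iterator;

import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;

public class DataNodeSelectorExample {
    // Illustrative selector that keeps only data nodes, e.g. for search-heavy requests.
    static final NodeSelector DATA_NODES_ONLY = new NodeSelector() {
        @Override
        public void select(Iterable<Node> nodes) {
            for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
                Node node = itr.next();
                // Role metadata is only available when the nodes were sniffed from
                // the cluster; without it this sketch conservatively drops the node.
                if (node.getRoles() == null || false == node.getRoles().isData()) {
                    itr.remove();
                }
            }
        }
    };
}
```

A selector like this can then be installed through `RequestOptions.Builder#setNodeSelector` so that every request built from those options is routed only to matching nodes.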
diff --git a/docs/java-rest/low-level/sniffer.asciidoc b/docs/java-rest/low-level/sniffer.asciidoc index 4f846847615ea..1ffaa519cfb50 100644 --- a/docs/java-rest/low-level/sniffer.asciidoc +++ b/docs/java-rest/low-level/sniffer.asciidoc @@ -55,7 +55,7 @@ dependencies { Once a `RestClient` instance has been created as shown in <<java-rest-low-usage-initialization>>, a `Sniffer` can be associated to it. The `Sniffer` will make use of the provided `RestClient` to periodically (every 5 minutes by default) fetch the list of current nodes from the cluster -and update them by calling `RestClient#setHosts`. +and update them by calling `RestClient#setNodes`. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -105,7 +105,7 @@ on failure is not enabled like explained above. The Elasticsearch Nodes Info api doesn't return the protocol to use when connecting to the nodes but only their `host:port` key-pair, hence `http` is used by default. In case `https` should be used instead, the -`ElasticsearchHostsSniffer` instance has to be manually created and provided +`ElasticsearchNodesSniffer` instance has to be manually created and provided as follows: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -125,12 +125,12 @@ cluster, the ones that have responded until then. include-tagged::{doc-tests}/SnifferDocumentation.java[sniff-request-timeout] -------------------------------------------------- -Also, a custom `HostsSniffer` implementation can be provided for advanced -use-cases that may require fetching the hosts from external sources rather +Also, a custom `NodesSniffer` implementation can be provided for advanced +use-cases that may require fetching the `Node`s from external sources rather than from Elasticsearch: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/SnifferDocumentation.java[custom-hosts-sniffer] +include-tagged::{doc-tests}/SnifferDocumentation.java[custom-nodes-sniffer] -------------------------------------------------- <1> Fetch the hosts from the external source diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 012ce418226cd..407947000de35 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -271,24 +271,51 @@ a `ContentType` of `application/json`. include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body-shorter] -------------------------------------------------- -And you can add one or more headers to send with the request: +[[java-rest-low-usage-request-options]] +==== RequestOptions + +The `RequestOptions` class holds parts of the request that should be shared +between many requests in the same application. You can make a singleton +instance and share it between all requests: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-singleton] -------------------------------------------------- +<1> Add any headers needed by all requests. +<2> Set a `NodeSelector`. +<3> Customize the response consumer. + +`addHeader` is for headers that are required for authorization or to work with +a proxy in front of Elasticsearch. There is no need to set the `Content-Type` +header because the client will automatically set that from the `HttpEntity` +attached to the request.
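The `include-tagged::` macro pulls that singleton from `RestClientDocumentation.java`, which this hunk does not reproduce. A rough sketch of what such a shared instance might look like, matching the three callouts above, is shown below; the `RequestOptions.DEFAULT` entry point and the 30MB buffer limit are assumptions for illustration, and the token value is a placeholder:

```java
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.client.RequestOptions;

public final class CommonOptionsExample {
    public static final RequestOptions COMMON_OPTIONS;
    static {
        RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); // assumed entry point
        builder.addHeader("Authorization", "Bearer placeholder-token");      // <1> headers shared by all requests
        builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);               // <2> avoid dedicated master nodes
        builder.setHttpAsyncResponseConsumerFactory(                         // <3> cap buffered responses at 30MB
            new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024));
        COMMON_OPTIONS = builder.build();
    }

    private CommonOptionsExample() {}
}
```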
+ +You can set the `NodeSelector`, which controls which nodes will receive +requests. `NodeSelector.NOT_MASTER_ONLY` is a good choice. You can also customize the response consumer used to buffer the asynchronous responses. The default consumer will buffer up to 100MB of response on the JVM heap. If the response is larger than that, the request will fail. You could, for example, lower the maximum size which might be useful if you are running -in a heap constrained environment: +in a heap constrained environment like the example above. + +Once you've created the singleton you can use it when making requests: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-set-singleton] +-------------------------------------------------- + +You can also customize these options on a per-request basis. For example, this +adds an extra header: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize] -------------------------------------------------- + ==== Multiple parallel asynchronous actions The client is quite happy to execute many actions in parallel. The following diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index c93873a5be429..c2259c7b55d14 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -197,6 +197,24 @@ header. The warnings must match exactly. Using it looks like this: id: 1 .... +If the arguments to `do` include `node_selector` then the request is only +sent to nodes that match the `node_selector`. Currently only the `version` +selector is supported and it has the same logic as the `version` field in +`skip`. It looks like this: + +.... +"test id": + - skip: + features: node_selector + - do: + node_selector: + version: " - 6.9.99" + index: + index: test-weird-index-中文 + type: weird.type + id: 1 + body: { foo: bar } +....
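For a runner built on the Java low-level client, the `version` string can be translated into a `NodeSelector` that filters on the version each node reported during sniffing. The sketch below is hypothetical: the real `do` section parses the bounds out of the yaml with the same range logic as `skip`, whereas here an upper bound is hard-coded and the `select(Iterable<Node>)` contract is assumed:

```java
import java.util.Iterator;

import org.elasticsearch.Version;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;

public class VersionSelectorExample {
    // Hypothetical selector for `version: " - 6.9.99"`: keep only nodes at or
    // below the upper bound, using the version sniffed from the cluster.
    static final NodeSelector UP_TO_6_9_99 = new NodeSelector() {
        @Override
        public void select(Iterable<Node> nodes) {
            Version upperBound = Version.fromString("6.9.99"); // placeholder; parsed from the yaml in practice
            for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
                Node node = itr.next();
                if (Version.fromString(node.getVersion()).after(upperBound)) {
                    itr.remove();
                }
            }
        }
    };
}
```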
=== `set` diff --git a/test/framework/build.gradle b/test/framework/build.gradle index c497b63469450..39f1b75242880 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -21,6 +21,7 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks; dependencies { compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" + compile "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}" compile "org.elasticsearch:elasticsearch:${version}" compile "org.elasticsearch:elasticsearch-cli:${version}" compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java index ac9b87c8fc6fe..df79b8f4a7add 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java @@ -20,6 +20,8 @@ package org.elasticsearch.test.rest.yaml; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -45,22 +47,29 @@ public ClientYamlDocsTestClient(ClientYamlSuiteRestSpec restSpec, RestClient res super(restSpec, restClient, hosts, esVersion); } - public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, HttpEntity entity, Map<String, String> headers) - throws IOException { + @Override + public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, HttpEntity entity, + Map<String, String> headers, NodeSelector nodeSelector) throws IOException { if ("raw".equals(apiName)) { - // Raw requests are bit simpler.... + // Raw requests don't use the rest spec at all and are configured entirely by their parameters Map<String, String> queryStringParams = new HashMap<>(params); String method = Objects.requireNonNull(queryStringParams.remove("method"), "Method must be set to use raw request"); String path = "/" + Objects.requireNonNull(queryStringParams.remove("path"), "Path must be set to use raw request"); - // And everything else is a url parameter!
+ Request request = new Request(method, path); + // All other parameters are url parameters + for (Map.Entry<String, String> param : queryStringParams.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setEntity(entity); + setOptions(request, headers, nodeSelector); try { - Response response = restClient.performRequest(method, path, queryStringParams, entity); + Response response = restClient.performRequest(request); return new ClientYamlTestResponse(response); } catch (ResponseException e) { throw new ClientYamlTestResponseException(e); } } - return super.callApi(apiName, params, entity, headers); + return super.callApi(apiName, params, entity, headers, nodeSelector); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 795d99c51ef43..ddf50c193d3b2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -19,18 +19,19 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.RestClient; -import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestPath; @@ -75,8 +76,8 @@ public Version getEsVersion() { /** * Calls an api with the provided parameters and body */ - public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, HttpEntity entity, Map<String, String> headers) - throws IOException { + public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, HttpEntity entity, + Map<String, String> headers, NodeSelector nodeSelector) throws IOException { ClientYamlSuiteRestApi restApi = restApi(apiName); @@ -161,22 +162,33 @@ public ClientYamlTestResponse callApi(String apiName, Map<String, String> params requestPath = finalPath.toString(); } - Header[] requestHeaders = new Header[headers.size()]; - int index = 0; - for (Map.Entry<String, String> header : headers.entrySet()) { - logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); - requestHeaders[index++] = new BasicHeader(header.getKey(), header.getValue()); - } + logger.debug("calling api [{}]", apiName); + Request request = new Request(requestMethod, requestPath); + for (Map.Entry<String, String> param : queryStringParams.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setEntity(entity); + setOptions(request, headers, nodeSelector); try { - Response response = restClient.performRequest(requestMethod, requestPath, queryStringParams, entity,
requestHeaders); + Response response = restClient.performRequest(request); return new ClientYamlTestResponse(response); } catch(ResponseException e) { throw new ClientYamlTestResponseException(e); } } + protected static void setOptions(Request request, Map<String, String> headers, NodeSelector nodeSelector) { + RequestOptions.Builder options = request.getOptions().toBuilder(); + for (Map.Entry<String, String> header : headers.entrySet()) { + logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); + options.addHeader(header.getKey(), header.getValue()); + } + options.setNodeSelector(nodeSelector); + request.setOptions(options); + } + private static boolean sendBodyAsSourceParam(List<String> supportedMethods, String contentType, long contentLength) { if (false == supportedMethods.contains(HttpGet.METHOD_NAME)) { // The API doesn't claim to support GET anyway diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index ca04c0c53d12a..e1d889a899565 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -25,6 +25,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -68,6 +69,15 @@ public class ClientYamlTestExecutionContext { */ public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies, Map<String, String> headers) throws IOException { + return callApi(apiName, params, bodies, headers, NodeSelector.ANY); + } + + /** + * Calls an elasticsearch api with the parameters and request body provided as arguments. + * Saves the obtained response in the execution context.
+ */ + public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies, + Map<String, String> headers, NodeSelector nodeSelector) throws IOException { //makes a copy of the parameters before modifying them for this specific request Map<String, String> requestParams = new HashMap<>(params); requestParams.putIfAbsent("error_trace", "true"); // By default ask for error traces, this may be overridden by params @@ -87,7 +97,7 @@ public ClientYamlTestResponse callApi(String apiName, Map<String, String> params HttpEntity entity = createEntity(bodies, requestHeaders); try { - response = callApiInternal(apiName, requestParams, entity, requestHeaders); + response = callApiInternal(apiName, requestParams, entity, requestHeaders, nodeSelector); return response; } catch(ClientYamlTestResponseException e) { response = e.getRestTestResponse(); @@ -153,9 +163,9 @@ private BytesRef bodyAsBytesRef(Map<String, Object> bodyAsMap, XContentType xCon } // pkg-private for testing - ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params, - HttpEntity entity, Map<String, String> headers) throws IOException { - return clientYamlTestClient.callApi(apiName, params, entity, headers); + ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params, HttpEntity entity, + Map<String, String> headers, NodeSelector nodeSelector) throws IOException { + return clientYamlTestClient.callApi(apiName, params, entity, headers, nodeSelector); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index de4b451807d99..9fa13859042ba 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -22,9 +22,11 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.http.HttpHost; import org.elasticsearch.Version; +import org.elasticsearch.client.Node; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; @@ -47,11 +49,20 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; /** - * Runs a suite of yaml tests shared with all the official Elasticsearch clients against against an elasticsearch cluster. + * Runs a suite of yaml tests shared with all the official Elasticsearch + * clients against an elasticsearch cluster. + *

+ * <strong>IMPORTANT</strong>: These tests sniff the cluster for metadata + * and hosts on startup and replace the list of hosts that they are + * configured to use with the list sniffed from the cluster. So you can't + * control which nodes receive the request by providing the right list of + * nodes in the <code>tests.rest.cluster</code> system property. Instead + * the tests must explicitly use `node_selector`s. */ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { @@ -102,6 +113,11 @@ protected ESClientYamlSuiteTestCase(ClientYamlTestCandidate testCandidate) { @Before public void initAndResetContext() throws Exception { if (restTestExecutionContext == null) { + // Sniff host metadata in case we need it in the yaml tests + List<Node> nodesWithMetadata = sniffHostMetadata(adminClient()); + client().setNodes(nodesWithMetadata); + adminClient().setNodes(nodesWithMetadata); + assert adminExecutionContext == null; assert blacklistPathMatchers == null; ClientYamlSuiteRestSpec restSpec = ClientYamlSuiteRestSpec.load(SPEC_PATH); @@ -376,4 +392,15 @@ private String errorMessage(ExecutableSection executableSection, Throwable t) { protected boolean randomizeContentType() { return true; } + + /** + * Sniff the cluster for host metadata. + */ + private List<Node> sniffHostMetadata(RestClient client) throws IOException { + ElasticsearchNodesSniffer.Scheme scheme = + ElasticsearchNodesSniffer.Scheme.valueOf(getProtocol().toUpperCase(Locale.ROOT)); + ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer( + client, ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); + return sniffer.sniff(); + } }
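The sniffing step above is the canonical way to obtain `Node` metadata such as version and roles. A minimal standalone sketch of the same calls, assuming a cluster reachable over plain HTTP on localhost:9200:

```java
import java.io.IOException;
import java.util.List;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer;

public class SniffExample {
    public static void main(String[] args) throws IOException {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // Ask the cluster for its own view of its nodes, including the
            // version and roles metadata that NodeSelectors act on.
            ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer(
                    client,
                    ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT,
                    ElasticsearchNodesSniffer.Scheme.HTTP);
            List<Node> nodes = sniffer.sniff();
            // Replace the configured hosts with the sniffed nodes.
            client.setNodes(nodes);
        }
    }
}
```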
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index 3168543b5554b..f3201f3ae60bb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -37,6 +37,7 @@ public final class Features { "catch_unauthorized", "embedded_stash_key", "headers", + "node_selector", "stash_in_key", "stash_in_path", "stash_path_replace", diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java deleted file mode 100644 index de63b46eff313..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Parses YAML test {@link org.elasticsearch.test.rest.yaml.section.ClientYamlTestSuite}s containing - * {@link org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection}s. - */ -package org.elasticsearch.test.rest.yaml.parser; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java index 4553845458541..de73fefaea776 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java @@ -24,6 +24,8 @@ import java.util.List; import java.util.Map; +import org.elasticsearch.client.NodeSelector; + import static java.util.Collections.unmodifiableMap; /** @@ -35,6 +37,7 @@ public class ApiCallSection { private final Map<String, String> params = new HashMap<>(); private final Map<String, String> headers = new HashMap<>(); private final List<Map<String, Object>> bodies = new ArrayList<>(); + private NodeSelector nodeSelector = NodeSelector.ANY; public ApiCallSection(String api) { this.api = api; @@ -76,4 +79,18 @@ public void addBody(Map<String, Object> body) { public boolean hasBody() { return bodies.size() > 0; } + + /** + * The selector that decides which node can run this request. + */ + public NodeSelector getNodeSelector() { + return nodeSelector; + } + + /** + * Set the selector that decides which node can run this request. + */ + public void setNodeSelector(NodeSelector nodeSelector) { + this.nodeSelector = nodeSelector; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java index 321d22ed70aa7..1ec2382fac596 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -91,6 +92,12 @@ public void addExecutableSection(ExecutableSection executableSection) { + "runners that do not support the [warnings] section can skip the test at line [" + doSection.getLocation().lineNumber + "]"); } + if (NodeSelector.ANY != doSection.getApiCallSection().getNodeSelector() + && false == skipSection.getFeatures().contains("node_selector")) { + throw new IllegalArgumentException("Attempted to add a [do] with a [node_selector] section without a corresponding " + + "[skip] so runners that do not support the [node_selector] section can skip the test at line [" + + doSection.getLocation().lineNumber + "]"); + } } this.executableSections.add(executableSection); }
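The guard above means a test that sets a `node_selector` must also declare the feature in its `skip` section, or adding the `do` section fails. A sketch of the yaml syntax the parser below accepts; the API name and version range are illustrative:

```yaml
---
"only runs against 6.x nodes":
  - skip:
      features: node_selector
  - do:
      node_selector:
        version: "6.0.0 - 6.99.99"
      cat.nodes: {}
```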
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 81d5c1d32a94b..4754ea0fc4d66 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -20,6 +20,9 @@ package org.elasticsearch.test.rest.yaml.section; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -37,9 +40,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.TreeMap; import java.util.regex.Matcher; @@ -84,6 +89,7 @@ public static DoSection parse(XContentParser parser) throws IOException { DoSection doSection = new DoSection(parser.getTokenLocation()); ApiCallSection apiCallSection = null; + NodeSelector nodeSelector = NodeSelector.ANY; Map<String, String> headers = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); List<String> expectedWarnings = new ArrayList<>(); @@ -120,6 +126,18 @@ public static DoSection parse(XContentParser parser) throws IOException { headers.put(headerName, parser.text()); } } + } else if ("node_selector".equals(currentFieldName)) { + String selectorName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + selectorName = parser.currentName(); + } else if (token.isValue()) { + NodeSelector newSelector = buildNodeSelector( + parser.getTokenLocation(), selectorName, parser.text()); + nodeSelector = nodeSelector == NodeSelector.ANY ? + newSelector : new ComposeNodeSelector(nodeSelector, newSelector); + } + } } else if (currentFieldName != null) { // must be part of API call then apiCallSection = new ApiCallSection(currentFieldName); String paramName = null; @@ -152,6 +170,7 @@ public static DoSection parse(XContentParser parser) throws IOException { throw new IllegalArgumentException("client call section is mandatory within a do section"); } apiCallSection.addHeaders(headers); + apiCallSection.setNodeSelector(nodeSelector); doSection.setApiCallSection(apiCallSection); doSection.setExpectedWarningHeaders(unmodifiableList(expectedWarnings)); } finally { @@ -221,7 +240,7 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx try { ClientYamlTestResponse response = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), - apiCallSection.getBodies(), apiCallSection.getHeaders()); + apiCallSection.getBodies(), apiCallSection.getHeaders(), apiCallSection.getNodeSelector()); if (Strings.hasLength(catchParam)) { String catchStatusCode; if (catches.containsKey(catchParam)) { @@ -346,4 +365,61 @@ private String formatStatusCodeMessage(ClientYamlTestResponse restTestResponse, not(equalTo(408)), not(equalTo(409))))); } + + private static NodeSelector buildNodeSelector(XContentLocation location, String name, String value) { + switch (name) { + case "version": + Version[] range = SkipSection.parseVersionRange(value); + return new NodeSelector() { + @Override + public void select(Iterable<Node> nodes) { + for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) { + Node node = itr.next(); + if (node.getVersion() == null) { + throw new IllegalStateException("expected [version] metadata to be set but got " + + node); + } + Version version = Version.fromString(node.getVersion()); + if (false == (version.onOrAfter(range[0]) && version.onOrBefore(range[1]))) { + itr.remove(); + } + } + } + + @Override + public String toString() { + return "version between [" + range[0] + "] and [" + range[1] + "]"; + } + }; + default: + throw new IllegalArgumentException("unknown node_selector [" + name + "]"); + } + } +
+ /** + * Selector that composes two selectors, running the "right" selector + * first and then running the "left" selector on the nodes the "right" + * selector left in place. + */ + private static class ComposeNodeSelector implements NodeSelector { + private final NodeSelector lhs; + private final NodeSelector rhs; + + private ComposeNodeSelector(NodeSelector lhs, NodeSelector rhs) { + this.lhs = Objects.requireNonNull(lhs, "lhs is required"); + this.rhs = Objects.requireNonNull(rhs, "rhs is required"); + } + + @Override + public void select(Iterable<Node> nodes) { + rhs.select(nodes); + lhs.select(nodes); + } + + @Override + public String toString() { + // `.` as in Haskell's "compose" operator + return lhs + "." + rhs; + } + } }
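The composition contract is easy to miss: `select` mutates the collection it is handed, and `ComposeNodeSelector` applies the right-hand selector before the left. A self-contained sketch of that behavior using two ad-hoc selectors; the host names and version strings are made up:

```java
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;

public class ComposeExample {
    public static void main(String[] args) {
        // Selector that removes nodes without sniffed version metadata.
        NodeSelector hasVersion = nodes -> {
            for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
                if (itr.next().getVersion() == null) {
                    itr.remove();
                }
            }
        };
        // Selector that keeps only 6.x nodes; assumes hasVersion ran first.
        NodeSelector sixX = nodes -> {
            for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
                if (false == itr.next().getVersion().startsWith("6.")) {
                    itr.remove();
                }
            }
        };

        List<Node> candidates = new ArrayList<>();
        candidates.add(new Node(new HttpHost("a", 9200), null, null, "6.1.2", null));
        candidates.add(new Node(new HttpHost("b", 9200), null, null, "5.6.9", null));
        candidates.add(new Node(new HttpHost("c", 9200))); // no metadata sniffed

        // Same order as ComposeNodeSelector(sixX, hasVersion): rhs, then lhs.
        hasVersion.select(candidates);
        sixX.select(candidates);
        System.out.println(candidates); // only node "a" remains
    }
}
```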
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java index eb1fea4b79aed..e487f8e74da3b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java @@ -153,7 +153,7 @@ public boolean isEmpty() { return EMPTY.equals(this); } - private Version[] parseVersionRange(String versionRange) { + static Version[] parseVersionRange(String versionRange) { if (versionRange == null) { return new Version[] { null, null }; } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 2150baf59eab0..fbf7f10e5e186 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.rest.yaml; import org.apache.http.HttpEntity; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -36,8 +37,7 @@ public void testHeadersSupportStashedValueReplacement() throws IOException { new ClientYamlTestExecutionContext(null, randomBoolean()) { @Override ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params, - HttpEntity entity, - Map<String, String> headers) { + HttpEntity entity, Map<String, String> headers, NodeSelector nodeSelector) { headersRef.set(headers); return null; } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index ecee131c7a28e..87f2d7f9a53f8 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.rest.yaml.section; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -35,11 +36,12 @@ import static org.hamcrest.Matchers.nullValue; public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentParserTestCase { - public void testAddingDoWithoutWarningWithoutSkip() { + public void testAddingDoWithoutSkips() { int lineNumber = between(1, 10000); ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); section.setSkipSection(SkipSection.EMPTY); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + doSection.setApiCallSection(new ApiCallSection("test")); section.addExecutableSection(doSection); } @@ -49,6 +51,7 @@ public void testAddingDoWithWarningWithSkip() { section.setSkipSection(new SkipSection(null, singletonList("warnings"), null)); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeaders(singletonList("foo")); + doSection.setApiCallSection(new ApiCallSection("test")); section.addExecutableSection(doSection); } @@ -58,11 +61,37 @@ public void testAddingDoWithWarningWithSkipButNotWarnings() { section.setSkipSection(new SkipSection(null, singletonList("yaml"), null)); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeaders(singletonList("foo")); + doSection.setApiCallSection(new ApiCallSection("test")); Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection)); assertEquals("Attempted to add a [do] with a [warnings] section without a corresponding [skip] so runners that do not support the" + " [warnings] section can skip the test at line [" + lineNumber + "]", e.getMessage()); } + public void testAddingDoWithNodeSelectorWithSkip() { + int lineNumber = between(1, 10000); + ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); + section.setSkipSection(new SkipSection(null, singletonList("node_selector"), null)); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + ApiCallSection apiCall = new ApiCallSection("test"); + apiCall.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + doSection.setApiCallSection(apiCall); + section.addExecutableSection(doSection); + } + + public void testAddingDoWithNodeSelectorWithSkipButNotNodeSelector() { + int lineNumber = between(1, 10000); + ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); + section.setSkipSection(new SkipSection(null, singletonList("yaml"), null)); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + ApiCallSection apiCall = new ApiCallSection("test"); + apiCall.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + doSection.setApiCallSection(apiCall); + Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection)); + assertEquals("Attempted to add a [do] with a [node_selector] section without a corresponding" + " [skip] so runners that do not support the [node_selector] section can skip the test at" + " line [" + lineNumber + "]", e.getMessage()); + } + public void testWrongIndentation() throws Exception { { XContentParser parser = createParser(YamlXContent.yamlXContent, diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index 982eac4b80274..d5ee934bc531d 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -19,23 +19,35 @@ package org.elasticsearch.test.rest.yaml.section; +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.yaml.YamlXContent; +import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import java.util.Map; import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class DoSectionTests extends AbstractClientYamlTestFragmentParserTestCase { @@ -496,7 +508,40 @@ public void testParseDoSectionExpectedWarnings() throws Exception { assertThat(doSection.getApiCallSection(), notNullValue()); assertThat(doSection.getExpectedWarningHeaders(), equalTo(singletonList( "just one entry this time"))); + } + + public void testNodeSelector() throws IOException { + parser = createParser(YamlXContent.yamlXContent, + "node_selector:\n" + + " version: 5.2.0-6.0.0\n" + + "indices.get_field_mapping:\n" + + " index: test_index" + ); + + DoSection doSection = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node v170 = nodeWithVersion("1.7.0"); + Node v521 = nodeWithVersion("5.2.1"); + Node v550 = nodeWithVersion("5.5.0"); + Node v612 = nodeWithVersion("6.1.2"); + List<Node> nodes = new ArrayList<>(); + nodes.add(v170); + nodes.add(v521); + nodes.add(v550); + nodes.add(v612); + doSection.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(Arrays.asList(v521, v550), nodes); + ClientYamlTestExecutionContext context = mock(ClientYamlTestExecutionContext.class); + ClientYamlTestResponse mockResponse = mock(ClientYamlTestResponse.class); + when(context.callApi("indices.get_field_mapping", singletonMap("index", "test_index"), + emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector())).thenReturn(mockResponse); + doSection.execute(context); + verify(context).callApi("indices.get_field_mapping", singletonMap("index", "test_index"), + emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector()); + } + private Node nodeWithVersion(String version) { + return new Node(new HttpHost("dummy"), null, null, version, null); } private void assertJsonEquals(Map<String, Object> actual, String expected) throws IOException {
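The monitoring exporter changes below adopt the renamed sniffer API. For context, a minimal sketch of the sniff-on-failure wiring that `createSniffer` mirrors, assuming a node at localhost:9200; the class name is illustrative:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer;
import org.elasticsearch.client.sniff.SniffOnFailureListener;
import org.elasticsearch.client.sniff.Sniffer;

public class SnifferWiringExample {
    public static void main(String[] args) throws Exception {
        SniffOnFailureListener listener = new SniffOnFailureListener();
        RestClient client = RestClient.builder(new HttpHost("localhost", 9200))
                .setFailureListener(listener) // re-sniff as soon as a node fails
                .build();
        ElasticsearchNodesSniffer nodesSniffer = new ElasticsearchNodesSniffer(
                client, ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT,
                ElasticsearchNodesSniffer.Scheme.HTTP);
        Sniffer sniffer = Sniffer.builder(client).setNodesSniffer(nodesSniffer).build();
        // Inform the sniffer whenever there's a node failure.
        listener.setSniffer(sniffer);
        // ... use the client ...
        sniffer.close();
        client.close();
    }
}
```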
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java index af0c543678048..e2ca3516159a0 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java @@ -17,8 +17,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; -import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer; -import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer.Scheme; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -303,11 +302,12 @@ static Sniffer createSniffer(final Config config, final RestClient client, final if (sniffingEnabled) { final List<String> hosts = HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings()); // createHosts(config) ensures that all schemes are the same for all hosts! - final Scheme scheme = hosts.get(0).startsWith("https") ? Scheme.HTTPS : Scheme.HTTP; - final ElasticsearchHostsSniffer hostsSniffer = - new ElasticsearchHostsSniffer(client, ElasticsearchHostsSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); + final ElasticsearchNodesSniffer.Scheme scheme = hosts.get(0).startsWith("https") ? + ElasticsearchNodesSniffer.Scheme.HTTPS : ElasticsearchNodesSniffer.Scheme.HTTP; + final ElasticsearchNodesSniffer hostsSniffer = + new ElasticsearchNodesSniffer(client, ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); - sniffer = Sniffer.builder(client).setHostsSniffer(hostsSniffer).build(); + sniffer = Sniffer.builder(client).setNodesSniffer(hostsSniffer).build(); // inform the sniffer whenever there's a node failure listener.setSniffer(sniffer); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java index 92febdf3561f8..aa8d2da070eb5 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java @@ -8,6 +8,7 @@ import org.apache.http.HttpHost; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.common.Nullable; @@ -76,7 +77,8 @@ public void setResource(@Nullable final HttpResource resource) { } @Override - public void onFailure(final HttpHost host) { + public void onFailure(final Node node) { + HttpHost host = node.getHost(); logger.warn("connection failed to node at [{}://{}:{}]", host.getSchemeName(), host.getHostName(), host.getPort()); final HttpResource resource = this.resource.get(); @@ -90,4 +92,4 @@ public void onFailure(final HttpHost host) { } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java index 52eed801b3273..2c8c700fcf615 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java @@ -8,6 +8,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.sniff.Sniffer; @@ -44,8 +45,6 @@
import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyMapOf; -import static org.mockito.Matchers.eq; import static org.mockito.Mockito.atMost; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.inOrder; @@ -300,7 +299,7 @@ public void testCreateSniffer() throws IOException { final StringEntity entity = new StringEntity("{}", ContentType.APPLICATION_JSON); when(response.getEntity()).thenReturn(entity); - when(client.performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class))).thenReturn(response); + when(client.performRequest(any(Request.class))).thenReturn(response); try (Sniffer sniffer = HttpExporter.createSniffer(config, client, listener)) { assertThat(sniffer, not(nullValue())); @@ -309,7 +308,7 @@ public void testCreateSniffer() throws IOException { } // it's a race whether it triggers this at all - verify(client, atMost(1)).performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class)); + verify(client, atMost(1)).performRequest(any(Request.class)); verifyNoMoreInteractions(client, listener); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java index 08512e82e145d..a81874b7fa2c1 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java @@ -7,6 +7,7 @@ import org.apache.http.HttpHost; import org.apache.lucene.util.SetOnce.AlreadySetException; +import org.elasticsearch.client.Node; import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.test.ESTestCase; @@ -21,7 +22,7 @@ public class NodeFailureListenerTests extends ESTestCase { private final Sniffer sniffer = mock(Sniffer.class); private final HttpResource resource = new MockHttpResource(getTestName(), false); - private final HttpHost host = new HttpHost("localhost", 9200); + private final Node node = new Node(new HttpHost("localhost", 9200)); private final NodeFailureListener listener = new NodeFailureListener(); @@ -44,7 +45,7 @@ public void testSetResourceTwiceFails() { public void testSnifferNotifiedOnFailure() { listener.setSniffer(sniffer); - listener.onFailure(host); + listener.onFailure(node); verify(sniffer).sniffOnFailure(); } @@ -52,7 +53,7 @@ public void testSnifferNotifiedOnFailure() { public void testResourceNotifiedOnFailure() { listener.setResource(resource); - listener.onFailure(host); + listener.onFailure(node); assertTrue(resource.isDirty()); } @@ -64,7 +65,7 @@ public void testResourceAndSnifferNotifiedOnFailure() { listener.setResource(optionalResource); listener.setSniffer(optionalSniffer); - listener.onFailure(host); + listener.onFailure(node); if (optionalResource != null) { assertTrue(resource.isDirty());