LLClient: Support host selection (#30523)
Allows users of the Low Level REST client to specify which hosts a
request should be run on. They implement the  `NodeSelector` interface
or reuse a built-in selector like `NOT_MASTER_ONLY` to choose which nodes
are valid. Using it looks like:
```
Request request = new Request("POST", "/foo/_search");
RequestOptions.Builder options = request.getOptions().toBuilder();
options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
request.setOptions(options.build());
...
```
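
Built-in selectors don't cover every case; a custom `NodeSelector` can be written directly. The sketch below is illustrative only (not part of this change) and is modeled on the `NOT_MASTER_ONLY` implementation further down in the diff; the `INGEST_ONLY` name is invented for the example and it assumes `java.util.Iterator` is imported:
```
NodeSelector ingestOnly = new NodeSelector() {
    @Override
    public void select(Iterable<Node> nodes) {
        // Drop nodes with no role metadata as well as nodes that can't run
        // ingest pipelines, leaving only ingest-capable nodes.
        for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
            Node node = itr.next();
            if (node.getRoles() == null || false == node.getRoles().isIngest()) {
                itr.remove();
            }
        }
    }

    @Override
    public String toString() {
        return "INGEST_ONLY";
    }
};
```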

This introduces a new `Node` object which contains an `HttpHost` and
metadata about the host. At this point that metadata is just `version`
and `roles`, but I plan to add node attributes in a follow-up. The
canonical way to **get** this metadata is to use the `Sniffer` to pull
the information from the Elasticsearch cluster.
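
For illustration (not taken from the commit), constructing a `Node` by hand with made-up values looks roughly like this, matching the constructor in `Node.java` below and assuming the usual `HttpHost` and `Collections` imports; in practice the `Sniffer` builds these for you:
```
HttpHost host = new HttpHost("localhost", 9200);
// host, bound hosts, node.name, version, roles - everything but host may be null
Node node = new Node(host, Collections.singleton(host), "node-1", "7.0.0",
        new Node.Roles(true, true, true));
```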

I've marked this as "breaking-java" because it breaks custom
implementations of `HostsSniffer` by renaming the interface to
`NodesSniffer` and by changing it from returning a `List<HttpHost>` to a
`List<Node>`. It *shouldn't* break anyone else though.
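
As a rough sketch of what that migration looks like for a hand-rolled sniffer, the example below is a guess rather than code from this commit; in particular, the single `sniff()` method returning `List<Node>` is an assumption about the renamed `NodesSniffer` interface:
```
// Hypothetical custom sniffer after the HostsSniffer -> NodesSniffer rename.
class StaticNodesSniffer implements NodesSniffer {
    private final List<Node> nodes;

    StaticNodesSniffer(Node... nodes) {
        this.nodes = Arrays.asList(nodes);
    }

    @Override
    public List<Node> sniff() throws IOException {
        // Previously this would have returned a List<HttpHost>.
        return nodes;
    }
}
```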

Because we expect to find it useful, this also adds `host_selector`
support to `do` sections in the yaml tests. Using it looks a little
like:

```
---
"example test":
  - skip:
      features: host_selector
  - do:
      host_selector:
        version: " - 7.0.0" # same syntax as skip
      apiname:
        something: true
```

The `do` section parses the `version` string into a host selector that
uses the same version comparison logic as the `skip` section. When the
`do` section is executed, it passes it off to the `RestClient`, using
the `ElasticsearchHostsSniffer` to sniff the required metadata.

The idea is to use this in mixed-version tests to target a specific
version of Elasticsearch so we can be sure about the deprecation
logging, though we don't currently have any examples that need it. We do,
however, have at least one open pull request that requires something
like this to properly test it.

Closes #21888
nik9000 committed Jun 15, 2018
1 parent 4bcdcbd commit 3247012
Showing 57 changed files with 2,433 additions and 473 deletions.
@@ -29,7 +29,7 @@
final class DeadHostState implements Comparable<DeadHostState> {

private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1);
- private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);
+ static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);

private final int failedAttempts;
private final long deadUntilNanos;
@@ -55,12 +55,12 @@ final class DeadHostState implements Comparable<DeadHostState> {
*
* @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt
*/
- DeadHostState(DeadHostState previousDeadHostState, TimeSupplier timeSupplier) {
+ DeadHostState(DeadHostState previousDeadHostState) {
long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1),
MAX_CONNECTION_TIMEOUT_NANOS);
- this.deadUntilNanos = timeSupplier.nanoTime() + timeoutNanos;
+ this.deadUntilNanos = previousDeadHostState.timeSupplier.nanoTime() + timeoutNanos;
this.failedAttempts = previousDeadHostState.failedAttempts + 1;
- this.timeSupplier = timeSupplier;
+ this.timeSupplier = previousDeadHostState.timeSupplier;
}

/**
@@ -86,6 +86,10 @@ int getFailedAttempts() {

@Override
public int compareTo(DeadHostState other) {
if (timeSupplier != other.timeSupplier) {
throw new IllegalArgumentException("can't compare DeadHostStates with different clocks ["
+ timeSupplier + " != " + other.timeSupplier + "]");
}
return Long.compare(deadUntilNanos, other.deadUntilNanos);
}

@@ -94,19 +98,24 @@ public String toString() {
return "DeadHostState{" +
"failedAttempts=" + failedAttempts +
", deadUntilNanos=" + deadUntilNanos +
", timeSupplier=" + timeSupplier +
'}';
}

/**
* Time supplier that makes timing aspects pluggable to ease testing
*/
interface TimeSupplier {

TimeSupplier DEFAULT = new TimeSupplier() {
@Override
public long nanoTime() {
return System.nanoTime();
}

@Override
public String toString() {
return "nanoTime";
}
};

long nanoTime();
213 changes: 213 additions & 0 deletions client/rest/src/main/java/org/elasticsearch/client/Node.java
@@ -0,0 +1,213 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import java.util.Objects;
import java.util.Set;

import org.apache.http.HttpHost;

/**
* Metadata about an {@link HttpHost} running Elasticsearch.
*/
public class Node {
/**
* Address that this host claims is its primary contact point.
*/
private final HttpHost host;
/**
* Addresses on which the host is listening. These are useful to have
* around because they allow you to find a host based on any address it
* is listening on.
*/
private final Set<HttpHost> boundHosts;
/**
* Name of the node as configured by the {@code node.name} attribute.
*/
private final String name;
/**
* Version of Elasticsearch that the node is running or {@code null}
* if we don't know the version.
*/
private final String version;
/**
* Roles that the Elasticsearch process on the host has or {@code null}
* if we don't know what roles the node has.
*/
private final Roles roles;

/**
* Create a {@linkplain Node} with metadata. All parameters except
* {@code host} are nullable and implementations of {@link NodeSelector}
* need to decide what to do in their absence.
*/
public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version, Roles roles) {
if (host == null) {
throw new IllegalArgumentException("host cannot be null");
}
this.host = host;
this.boundHosts = boundHosts;
this.name = name;
this.version = version;
this.roles = roles;
}

/**
* Create a {@linkplain Node} without any metadata.
*/
public Node(HttpHost host) {
this(host, null, null, null, null);
}

/**
* Contact information for the host.
*/
public HttpHost getHost() {
return host;
}

/**
* Addresses on which the host is listening. These are useful to have
* around because they allow you to find a host based on any address it
* is listening on.
*/
public Set<HttpHost> getBoundHosts() {
return boundHosts;
}

/**
* The {@code node.name} of the node.
*/
public String getName() {
return name;
}

/**
* Version of Elasticsearch that the node is running or {@code null}
* if we don't know the version.
*/
public String getVersion() {
return version;
}

/**
* Roles that the Elasticsearch process on the host has or {@code null}
* if we don't know what roles the node has.
*/
public Roles getRoles() {
return roles;
}

@Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append("[host=").append(host);
if (boundHosts != null) {
b.append(", bound=").append(boundHosts);
}
if (name != null) {
b.append(", name=").append(name);
}
if (version != null) {
b.append(", version=").append(version);
}
if (roles != null) {
b.append(", roles=").append(roles);
}
return b.append(']').toString();
}

@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
Node other = (Node) obj;
return host.equals(other.host)
&& Objects.equals(boundHosts, other.boundHosts)
&& Objects.equals(name, other.name)
&& Objects.equals(version, other.version)
&& Objects.equals(roles, other.roles);
}

@Override
public int hashCode() {
return Objects.hash(host, boundHosts, name, version, roles);
}

/**
* Role information about an Elasticsearch process.
*/
public static final class Roles {
private final boolean masterEligible;
private final boolean data;
private final boolean ingest;

public Roles(boolean masterEligible, boolean data, boolean ingest) {
this.masterEligible = masterEligible;
this.data = data;
this.ingest = ingest;
}

/**
* Returns whether or not the node <strong>could</strong> be elected master.
*/
public boolean isMasterEligible() {
return masterEligible;
}
/**
* Returns whether or not the node stores data.
*/
public boolean isData() {
return data;
}
/**
* Returns whether or not the node runs ingest pipelines.
*/
public boolean isIngest() {
return ingest;
}

@Override
public String toString() {
StringBuilder result = new StringBuilder(3);
if (masterEligible) result.append('m');
if (data) result.append('d');
if (ingest) result.append('i');
return result.toString();
}

@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
Roles other = (Roles) obj;
return masterEligible == other.masterEligible
&& data == other.data
&& ingest == other.ingest;
}

@Override
public int hashCode() {
return Objects.hash(masterEligible, data, ingest);
}
}
}
@@ -0,0 +1,90 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import java.util.Iterator;

/**
* Selects nodes that can receive requests. Used to keep requests away
* from master nodes or to send them to nodes with a particular attribute.
* Use with {@link RequestOptions.Builder#setNodeSelector(NodeSelector)}.
*/
public interface NodeSelector {
/**
* Select the {@link Node}s to which to send requests. This is called with
* a mutable {@link Iterable} of {@linkplain Node}s in the order that the
* rest client would prefer to use them and implementers should remove
nodes from the list that should not receive the request. Implementers may
* iterate the nodes as many times as they need.
* <p>
* This may be called twice per request: first for "living" nodes that
* have not been blacklisted by previous errors. If the selector removes
* all nodes from the list or if there aren't any living nodes then the
* {@link RestClient} will call this method with a list of "dead" nodes.
* <p>
* Implementers should not rely on the ordering of the nodes.
*/
void select(Iterable<Node> nodes);
/*
* We were fairly careful with our choice of Iterable here. The caller has
* a List but reordering the list is likely to break round robin. Luckily
* Iterable doesn't allow any reordering.
*/

/**
* Selector that matches any node.
*/
NodeSelector ANY = new NodeSelector() {
@Override
public void select(Iterable<Node> nodes) {
// Intentionally does nothing
}

@Override
public String toString() {
return "ANY";
}
};

/**
* Selector that matches any node that has metadata and doesn't
* have the {@code master} role OR it has the {@code data}
* role.
*/
NodeSelector NOT_MASTER_ONLY = new NodeSelector() {
@Override
public void select(Iterable<Node> nodes) {
for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
Node node = itr.next();
if (node.getRoles() == null) continue;
if (node.getRoles().isMasterEligible()
&& false == node.getRoles().isData()
&& false == node.getRoles().isIngest()) {
itr.remove();
}
}
}

@Override
public String toString() {
return "NOT_MASTER_ONLY";
}
};
}
@@ -87,14 +87,14 @@ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpR
/**
* Logs a request that failed
*/
- static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, Exception e) {
+ static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) {
if (logger.isDebugEnabled()) {
- logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e);
+ logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e);
}
if (tracer.isTraceEnabled()) {
String traceRequest;
try {
- traceRequest = buildTraceRequest(request, host);
+ traceRequest = buildTraceRequest(request, node.getHost());
} catch (IOException e1) {
tracer.trace("error while reading request for trace purposes", e);
traceRequest = "";