From 363ec05df40d0fda6d9fbe5f67f060760542f0a9 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Thu, 14 Jun 2018 16:52:32 +0100 Subject: [PATCH 01/41] [ML] Add description to ML filters (#31330) This adds a `description` to ML filters in order to allow users to describe their filters in a human readable form which is also editable (filter updates to be added shortly). --- .../xpack/core/ml/job/config/MlFilter.java | 47 ++++++++++++++++--- .../action/GetFiltersActionResponseTests.java | 5 +- .../action/PutFilterActionRequestTests.java | 14 +----- .../core/ml/job/config/MlFilterTests.java | 19 ++++++-- .../xpack/ml/integration/JobProviderIT.java | 4 +- .../xpack/ml/job/JobManagerTests.java | 2 +- .../ControlMsgToProcessWriterTests.java | 4 +- .../writer/FieldConfigWriterTests.java | 4 +- .../writer/MlFilterWriterTests.java | 5 +- .../rest-api-spec/test/ml/filter_crud.yml | 2 + .../ml/integration/DetectionRulesIT.java | 6 +-- 11 files changed, 75 insertions(+), 37 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java index de6ee3d509c69..991f421265ea8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.ml.job.config; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,6 +31,7 @@ public class MlFilter implements ToXContentObject, Writeable { public static final ParseField TYPE = new ParseField("type"); public static final ParseField ID = new ParseField("filter_id"); + public static final ParseField DESCRIPTION = new ParseField("description"); public static final ParseField ITEMS = new ParseField("items"); // For QueryPage @@ -43,27 +45,38 @@ private static ObjectParser createParser(boolean ignoreUnknownFie parser.declareString((builder, s) -> {}, TYPE); parser.declareString(Builder::setId, ID); + parser.declareStringOrNull(Builder::setDescription, DESCRIPTION); parser.declareStringArray(Builder::setItems, ITEMS); return parser; } private final String id; + private final String description; private final List items; - public MlFilter(String id, List items) { + public MlFilter(String id, String description, List items) { this.id = Objects.requireNonNull(id, ID.getPreferredName() + " must not be null"); + this.description = description; this.items = Objects.requireNonNull(items, ITEMS.getPreferredName() + " must not be null"); } public MlFilter(StreamInput in) throws IOException { id = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + description = in.readOptionalString(); + } else { + description = null; + } items = Arrays.asList(in.readStringArray()); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeOptionalString(description); + } out.writeStringArray(items.toArray(new String[items.size()])); } @@ -71,6 +84,9 @@ public void writeTo(StreamOutput out) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(ID.getPreferredName(), id); + if (description != null) { + 
builder.field(DESCRIPTION.getPreferredName(), description); + } builder.field(ITEMS.getPreferredName(), items); if (params.paramAsBoolean(MlMetaIndex.INCLUDE_TYPE_KEY, false)) { builder.field(TYPE.getPreferredName(), FILTER_TYPE); @@ -83,6 +99,10 @@ public String getId() { return id; } + public String getDescription() { + return description; + } + public List getItems() { return new ArrayList<>(items); } @@ -98,12 +118,12 @@ public boolean equals(Object obj) { } MlFilter other = (MlFilter) obj; - return id.equals(other.id) && items.equals(other.items); + return id.equals(other.id) && Objects.equals(description, other.description) && items.equals(other.items); } @Override public int hashCode() { - return Objects.hash(id, items); + return Objects.hash(id, description, items); } public String documentId() { @@ -114,30 +134,45 @@ public static String documentId(String filterId) { return DOCUMENT_ID_PREFIX + filterId; } + public static Builder builder(String filterId) { + return new Builder().setId(filterId); + } + public static class Builder { private String id; + private String description; private List items = Collections.emptyList(); + private Builder() {} + public Builder setId(String id) { this.id = id; return this; } - private Builder() {} - @Nullable public String getId() { return id; } + public Builder setDescription(String description) { + this.description = description; + return this; + } + public Builder setItems(List items) { this.items = items; return this; } + public Builder setItems(String... items) { + this.items = Arrays.asList(items); + return this; + } + public MlFilter build() { - return new MlFilter(id, items); + return new MlFilter(id, description, items); } } } \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java index c8465c87587e9..7bda0f6e7de76 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.core.ml.action.GetFiltersAction.Response; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.core.ml.job.config.MlFilterTests; import java.util.Collections; @@ -17,9 +18,7 @@ public class GetFiltersActionResponseTests extends AbstractStreamableTestCase result; - - MlFilter doc = new MlFilter( - randomAlphaOfLengthBetween(1, 20), Collections.singletonList(randomAlphaOfLengthBetween(1, 20))); + MlFilter doc = MlFilterTests.createRandom(); result = new QueryPage<>(Collections.singletonList(doc), 1, MlFilter.RESULTS_FIELD); return new Response(result); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java index 21845922470f0..dfc3f5f37f40c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java @@ -8,10 +8,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.test.AbstractStreamableXContentTestCase; import org.elasticsearch.xpack.core.ml.action.PutFilterAction.Request; -import org.elasticsearch.xpack.core.ml.job.config.MlFilter; - -import java.util.ArrayList; -import java.util.List; +import org.elasticsearch.xpack.core.ml.job.config.MlFilterTests; public class PutFilterActionRequestTests extends AbstractStreamableXContentTestCase { @@ -19,13 +16,7 @@ public class PutFilterActionRequestTests extends AbstractStreamableXContentTestC @Override protected Request createTestInstance() { - int size = randomInt(10); - List items = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - items.add(randomAlphaOfLengthBetween(1, 20)); - } - MlFilter filter = new MlFilter(filterId, items); - return new PutFilterAction.Request(filter); + return new PutFilterAction.Request(MlFilterTests.createRandom(filterId)); } @Override @@ -42,5 +33,4 @@ protected Request createBlankInstance() { protected Request doParseInstance(XContentParser parser) { return PutFilterAction.Request.parseRequest(filterId, parser); } - } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java index 1b61e3ec9a43d..78d87b82839a2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java @@ -26,12 +26,25 @@ public static MlFilter createTestFilter() { @Override protected MlFilter createTestInstance() { + return createRandom(); + } + + public static MlFilter createRandom() { + return createRandom(randomAlphaOfLengthBetween(1, 20)); + } + + public static MlFilter createRandom(String filterId) { + String description = null; + if (randomBoolean()) { + description = randomAlphaOfLength(20); + } + int size = randomInt(10); List items = new ArrayList<>(size); for (int i = 0; i < size; i++) { items.add(randomAlphaOfLengthBetween(1, 20)); } - return new MlFilter(randomAlphaOfLengthBetween(1, 20), items); + return new MlFilter(filterId, description, items); } @Override @@ -45,13 +58,13 @@ protected MlFilter doParseInstance(XContentParser parser) { } public void testNullId() { - NullPointerException ex = expectThrows(NullPointerException.class, () -> new MlFilter(null, Collections.emptyList())); + NullPointerException ex = expectThrows(NullPointerException.class, () -> new MlFilter(null, "", Collections.emptyList())); assertEquals(MlFilter.ID.getPreferredName() + " must not be null", ex.getMessage()); } public void testNullItems() { NullPointerException ex = - expectThrows(NullPointerException.class, () -> new MlFilter(randomAlphaOfLengthBetween(1, 20), null)); + expectThrows(NullPointerException.class, () -> new MlFilter(randomAlphaOfLengthBetween(1, 20), "", null)); assertEquals(MlFilter.ITEMS.getPreferredName() + " must not be null", ex.getMessage()); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobProviderIT.java index 7e0dc453f07ee..856b930ac49b5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobProviderIT.java @@ -385,8 +385,8 @@ public void testGetAutodetectParams() throws Exception { indexScheduledEvents(events); List filters 
= new ArrayList<>(); - filters.add(new MlFilter("fruit", Arrays.asList("apple", "pear"))); - filters.add(new MlFilter("tea", Arrays.asList("green", "builders"))); + filters.add(MlFilter.builder("fruit").setItems("apple", "pear").build()); + filters.add(MlFilter.builder("tea").setItems("green", "builders").build()); indexFilters(filters); DataCounts earliestCounts = DataCountsTests.createTestInstance(jobId); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java index 454f941d6c8b0..42b0a56f49a82 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java @@ -210,7 +210,7 @@ public void testUpdateProcessOnFilterChanged() { JobManager jobManager = createJobManager(); - MlFilter filter = new MlFilter("foo_filter", Arrays.asList("a", "b")); + MlFilter filter = MlFilter.builder("foo_filter").setItems("a", "b").build(); jobManager.updateProcessOnFilterChanged(filter); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java index 8c32a5bb40d46..3d08f5a1c25fb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java @@ -207,8 +207,8 @@ public void testWriteUpdateDetectorRulesMessage() throws IOException { public void testWriteUpdateFiltersMessage() throws IOException { ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(lengthEncodedWriter, 2); - MlFilter filter1 = new MlFilter("filter_1", Arrays.asList("a")); - MlFilter filter2 = new MlFilter("filter_2", Arrays.asList("b", "c")); + MlFilter filter1 = MlFilter.builder("filter_1").setItems("a").build(); + MlFilter filter2 = MlFilter.builder("filter_2").setItems("b", "c").build(); writer.writeUpdateFiltersMessage(Arrays.asList(filter1, filter2)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/FieldConfigWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/FieldConfigWriterTests.java index bf08d09bf090c..d26dbb203c84e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/FieldConfigWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/FieldConfigWriterTests.java @@ -220,8 +220,8 @@ public void testWrite_GivenFilters() throws IOException { AnalysisConfig.Builder builder = new AnalysisConfig.Builder(Collections.singletonList(d)); analysisConfig = builder.build(); - filters.add(new MlFilter("filter_1", Arrays.asList("a", "b"))); - filters.add(new MlFilter("filter_2", Arrays.asList("c", "d"))); + filters.add(MlFilter.builder("filter_1").setItems("a", "b").build()); + filters.add(MlFilter.builder("filter_2").setItems("c", "d").build()); writer = mock(OutputStreamWriter.class); createFieldConfigWriter().write(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/MlFilterWriterTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/MlFilterWriterTests.java index f22f7d85090be..12ceb12f46223 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/MlFilterWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/MlFilterWriterTests.java @@ -10,7 +10,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -28,8 +27,8 @@ public void testWrite_GivenEmpty() throws IOException { public void testWrite() throws IOException { List filters = new ArrayList<>(); - filters.add(new MlFilter("filter_1", Arrays.asList("a", "b"))); - filters.add(new MlFilter("filter_2", Arrays.asList("c", "d"))); + filters.add(MlFilter.builder("filter_1").setItems("a", "b").build()); + filters.add(MlFilter.builder("filter_2").setItems("c", "d").build()); StringBuilder buffer = new StringBuilder(); new MlFilterWriter(filters, buffer).write(); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml index d3165260f4b95..a1f7eee0dcc3d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml @@ -32,6 +32,7 @@ setup: filter_id: filter-foo2 body: > { + "description": "This filter has a description", "items": ["123", "lmnop"] } @@ -76,6 +77,7 @@ setup: - match: filters.1: filter_id: "filter-foo2" + description: "This filter has a description" items: ["123", "lmnop"] - do: diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java index aa53d6255cb8e..b99170546df3b 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java @@ -120,7 +120,7 @@ public void testCondition() throws Exception { } public void testScope() throws Exception { - MlFilter safeIps = new MlFilter("safe_ips", Arrays.asList("111.111.111.111", "222.222.222.222")); + MlFilter safeIps = MlFilter.builder("safe_ips").setItems("111.111.111.111", "222.222.222.222").build(); assertThat(putMlFilter(safeIps), is(true)); DetectionRule rule = new DetectionRule.Builder(RuleScope.builder().include("ip", "safe_ips")).build(); @@ -178,7 +178,7 @@ public void testScope() throws Exception { assertThat(records.get(0).getOverFieldValue(), equalTo("333.333.333.333")); // Now let's update the filter - MlFilter updatedFilter = new MlFilter(safeIps.getId(), Collections.singletonList("333.333.333.333")); + MlFilter updatedFilter = MlFilter.builder(safeIps.getId()).setItems("333.333.333.333").build(); assertThat(putMlFilter(updatedFilter), is(true)); // Wait until the notification that the process was updated is indexed @@ -229,7 +229,7 @@ public void testScope() throws Exception { public void testScopeAndCondition() throws IOException { // We have 2 IPs and they're both safe-listed. 
List ips = Arrays.asList("111.111.111.111", "222.222.222.222"); - MlFilter safeIps = new MlFilter("safe_ips", ips); + MlFilter safeIps = MlFilter.builder("safe_ips").setItems(ips).build(); assertThat(putMlFilter(safeIps), is(true)); // Ignore if ip in safe list AND actual < 10. From 7e43acd4457ae0f61cffea3dc17a154c448c5217 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Fri, 15 Jun 2018 16:29:09 +0100 Subject: [PATCH 02/41] [ML] Hold ML filter items in sorted set (#31338) Filter items should be unique. They should also be sorted to make them easier to read plus save sorting in the autodetect process. --- .../xpack/core/ml/job/config/MlFilter.java | 21 +++++++++++-------- .../core/ml/job/config/MlFilterTests.java | 19 ++++++++++++----- .../rest-api-spec/test/ml/filter_crud.yml | 2 +- 3 files changed, 27 insertions(+), 15 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java index 991f421265ea8..b11dfd476515c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -17,11 +17,12 @@ import org.elasticsearch.xpack.core.ml.MlMetaIndex; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.SortedSet; +import java.util.TreeSet; public class MlFilter implements ToXContentObject, Writeable { @@ -53,9 +54,9 @@ private static ObjectParser createParser(boolean ignoreUnknownFie private final String id; private final String description; - private final List items; + private final SortedSet items; - public MlFilter(String id, String description, List items) { + public MlFilter(String id, String description, SortedSet items) { this.id = Objects.requireNonNull(id, ID.getPreferredName() + " must not be null"); this.description = description; this.items = Objects.requireNonNull(items, ITEMS.getPreferredName() + " must not be null"); @@ -68,7 +69,8 @@ public MlFilter(StreamInput in) throws IOException { } else { description = null; } - items = Arrays.asList(in.readStringArray()); + items = new TreeSet<>(); + items.addAll(Arrays.asList(in.readStringArray())); } @Override @@ -103,8 +105,8 @@ public String getDescription() { return description; } - public List getItems() { - return new ArrayList<>(items); + public SortedSet getItems() { + return Collections.unmodifiableSortedSet(items); } @Override @@ -142,7 +144,7 @@ public static class Builder { private String id; private String description; - private List items = Collections.emptyList(); + private SortedSet items = new TreeSet<>(); private Builder() {} @@ -162,12 +164,13 @@ public Builder setDescription(String description) { } public Builder setItems(List items) { - this.items = items; + this.items = new TreeSet<>(); + this.items.addAll(items); return this; } public Builder setItems(String... 
items) {
-        this.items = Arrays.asList(items);
+        setItems(Arrays.asList(items));
         return this;
     }

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java
index 78d87b82839a2..9ac6683f004c5 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java
@@ -11,10 +11,9 @@
 import org.elasticsearch.test.AbstractSerializingTestCase;

 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
+import java.util.TreeSet;

+import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;

@@ -40,7 +39,7 @@ public static MlFilter createRandom(String filterId) {
         }

         int size = randomInt(10);
-        List<String> items = new ArrayList<>(size);
+        TreeSet<String> items = new TreeSet<>();
         for (int i = 0; i < size; i++) {
             items.add(randomAlphaOfLengthBetween(1, 20));
         }
@@ -58,7 +57,7 @@ protected MlFilter doParseInstance(XContentParser parser) {
     }

     public void testNullId() {
-        NullPointerException ex = expectThrows(NullPointerException.class, () -> new MlFilter(null, "", Collections.emptyList()));
+        NullPointerException ex = expectThrows(NullPointerException.class, () -> new MlFilter(null, "", new TreeSet<>()));
         assertEquals(MlFilter.ID.getPreferredName() + " must not be null", ex.getMessage());
     }

@@ -88,4 +87,14 @@ public void testLenientParser() throws IOException {
             MlFilter.LENIENT_PARSER.apply(parser, null);
         }
     }
+
+    public void testItemsAreSorted() {
+        MlFilter filter = MlFilter.builder("foo").setItems("c", "b", "a").build();
+        assertThat(filter.getItems(), contains("a", "b", "c"));
+    }
+
+    public void testGetItemsReturnsUnmodifiable() {
+        MlFilter filter = MlFilter.builder("foo").setItems("c", "b", "a").build();
+        expectThrows(UnsupportedOperationException.class, () -> filter.getItems().add("x"));
+    }
 }
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml
index a1f7eee0dcc3d..2b7b86673e0d0 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml
@@ -22,7 +22,7 @@ setup:
         filter_id: filter-foo
         body:  >
           {
-            "items": ["abc", "xyz"]
+            "items": ["xyz", "abc"]
           }

   - do:

From 4bcdcbd02e214605a67fdec8f5ad5c2c1a0322b4 Mon Sep 17 00:00:00 2001
From: Tanguy Leroux
Date: Fri, 15 Jun 2018 15:49:14 +0200
Subject: [PATCH 03/41] Add QA project and fixture based test for discovery-ec2 plugin (#31107)

This commit adds a new QA sub-project to the discovery-ec2 plugin. The
project uses a fixture to test the plugin using a multi-node cluster.
Once all nodes are started, the nodes' transport addresses are written
to a file that is later read by the fixture.
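For reference, the fixture added below answers each `DescribeInstances` call with an XML document shaped roughly like the following, with one `reservationSet` item per transport address read from the shared `nodes.uri` file. The identifiers and the address are placeholders, and the `imageId`, `instanceType` and `placement` elements that the fixture also writes are omitted here for brevity:

```
<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
  <requestId>...</requestId>
  <reservationSet>
    <item>
      <reservationId>...</reservationId>
      <instancesSet>
        <item>
          <instanceId>...</instanceId>
          <instanceState>
            <code>16</code>
            <name>running</name>
          </instanceState>
          <privateDnsName>127.0.0.1:9300</privateDnsName>
          <dnsName>127.0.0.1:9300</dnsName>
          <privateIpAddress>127.0.0.1:9300</privateIpAddress>
          <ipAddress>127.0.0.1:9300</ipAddress>
        </item>
      </instancesSet>
    </item>
  </reservationSet>
</DescribeInstancesResponse>
```

This response is what lets the `ec2` hosts provider on each test node discover the other nodes started by the test.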
--- plugins/discovery-ec2/build.gradle | 5 + .../discovery-ec2/qa/amazon-ec2/build.gradle | 72 +++++ ...azonEC2DiscoveryClientYamlTestSuiteIT.java | 37 +++ .../discovery/ec2/AmazonEC2Fixture.java | 194 ++++++++++++++ .../test/discovery_ec2/10_basic.yml | 15 ++ plugins/discovery-ec2/qa/build.gradle | 0 .../Ec2DiscoveryClusterFormationTests.java | 252 ------------------ 7 files changed, 323 insertions(+), 252 deletions(-) create mode 100644 plugins/discovery-ec2/qa/amazon-ec2/build.gradle create mode 100644 plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java create mode 100644 plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java create mode 100644 plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml create mode 100644 plugins/discovery-ec2/qa/build.gradle delete mode 100644 plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 7daf944f81898..b1c3b62fd6edf 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -53,6 +53,11 @@ test { systemProperty 'tests.artifact', project.name } +check { + // also execute the QA tests when testing the plugin + dependsOn 'qa:amazon-ec2:check' +} + thirdPartyAudit.excludes = [ // classes are missing 'com.amazonaws.jmespath.JmesPathEvaluationVisitor', diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle new file mode 100644 index 0000000000000..8e0962db6037c --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + + +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.AntFixture + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: ':plugins:discovery-ec2', configuration: 'runtime') +} + +final int ec2NumberOfNodes = 3 +File ec2DiscoveryFile = new File(project.buildDir, 'generated-resources/nodes.uri') + +/** A task to start the AmazonEC2Fixture which emulates an EC2 service **/ +task ec2Fixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.discovery.ec2.AmazonEC2Fixture', baseDir, ec2DiscoveryFile.absolutePath +} + +Map expansions = [ + 'expected_nodes': ec2NumberOfNodes +] + +processTestResources { + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) +} + +integTestCluster { + dependsOn ec2Fixture + numNodes = ec2NumberOfNodes + plugin ':plugins:discovery-ec2' + keystoreSetting 'discovery.ec2.access_key', 'ec2_integration_test_access_key' + keystoreSetting 'discovery.ec2.secret_key', 'ec2_integration_test_secret_key' + setting 'discovery.zen.hosts_provider', 'ec2' + setting 'discovery.ec2.endpoint', "http://${-> ec2Fixture.addressAndPort}" + unicastTransportUri = { seedNode, node, ant -> return null } + + waitCondition = { node, ant -> + ec2DiscoveryFile.parentFile.mkdirs() + ec2DiscoveryFile.setText(integTest.nodes.collect { n -> "${n.transportUri()}" }.join('\n'), 'UTF-8') + + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health", + dest: tmpFile.toString(), + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..09d5a8d6fdf28 --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.ec2; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public class AmazonEC2DiscoveryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public AmazonEC2DiscoveryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java new file mode 100644 index 0000000000000..0cf4cbdeadb34 --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java @@ -0,0 +1,194 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.discovery.ec2; + +import org.apache.http.NameValuePair; +import org.apache.http.client.utils.URLEncodedUtils; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.fixture.AbstractHttpFixture; + +import javax.xml.XMLConstants; +import javax.xml.stream.XMLOutputFactory; +import javax.xml.stream.XMLStreamWriter; +import java.io.IOException; +import java.io.StringWriter; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Objects; +import java.util.UUID; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * {@link AmazonEC2Fixture} is a fixture that emulates an AWS EC2 service. 
+ */ +public class AmazonEC2Fixture extends AbstractHttpFixture { + + private final Path nodes; + + private AmazonEC2Fixture(final String workingDir, final String nodesUriPath) { + super(workingDir); + this.nodes = toPath(Objects.requireNonNull(nodesUriPath)); + } + + public static void main(String[] args) throws Exception { + if (args == null || args.length != 2) { + throw new IllegalArgumentException("AmazonEC2Fixture "); + } + + final AmazonEC2Fixture fixture = new AmazonEC2Fixture(args[0], args[1]); + fixture.listen(); + } + + @Override + protected Response handle(final Request request) throws IOException { + if ("/".equals(request.getPath()) && ("POST".equals(request.getMethod()))) { + final String userAgent = request.getHeader("User-Agent"); + if (userAgent != null && userAgent.startsWith("aws-sdk-java")) { + // Simulate an EC2 DescribeInstancesResponse + byte[] responseBody = EMPTY_BYTE; + for (NameValuePair parse : URLEncodedUtils.parse(new String(request.getBody(), UTF_8), UTF_8)) { + if ("Action".equals(parse.getName())) { + responseBody = generateDescribeInstancesResponse(); + break; + } + } + return new Response(RestStatus.OK.getStatus(), contentType("text/xml; charset=UTF-8"), responseBody); + } + } + return null; + } + + /** + * Generates a XML response that describe the EC2 instances + */ + private byte[] generateDescribeInstancesResponse() { + final XMLOutputFactory xmlOutputFactory = XMLOutputFactory.newFactory(); + xmlOutputFactory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true); + + final StringWriter out = new StringWriter(); + XMLStreamWriter sw; + try { + sw = xmlOutputFactory.createXMLStreamWriter(out); + sw.writeStartDocument(); + + String namespace = "http://ec2.amazonaws.com/doc/2013-02-01/"; + sw.setDefaultNamespace(namespace); + sw.writeStartElement(XMLConstants.DEFAULT_NS_PREFIX, "DescribeInstancesResponse", namespace); + { + sw.writeStartElement("requestId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("reservationSet"); + { + if (Files.exists(nodes)) { + for (String address : Files.readAllLines(nodes)) { + + sw.writeStartElement("item"); + { + sw.writeStartElement("reservationId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("instancesSet"); + { + sw.writeStartElement("item"); + { + sw.writeStartElement("instanceId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("imageId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("instanceState"); + { + sw.writeStartElement("code"); + sw.writeCharacters("16"); + sw.writeEndElement(); + + sw.writeStartElement("name"); + sw.writeCharacters("running"); + sw.writeEndElement(); + } + sw.writeEndElement(); + + sw.writeStartElement("privateDnsName"); + sw.writeCharacters(address); + sw.writeEndElement(); + + sw.writeStartElement("dnsName"); + sw.writeCharacters(address); + sw.writeEndElement(); + + sw.writeStartElement("instanceType"); + sw.writeCharacters("m1.medium"); + sw.writeEndElement(); + + sw.writeStartElement("placement"); + { + sw.writeStartElement("availabilityZone"); + sw.writeCharacters("use-east-1e"); + sw.writeEndElement(); + + sw.writeEmptyElement("groupName"); + + sw.writeStartElement("tenancy"); + sw.writeCharacters("default"); + sw.writeEndElement(); + } + sw.writeEndElement(); + + sw.writeStartElement("privateIpAddress"); + sw.writeCharacters(address); + 
sw.writeEndElement(); + + sw.writeStartElement("ipAddress"); + sw.writeCharacters(address); + sw.writeEndElement(); + } + sw.writeEndElement(); + } + sw.writeEndElement(); + } + sw.writeEndElement(); + } + } + sw.writeEndElement(); + } + sw.writeEndElement(); + + sw.writeEndDocument(); + sw.flush(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + return out.toString().getBytes(UTF_8); + } + + @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here") + private static Path toPath(final String dir) { + return Paths.get(dir); + } +} diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml b/plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml new file mode 100644 index 0000000000000..682327b72dd9e --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml @@ -0,0 +1,15 @@ +# Integration tests for discovery-ec2 +setup: + - do: + cluster.health: + wait_for_status: green + wait_for_nodes: ${expected_nodes} + +--- +"All nodes are correctly discovered": + + - do: + nodes.info: + metric: [ transport ] + + - match: { _nodes.total: ${expected_nodes} } diff --git a/plugins/discovery-ec2/qa/build.gradle b/plugins/discovery-ec2/qa/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java deleted file mode 100644 index 49fd9de71ecfa..0000000000000 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.discovery.ec2; - -import com.amazonaws.util.IOUtils; -import com.sun.net.httpserver.Headers; -import com.sun.net.httpserver.HttpServer; -import org.apache.http.NameValuePair; -import org.apache.http.client.utils.URLEncodedUtils; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.mocksocket.MockHttpServer; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import javax.xml.XMLConstants; -import javax.xml.stream.XMLOutputFactory; -import javax.xml.stream.XMLStreamException; -import javax.xml.stream.XMLStreamWriter; -import java.io.IOException; -import java.io.OutputStream; -import java.io.StringWriter; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutionException; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; -import static org.hamcrest.Matchers.equalTo; - -@ESIntegTestCase.ClusterScope(supportsDedicatedMasters = false, numDataNodes = 2, numClientNodes = 0) -@SuppressForbidden(reason = "use http server") -// TODO this should be a IT but currently all ITs in this project run against a real cluster -public class Ec2DiscoveryClusterFormationTests extends ESIntegTestCase { - - private static HttpServer httpServer; - private static Path logDir; - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(Ec2DiscoveryPlugin.class); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - Path resolve = logDir.resolve(Integer.toString(nodeOrdinal)); - try { - Files.createDirectory(resolve); - } catch (IOException e) { - throw new RuntimeException(e); - } - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(AwsEc2Service.ACCESS_KEY_SETTING.getKey(), "some_access"); - secureSettings.setString(AwsEc2Service.SECRET_KEY_SETTING.getKey(), "some_secret"); - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "ec2") - .put("path.logs", resolve) - .put("transport.tcp.port", 0) - .put("node.portsfile", "true") - .put(AwsEc2Service.ENDPOINT_SETTING.getKey(), "http://" + httpServer.getAddress().getHostName() + ":" + - httpServer.getAddress().getPort()) - .setSecureSettings(secureSettings) - .build(); - } - - /** - * Creates mock EC2 endpoint providing the list of started nodes to the DescribeInstances API call - */ - @BeforeClass - public static void startHttpd() throws Exception { - logDir = createTempDir(); - httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); - - httpServer.createContext("/", (s) -> { - Headers headers = s.getResponseHeaders(); - headers.add("Content-Type", "text/xml; charset=UTF-8"); - String action = null; - for (NameValuePair parse : URLEncodedUtils.parse(IOUtils.toString(s.getRequestBody()), StandardCharsets.UTF_8)) { - if 
("Action".equals(parse.getName())) { - action = parse.getValue(); - break; - } - } - assertThat(action, equalTo("DescribeInstances")); - - XMLOutputFactory xmlOutputFactory = XMLOutputFactory.newFactory(); - xmlOutputFactory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true); - StringWriter out = new StringWriter(); - XMLStreamWriter sw; - try { - sw = xmlOutputFactory.createXMLStreamWriter(out); - sw.writeStartDocument(); - - String namespace = "http://ec2.amazonaws.com/doc/2013-02-01/"; - sw.setDefaultNamespace(namespace); - sw.writeStartElement(XMLConstants.DEFAULT_NS_PREFIX, "DescribeInstancesResponse", namespace); - { - sw.writeStartElement("requestId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("reservationSet"); - { - Path[] files = FileSystemUtils.files(logDir); - for (int i = 0; i < files.length; i++) { - Path resolve = files[i].resolve("transport.ports"); - if (Files.exists(resolve)) { - List addresses = Files.readAllLines(resolve); - Collections.shuffle(addresses, random()); - - sw.writeStartElement("item"); - { - sw.writeStartElement("reservationId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("instancesSet"); - { - sw.writeStartElement("item"); - { - sw.writeStartElement("instanceId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("imageId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("instanceState"); - { - sw.writeStartElement("code"); - sw.writeCharacters("16"); - sw.writeEndElement(); - - sw.writeStartElement("name"); - sw.writeCharacters("running"); - sw.writeEndElement(); - } - sw.writeEndElement(); - - sw.writeStartElement("privateDnsName"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - - sw.writeStartElement("dnsName"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - - sw.writeStartElement("instanceType"); - sw.writeCharacters("m1.medium"); - sw.writeEndElement(); - - sw.writeStartElement("placement"); - { - sw.writeStartElement("availabilityZone"); - sw.writeCharacters("use-east-1e"); - sw.writeEndElement(); - - sw.writeEmptyElement("groupName"); - - sw.writeStartElement("tenancy"); - sw.writeCharacters("default"); - sw.writeEndElement(); - } - sw.writeEndElement(); - - sw.writeStartElement("privateIpAddress"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - - sw.writeStartElement("ipAddress"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - } - sw.writeEndElement(); - } - sw.writeEndElement(); - } - sw.writeEndElement(); - } - } - } - sw.writeEndElement(); - } - sw.writeEndElement(); - - sw.writeEndDocument(); - sw.flush(); - - final byte[] responseAsBytes = out.toString().getBytes(StandardCharsets.UTF_8); - s.sendResponseHeaders(200, responseAsBytes.length); - OutputStream responseBody = s.getResponseBody(); - responseBody.write(responseAsBytes); - responseBody.close(); - } catch (XMLStreamException e) { - Loggers.getLogger(Ec2DiscoveryClusterFormationTests.class).error("Failed serializing XML", e); - throw new RuntimeException(e); - } - }); - - httpServer.start(); - } - - @AfterClass - public static void stopHttpd() throws IOException { - for (int i = 0; i < internalCluster().size(); i++) { - // shut them all down otherwise we get spammed with connection refused exceptions - internalCluster().stopRandomDataNode(); - } - httpServer.stop(0); - httpServer = 
null;
-        logDir = null;
-    }
-
-    public void testJoin() throws ExecutionException, InterruptedException {
-        // only wait for the cluster to form
-        assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get());
-        // add one more node and wait for it to join
-        internalCluster().startDataOnlyNode();
-        assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get());
-    }
-}

From 3247012f5c1e8cd03d79284858f78fca4d117d30 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Mon, 11 Jun 2018 17:07:27 -0400
Subject: [PATCH 04/41] LLClient: Support host selection (#30523)

Allows users of the Low Level REST client to specify which hosts a
request should be run on. They implement the `NodeSelector` interface
or reuse a built-in selector like `NOT_MASTER_ONLY` to choose which
nodes are valid. Using it looks like:

```
Request request = new Request("POST", "/foo/_search");
RequestOptions.Builder options = request.getOptions().toBuilder();
options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
request.setOptions(options.build());
...
```

This introduces a new `Node` object which contains a `HttpHost` and
metadata about the host. At this point that metadata is just `version`
and `roles`, but I plan to add node attributes in a followup. The
canonical way to **get** this metadata is to use the `Sniffer` to pull
the information from the Elasticsearch cluster.

I've marked this as "breaking-java" because it breaks custom
implementations of `HostsSniffer` by renaming the interface to
`NodesSniffer` and by changing it from returning a `List<HttpHost>` to
a `List<Node>`. It *shouldn't* break anyone else though.

Because we expect to find it useful, this also adds `host_selector`
support to `do` statements in the yaml tests. Using it looks a little
like:

```
---
"example test":
  - skip:
      features: host_selector
  - do:
      host_selector:
        version: " - 7.0.0" # same syntax as skip
      apiname:
        something: true
```

The `do` section parses the `version` string into a host selector that
uses the same version comparison logic as the `skip` section. When the
`do` section is executed it passes the selector off to the
`RestClient`, using the `ElasticsearchNodesSniffer` to sniff the
required metadata. The idea is to use this in mixed version tests to
target a specific version of Elasticsearch so we can be sure about the
deprecation logging, though we don't currently have any examples that
need it. We do, however, have at least one open pull request that
requires something like this to properly test it.
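Implementing the interface directly is also straightforward. Below is a
minimal sketch of a custom selector, using only the
`select(Iterable<Node>)` method and the `Node.Roles` getters introduced
in this commit; the selector name and endpoint are placeholders, the
iterator is `java.util.Iterator`, and the options are assumed to be
built before being set back on the request:

```
// Hypothetical selector that routes requests to data nodes only.
// Nodes whose roles are unknown (getRoles() returns null) are kept,
// so the request can still be routed somewhere.
NodeSelector dataNodesOnly = new NodeSelector() {
    @Override
    public void select(Iterable<Node> nodes) {
        Iterator<Node> itr = nodes.iterator();
        while (itr.hasNext()) {
            Node.Roles roles = itr.next().getRoles();
            if (roles != null && roles.isData() == false) {
                itr.remove(); // the Iterable is mutable by contract
            }
        }
    }
};

Request request = new Request("POST", "/foo/_search");
RequestOptions.Builder options = request.getOptions().toBuilder();
options.setNodeSelector(dataNodesOnly);
request.setOptions(options.build());
```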
Closes #21888 --- .../elasticsearch/client/DeadHostState.java | 19 +- .../java/org/elasticsearch/client/Node.java | 213 +++++++++++ .../elasticsearch/client/NodeSelector.java | 90 +++++ .../elasticsearch/client/RequestLogger.java | 6 +- .../elasticsearch/client/RequestOptions.java | 36 +- .../org/elasticsearch/client/Response.java | 2 +- .../org/elasticsearch/client/RestClient.java | 335 +++++++++++++----- .../client/RestClientBuilder.java | 24 +- .../client/DeadHostStateTests.java | 33 +- .../client/HostsTrackingFailureListener.java | 17 +- .../client/NodeSelectorTests.java | 71 ++++ .../org/elasticsearch/client/NodeTests.java | 71 ++++ .../client/RequestOptionsTests.java | 9 +- .../client/RestClientBuilderTests.java | 31 +- .../RestClientMultipleHostsIntegTests.java | 64 +++- .../client/RestClientMultipleHostsTests.java | 106 ++++-- .../client/RestClientSingleHostTests.java | 16 +- .../elasticsearch/client/RestClientTests.java | 225 ++++++++++-- .../RestClientDocumentation.java | 43 ++- ...er.java => ElasticsearchNodesSniffer.java} | 146 ++++++-- .../{HostsSniffer.java => NodesSniffer.java} | 8 +- .../client/sniff/SniffOnFailureListener.java | 4 +- .../elasticsearch/client/sniff/Sniffer.java | 28 +- .../client/sniff/SnifferBuilder.java | 18 +- .../ElasticsearchNodesSnifferParseTests.java | 109 ++++++ ...va => ElasticsearchNodesSnifferTests.java} | 111 +++--- ...ostsSniffer.java => MockNodesSniffer.java} | 9 +- .../sniff/SniffOnFailureListenerTests.java | 5 +- .../client/sniff/SnifferBuilderTests.java | 6 +- .../client/sniff/SnifferTests.java | 110 +++--- .../documentation/SnifferDocumentation.java | 29 +- .../src/test/resources/2.0.0_nodes_http.json | 141 ++++++++ .../src/test/resources/5.0.0_nodes_http.json | 169 +++++++++ .../src/test/resources/6.0.0_nodes_http.json | 169 +++++++++ client/sniffer/src/test/resources/readme.txt | 4 + .../high-level/getting-started.asciidoc | 10 + docs/java-rest/low-level/sniffer.asciidoc | 10 +- docs/java-rest/low-level/usage.asciidoc | 35 +- .../rest-api-spec/test/README.asciidoc | 18 + test/framework/build.gradle | 1 + .../rest/yaml/ClientYamlDocsTestClient.java | 21 +- .../test/rest/yaml/ClientYamlTestClient.java | 44 ++- .../yaml/ClientYamlTestExecutionContext.java | 18 +- .../rest/yaml/ESClientYamlSuiteTestCase.java | 29 +- .../test/rest/yaml/Features.java | 1 + .../test/rest/yaml/parser/package-info.java | 24 -- .../rest/yaml/section/ApiCallSection.java | 17 + .../yaml/section/ClientYamlTestSection.java | 7 + .../test/rest/yaml/section/DoSection.java | 78 +++- .../test/rest/yaml/section/SkipSection.java | 2 +- .../ClientYamlTestExecutionContextTests.java | 4 +- .../section/ClientYamlTestSectionTests.java | 31 +- .../rest/yaml/section/DoSectionTests.java | 45 +++ .../exporter/http/HttpExporter.java | 12 +- .../exporter/http/NodeFailureListener.java | 6 +- .../exporter/http/HttpExporterTests.java | 7 +- .../http/NodeFailureListenerTests.java | 9 +- 57 files changed, 2433 insertions(+), 473 deletions(-) create mode 100644 client/rest/src/main/java/org/elasticsearch/client/Node.java create mode 100644 client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java create mode 100644 client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java create mode 100644 client/rest/src/test/java/org/elasticsearch/client/NodeTests.java rename client/sniffer/src/main/java/org/elasticsearch/client/sniff/{ElasticsearchHostsSniffer.java => ElasticsearchNodesSniffer.java} (50%) rename 
client/sniffer/src/main/java/org/elasticsearch/client/sniff/{HostsSniffer.java => NodesSniffer.java} (85%) create mode 100644 client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java rename client/sniffer/src/test/java/org/elasticsearch/client/sniff/{ElasticsearchHostsSnifferTests.java => ElasticsearchNodesSnifferTests.java} (76%) rename client/sniffer/src/test/java/org/elasticsearch/client/sniff/{MockHostsSniffer.java => MockNodesSniffer.java} (78%) create mode 100644 client/sniffer/src/test/resources/2.0.0_nodes_http.json create mode 100644 client/sniffer/src/test/resources/5.0.0_nodes_http.json create mode 100644 client/sniffer/src/test/resources/6.0.0_nodes_http.json create mode 100644 client/sniffer/src/test/resources/readme.txt delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java diff --git a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java index 452e71b14d93a..2a62adb285ad6 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -29,7 +29,7 @@ final class DeadHostState implements Comparable { private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1); - private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30); + static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30); private final int failedAttempts; private final long deadUntilNanos; @@ -55,12 +55,12 @@ final class DeadHostState implements Comparable { * * @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt */ - DeadHostState(DeadHostState previousDeadHostState, TimeSupplier timeSupplier) { + DeadHostState(DeadHostState previousDeadHostState) { long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1), MAX_CONNECTION_TIMEOUT_NANOS); - this.deadUntilNanos = timeSupplier.nanoTime() + timeoutNanos; + this.deadUntilNanos = previousDeadHostState.timeSupplier.nanoTime() + timeoutNanos; this.failedAttempts = previousDeadHostState.failedAttempts + 1; - this.timeSupplier = timeSupplier; + this.timeSupplier = previousDeadHostState.timeSupplier; } /** @@ -86,6 +86,10 @@ int getFailedAttempts() { @Override public int compareTo(DeadHostState other) { + if (timeSupplier != other.timeSupplier) { + throw new IllegalArgumentException("can't compare DeadHostStates with different clocks [" + + timeSupplier + " != " + other.timeSupplier + "]"); + } return Long.compare(deadUntilNanos, other.deadUntilNanos); } @@ -94,6 +98,7 @@ public String toString() { return "DeadHostState{" + "failedAttempts=" + failedAttempts + ", deadUntilNanos=" + deadUntilNanos + + ", timeSupplier=" + timeSupplier + '}'; } @@ -101,12 +106,16 @@ public String toString() { * Time supplier that makes timing aspects pluggable to ease testing */ interface TimeSupplier { - TimeSupplier DEFAULT = new TimeSupplier() { @Override public long nanoTime() { return System.nanoTime(); } + + @Override + public String toString() { + return "nanoTime"; + } }; long nanoTime(); diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java new file mode 100644 index 0000000000000..d66d0773016e6 --- /dev/null +++ 
b/client/rest/src/main/java/org/elasticsearch/client/Node.java @@ -0,0 +1,213 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.util.Objects; +import java.util.Set; + +import org.apache.http.HttpHost; + +/** + * Metadata about an {@link HttpHost} running Elasticsearch. + */ +public class Node { + /** + * Address that this host claims is its primary contact point. + */ + private final HttpHost host; + /** + * Addresses on which the host is listening. These are useful to have + * around because they allow you to find a host based on any address it + * is listening on. + */ + private final Set boundHosts; + /** + * Name of the node as configured by the {@code node.name} attribute. + */ + private final String name; + /** + * Version of Elasticsearch that the node is running or {@code null} + * if we don't know the version. + */ + private final String version; + /** + * Roles that the Elasticsearch process on the host has or {@code null} + * if we don't know what roles the node has. + */ + private final Roles roles; + + /** + * Create a {@linkplain Node} with metadata. All parameters except + * {@code host} are nullable and implementations of {@link NodeSelector} + * need to decide what to do in their absence. + */ + public Node(HttpHost host, Set boundHosts, String name, String version, Roles roles) { + if (host == null) { + throw new IllegalArgumentException("host cannot be null"); + } + this.host = host; + this.boundHosts = boundHosts; + this.name = name; + this.version = version; + this.roles = roles; + } + + /** + * Create a {@linkplain Node} without any metadata. + */ + public Node(HttpHost host) { + this(host, null, null, null, null); + } + + /** + * Contact information for the host. + */ + public HttpHost getHost() { + return host; + } + + /** + * Addresses on which the host is listening. These are useful to have + * around because they allow you to find a host based on any address it + * is listening on. + */ + public Set getBoundHosts() { + return boundHosts; + } + + /** + * The {@code node.name} of the node. + */ + public String getName() { + return name; + } + + /** + * Version of Elasticsearch that the node is running or {@code null} + * if we don't know the version. + */ + public String getVersion() { + return version; + } + + /** + * Roles that the Elasticsearch process on the host has or {@code null} + * if we don't know what roles the node has. 
+     */
+    public Roles getRoles() {
+        return roles;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder b = new StringBuilder();
+        b.append("[host=").append(host);
+        if (boundHosts != null) {
+            b.append(", bound=").append(boundHosts);
+        }
+        if (name != null) {
+            b.append(", name=").append(name);
+        }
+        if (version != null) {
+            b.append(", version=").append(version);
+        }
+        if (roles != null) {
+            b.append(", roles=").append(roles);
+        }
+        return b.append(']').toString();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || obj.getClass() != getClass()) {
+            return false;
+        }
+        Node other = (Node) obj;
+        return host.equals(other.host)
+            && Objects.equals(boundHosts, other.boundHosts)
+            && Objects.equals(name, other.name)
+            && Objects.equals(version, other.version)
+            && Objects.equals(roles, other.roles);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(host, boundHosts, name, version, roles);
+    }
+
+    /**
+     * Role information about an Elasticsearch process.
+     */
+    public static final class Roles {
+        private final boolean masterEligible;
+        private final boolean data;
+        private final boolean ingest;
+
+        public Roles(boolean masterEligible, boolean data, boolean ingest) {
+            this.masterEligible = masterEligible;
+            this.data = data;
+            this.ingest = ingest;
+        }
+
+        /**
+         * Returns whether or not the node could be elected master.
+         */
+        public boolean isMasterEligible() {
+            return masterEligible;
+        }
+        /**
+         * Returns whether or not the node stores data.
+         */
+        public boolean isData() {
+            return data;
+        }
+        /**
+         * Returns whether or not the node runs ingest pipelines.
+         */
+        public boolean isIngest() {
+            return ingest;
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder result = new StringBuilder(3);
+            if (masterEligible) result.append('m');
+            if (data) result.append('d');
+            if (ingest) result.append('i');
+            return result.toString();
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj == null || obj.getClass() != getClass()) {
+                return false;
+            }
+            Roles other = (Roles) obj;
+            return masterEligible == other.masterEligible
+                && data == other.data
+                && ingest == other.ingest;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(masterEligible, data, ingest);
+        }
+    }
+}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java
new file mode 100644
index 0000000000000..5f5296fe16b13
--- /dev/null
+++ b/client/rest/src/main/java/org/elasticsearch/client/NodeSelector.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import java.util.Iterator;
+
+/**
+ * Selects nodes that can receive requests.
Used to keep requests away
+ * from master nodes or to send them to nodes with a particular attribute.
+ * Use with {@link RequestOptions.Builder#setNodeSelector(NodeSelector)}.
+ */
+public interface NodeSelector {
+    /**
+     * Select the {@link Node}s to which to send requests. This is called with
+     * a mutable {@link Iterable} of {@linkplain Node}s in the order that the
+     * rest client would prefer to use them and implementers should remove
+     * nodes from the list that should not receive the request. Implementers may
+     * iterate the nodes as many times as they need.
+     *
<p>
+ * This may be called twice per request: first for "living" nodes that + * have not been blacklisted by previous errors. If the selector removes + * all nodes from the list or if there aren't any living nodes then the + * {@link RestClient} will call this method with a list of "dead" nodes. + *
<p>
+     * Implementers should not rely on the ordering of the nodes.
+     */
+    void select(Iterable<Node> nodes);
+    /*
+     * We were fairly careful with our choice of Iterable here. The caller has
+     * a List but reordering the list is likely to break round robin. Luckily
+     * Iterable doesn't allow any reordering.
+     */
+
+    /**
+     * Selector that matches any node.
+     */
+    NodeSelector ANY = new NodeSelector() {
+        @Override
+        public void select(Iterable<Node> nodes) {
+            // Intentionally does nothing
+        }
+
+        @Override
+        public String toString() {
+            return "ANY";
+        }
+    };
+
+    /**
+     * Selector that matches any node that has metadata and doesn't
+     * have the {@code master} role OR it has the {@code data}
+     * role.
+     */
+    NodeSelector NOT_MASTER_ONLY = new NodeSelector() {
+        @Override
+        public void select(Iterable<Node> nodes) {
+            for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
+                Node node = itr.next();
+                if (node.getRoles() == null) continue;
+                if (node.getRoles().isMasterEligible()
+                        && false == node.getRoles().isData()
+                        && false == node.getRoles().isIngest()) {
+                    itr.remove();
+                }
+            }
+        }
+
+        @Override
+        public String toString() {
+            return "NOT_MASTER_ONLY";
+        }
+    };
+}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java
index 07ff89b7e3fb0..7c56a7edf97a9 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java
@@ -87,14 +87,14 @@ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpR
     /**
      * Logs a request that failed
      */
-    static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, Exception e) {
+    static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) {
         if (logger.isDebugEnabled()) {
-            logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e);
+            logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e);
         }
         if (tracer.isTraceEnabled()) {
             String traceRequest;
             try {
-                traceRequest = buildTraceRequest(request, host);
+                traceRequest = buildTraceRequest(request, node.getHost());
             } catch (IOException e1) {
                 tracer.trace("error while reading request for trace purposes", e);
                 traceRequest = "";
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java
index e31db17a336b0..97d150da3d3ff 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java
@@ -37,18 +37,21 @@
  */
 public final class RequestOptions {
     public static final RequestOptions DEFAULT = new Builder(
-            Collections.
<Header>
emptyList(), HeapBufferedResponseConsumerFactory.DEFAULT).build(); + Collections.
<Header>emptyList(), NodeSelector.ANY,
+            HeapBufferedResponseConsumerFactory.DEFAULT).build();

     private final List<Header>
headers;
+    private final NodeSelector nodeSelector;
     private final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory;

     private RequestOptions(Builder builder) {
         this.headers = Collections.unmodifiableList(new ArrayList<>(builder.headers));
+        this.nodeSelector = builder.nodeSelector;
         this.httpAsyncResponseConsumerFactory = builder.httpAsyncResponseConsumerFactory;
     }

     public Builder toBuilder() {
-        Builder builder = new Builder(headers, httpAsyncResponseConsumerFactory);
+        Builder builder = new Builder(headers, nodeSelector, httpAsyncResponseConsumerFactory);
         return builder;
     }

@@ -59,6 +62,14 @@ public List<Header>
getHeaders() { return headers; } + /** + * The selector that chooses which nodes are valid destinations for + * {@link Request}s with these options. + */ + public NodeSelector getNodeSelector() { + return nodeSelector; + } + /** * The {@link HttpAsyncResponseConsumerFactory} used to create one * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the @@ -82,6 +93,9 @@ public String toString() { b.append(headers.get(h).toString()); } } + if (nodeSelector != NodeSelector.ANY) { + b.append(", nodeSelector=").append(nodeSelector); + } if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory); } @@ -99,20 +113,24 @@ public boolean equals(Object obj) { RequestOptions other = (RequestOptions) obj; return headers.equals(other.headers) + && nodeSelector.equals(other.nodeSelector) && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory); } @Override public int hashCode() { - return Objects.hash(headers, httpAsyncResponseConsumerFactory); + return Objects.hash(headers, nodeSelector, httpAsyncResponseConsumerFactory); } public static class Builder { private final List
<Header> headers;
+        private NodeSelector nodeSelector;
         private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory;

-        private Builder(List<Header>
headers, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) {
+        private Builder(List<Header>
headers, NodeSelector nodeSelector, + HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { this.headers = new ArrayList<>(headers); + this.nodeSelector = nodeSelector; this.httpAsyncResponseConsumerFactory = httpAsyncResponseConsumerFactory; } @@ -133,7 +151,15 @@ public void addHeader(String name, String value) { } /** - * set the {@link HttpAsyncResponseConsumerFactory} used to create one + * Configure the selector that chooses which nodes are valid + * destinations for {@link Request}s with these options + */ + public void setNodeSelector(NodeSelector nodeSelector) { + this.nodeSelector = Objects.requireNonNull(nodeSelector, "nodeSelector cannot be null"); + } + + /** + * Set the {@link HttpAsyncResponseConsumerFactory} used to create one * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the * response body gets streamed from a non-blocking HTTP connection on the * client side. diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java index 02aedb4765abe..39bbf769713b2 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Response.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java @@ -40,7 +40,7 @@ public class Response { Response(RequestLine requestLine, HttpHost host, HttpResponse response) { Objects.requireNonNull(requestLine, "requestLine cannot be null"); - Objects.requireNonNull(host, "node cannot be null"); + Objects.requireNonNull(host, "host cannot be null"); Objects.requireNonNull(response, "response cannot be null"); this.requestLine = requestLine; this.host = host; diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 0e603c4069ae4..82039cab5d04c 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -46,10 +46,11 @@ import org.apache.http.nio.client.methods.HttpAsyncMethods; import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.elasticsearch.client.DeadHostState.TimeSupplier; -import javax.net.ssl.SSLHandshakeException; import java.io.Closeable; import java.io.IOException; +import java.net.ConnectException; import java.net.SocketTimeoutException; import java.net.URI; import java.net.URISyntaxException; @@ -57,11 +58,10 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedHashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -74,13 +74,16 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import javax.net.ssl.SSLHandshakeException; + +import static java.util.Collections.singletonList; /** * Client that connects to an Elasticsearch cluster through HTTP. *
<p>
* Must be created using {@link RestClientBuilder}, which allows to set all the different options or just rely on defaults. * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later - * by calling {@link #setHosts(HttpHost...)}. + * by calling {@link #setNodes(Collection)}. *
<p>
* The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and @@ -102,53 +105,93 @@ public class RestClient implements Closeable { final List
<Header>
defaultHeaders; private final long maxRetryTimeoutMillis; private final String pathPrefix; - private final AtomicInteger lastHostIndex = new AtomicInteger(0); - private volatile HostTuple> hostTuple; + private final AtomicInteger lastNodeIndex = new AtomicInteger(0); private final ConcurrentMap blacklist = new ConcurrentHashMap<>(); private final FailureListener failureListener; + private volatile NodeTuple> nodeTuple; RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders, - HttpHost[] hosts, String pathPrefix, FailureListener failureListener) { + List nodes, String pathPrefix, FailureListener failureListener) { this.client = client; this.maxRetryTimeoutMillis = maxRetryTimeoutMillis; this.defaultHeaders = Collections.unmodifiableList(Arrays.asList(defaultHeaders)); this.failureListener = failureListener; this.pathPrefix = pathPrefix; - setHosts(hosts); + setNodes(nodes); } /** * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation. * Creates a new builder instance and sets the hosts that the client will send requests to. + *
<p>
+     * Prefer this to {@link #builder(HttpHost...)} if you have metadata up front about the nodes.
+     * If you don't, either one is fine.
+     */
+    public static RestClientBuilder builder(Node... nodes) {
+        return new RestClientBuilder(nodes == null ? null : Arrays.asList(nodes));
+    }
+
+    /**
+     * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
+     * Creates a new builder instance and sets the nodes that the client will send requests to.
+     *
<p>
+     * You can use this if you do not have metadata up front about the nodes. If you do, prefer
+     * {@link #builder(Node...)}.
+     * @see Node#Node(HttpHost)
      */
     public static RestClientBuilder builder(HttpHost... hosts) {
-        return new RestClientBuilder(hosts);
+        return new RestClientBuilder(hostsToNodes(hosts));
     }

     /**
-     * Replaces the hosts that the client communicates with.
-     * @see HttpHost
+     * Replaces the hosts with which the client communicates.
+     *
+     * @deprecated prefer {@link #setNodes(Collection)} because it allows you
+     * to set metadata for use with {@link NodeSelector}s
      */
-    public synchronized void setHosts(HttpHost... hosts) {
-        if (hosts == null || hosts.length == 0) {
-            throw new IllegalArgumentException("hosts must not be null nor empty");
+    @Deprecated
+    public void setHosts(HttpHost... hosts) {
+        setNodes(hostsToNodes(hosts));
+    }
+
+    /**
+     * Replaces the nodes with which the client communicates.
+     */
+    public synchronized void setNodes(Collection<Node> nodes) {
+        if (nodes == null || nodes.isEmpty()) {
+            throw new IllegalArgumentException("nodes must not be null or empty");
         }
-        Set<HttpHost> httpHosts = new LinkedHashSet<>();
         AuthCache authCache = new BasicAuthCache();
-        for (HttpHost host : hosts) {
-            Objects.requireNonNull(host, "host cannot be null");
-            httpHosts.add(host);
-            authCache.put(host, new BasicScheme());
+
+        Map<HttpHost, Node> nodesByHost = new LinkedHashMap<>();
+        for (Node node : nodes) {
+            Objects.requireNonNull(node, "node cannot be null");
+            // TODO should we throw an IAE if we have two nodes with the same host?
+            nodesByHost.put(node.getHost(), node);
+            authCache.put(node.getHost(), new BasicScheme());
         }
-        this.hostTuple = new HostTuple<>(Collections.unmodifiableSet(httpHosts), authCache);
+        this.nodeTuple = new NodeTuple<>(
+                Collections.unmodifiableList(new ArrayList<>(nodesByHost.values())), authCache);
         this.blacklist.clear();
     }

+    private static List<Node> hostsToNodes(HttpHost[] hosts) {
+        if (hosts == null || hosts.length == 0) {
+            throw new IllegalArgumentException("hosts must not be null nor empty");
+        }
+        List<Node> nodes = new ArrayList<>(hosts.length);
+        for (int i = 0; i < hosts.length; i++) {
+            nodes.add(new Node(hosts[i]));
+        }
+        return nodes;
+    }
+
     /**
-     * Returns the configured hosts
+     * Get the list of nodes that the client knows about. The list is
+     * unmodifiable.
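+     * <p>A quick sketch (not part of the patch; ports and role assignments
+     * are invented) of how the node list gets replaced and read back:
+     * <pre>{@code
+     * client.setNodes(Arrays.asList(
+     *         new Node(new HttpHost("localhost", 9200), null, null, null,
+     *                 new Node.Roles(false, true, true)),    // data + ingest
+     *         new Node(new HttpHost("localhost", 9201), null, null, null,
+     *                 new Node.Roles(true, false, false)))); // dedicated master
+     * List<Node> nodes = client.getNodes(); // unmodifiable view of the two nodes
+     * }</pre>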
*/ - public List getHosts() { - return new ArrayList<>(hostTuple.hosts); + public List getNodes() { + return nodeTuple.nodes; } /** @@ -434,7 +477,7 @@ public void performRequestAsync(String method, String endpoint, Map requestParams = new HashMap<>(request.getParameters()); //ignore is a special parameter supported by the clients, shouldn't be sent to es String ignoreString = requestParams.remove("ignore"); @@ -466,40 +509,40 @@ void performRequestAsyncNoCatch(Request request, ResponseListener listener) { setHeaders(httpRequest, request.getOptions().getHeaders()); FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(listener); long startTime = System.nanoTime(); - performRequestAsync(startTime, nextHost(), httpRequest, ignoreErrorCodes, + performRequestAsync(startTime, nextNode(request.getOptions().getNodeSelector()), httpRequest, ignoreErrorCodes, request.getOptions().getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener); } - private void performRequestAsync(final long startTime, final HostTuple> hostTuple, final HttpRequestBase request, + private void performRequestAsync(final long startTime, final NodeTuple> nodeTuple, final HttpRequestBase request, final Set ignoreErrorCodes, final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, final FailureTrackingResponseListener listener) { - final HttpHost host = hostTuple.hosts.next(); + final Node node = nodeTuple.nodes.next(); //we stream the request body if the entity allows for it - final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request); + final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(node.getHost(), request); final HttpAsyncResponseConsumer asyncResponseConsumer = httpAsyncResponseConsumerFactory.createHttpAsyncResponseConsumer(); final HttpClientContext context = HttpClientContext.create(); - context.setAuthCache(hostTuple.authCache); + context.setAuthCache(nodeTuple.authCache); client.execute(requestProducer, asyncResponseConsumer, context, new FutureCallback() { @Override public void completed(HttpResponse httpResponse) { try { - RequestLogger.logResponse(logger, request, host, httpResponse); + RequestLogger.logResponse(logger, request, node.getHost(), httpResponse); int statusCode = httpResponse.getStatusLine().getStatusCode(); - Response response = new Response(request.getRequestLine(), host, httpResponse); + Response response = new Response(request.getRequestLine(), node.getHost(), httpResponse); if (isSuccessfulResponse(statusCode) || ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { - onResponse(host); + onResponse(node); listener.onSuccess(response); } else { ResponseException responseException = new ResponseException(response); if (isRetryStatus(statusCode)) { //mark host dead and retry against next one - onFailure(host); + onFailure(node); retryIfPossible(responseException); } else { //mark host alive and don't retry, as the error should be a request problem - onResponse(host); + onResponse(node); listener.onDefinitiveFailure(responseException); } } @@ -511,8 +554,8 @@ public void completed(HttpResponse httpResponse) { @Override public void failed(Exception failure) { try { - RequestLogger.logFailedRequest(logger, request, host, failure); - onFailure(host); + RequestLogger.logFailedRequest(logger, request, node, failure); + onFailure(node); retryIfPossible(failure); } catch(Exception e) { listener.onDefinitiveFailure(e); @@ -520,7 +563,7 @@ public void 
failed(Exception failure) { } private void retryIfPossible(Exception exception) { - if (hostTuple.hosts.hasNext()) { + if (nodeTuple.nodes.hasNext()) { //in case we are retrying, check whether maxRetryTimeout has been reached long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime); long timeout = maxRetryTimeoutMillis - timeElapsedMillis; @@ -531,7 +574,7 @@ private void retryIfPossible(Exception exception) { } else { listener.trackFailure(exception); request.reset(); - performRequestAsync(startTime, hostTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener); + performRequestAsync(startTime, nodeTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener); } } else { listener.onDefinitiveFailure(exception); @@ -560,54 +603,103 @@ private void setHeaders(HttpRequest httpRequest, Collection
<Header>
requestHeade } /** - * Returns an {@link Iterable} of hosts to be used for a request call. - * Ideally, the first host is retrieved from the iterable and used successfully for the request. - * Otherwise, after each failure the next host has to be retrieved from the iterator so that the request can be retried until - * there are no more hosts available to retry against. The maximum total of attempts is equal to the number of hosts in the iterable. - * The iterator returned will never be empty. In case there are no healthy hosts available, or dead ones to be be retried, - * one dead host gets returned so that it can be retried. + * Returns a non-empty {@link Iterator} of nodes to be used for a request + * that match the {@link NodeSelector}. + *
<p>
+     * If there are no living nodes that match the {@link NodeSelector},
+     * this will return the dead node that matches the {@link NodeSelector}
+     * that is closest to being revived.
+     * @throws IOException if no nodes are available
      */
-    private HostTuple<Iterator<HttpHost>> nextHost() {
-        final HostTuple<Set<HttpHost>> hostTuple = this.hostTuple;
-        Collection<HttpHost> nextHosts = Collections.emptySet();
-        do {
-            Set<HttpHost> filteredHosts = new HashSet<>(hostTuple.hosts);
-            for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
-                if (entry.getValue().shallBeRetried() == false) {
-                    filteredHosts.remove(entry.getKey());
-                }
+    private NodeTuple<Iterator<Node>> nextNode(NodeSelector nodeSelector) throws IOException {
+        NodeTuple<List<Node>> nodeTuple = this.nodeTuple;
+        List<Node> hosts = selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector);
+        return new NodeTuple<>(hosts.iterator(), nodeTuple.authCache);
+    }
+
+    /**
+     * Select hosts to try. Package private for testing.
+     */
+    static List<Node> selectHosts(NodeTuple<List<Node>> nodeTuple,
+            Map<HttpHost, DeadHostState> blacklist, AtomicInteger lastNodeIndex,
+            NodeSelector nodeSelector) throws IOException {
+        /*
+         * Sort the nodes into living and dead lists.
+         */
+        List<Node> livingNodes = new ArrayList<>(nodeTuple.nodes.size() - blacklist.size());
+        List<DeadNode> deadNodes = new ArrayList<>(blacklist.size());
+        for (Node node : nodeTuple.nodes) {
+            DeadHostState deadness = blacklist.get(node.getHost());
+            if (deadness == null) {
+                livingNodes.add(node);
+                continue;
             }
-            if (filteredHosts.isEmpty()) {
-                //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
-                List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
-                if (sortedHosts.size() > 0) {
-                    Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
-                        @Override
-                        public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
-                            return o1.getValue().compareTo(o2.getValue());
-                        }
-                    });
-                    HttpHost deadHost = sortedHosts.get(0).getKey();
-                    logger.trace("resurrecting host [" + deadHost + "]");
-                    nextHosts = Collections.singleton(deadHost);
+            if (deadness.shallBeRetried()) {
+                livingNodes.add(node);
+                continue;
+            }
+            deadNodes.add(new DeadNode(node, deadness));
+        }
+
+        if (false == livingNodes.isEmpty()) {
+            /*
+             * Normal state: there is at least one living node. If the
+             * selector is ok with any of the living nodes then use them
+             * for the request.
+             */
+            List<Node> selectedLivingNodes = new ArrayList<>(livingNodes);
+            nodeSelector.select(selectedLivingNodes);
+            if (false == selectedLivingNodes.isEmpty()) {
+                /*
+                 * Rotate the list so subsequent requests will prefer the
+                 * nodes in a different order.
+                 */
+                Collections.rotate(selectedLivingNodes, lastNodeIndex.getAndIncrement());
+                return selectedLivingNodes;
+            }
+        }
+
+        /*
+         * Last resort: there are no good nodes to use, either because
+         * the selector rejected all the living nodes or because there aren't
+         * any living ones. Either way, we want to revive a single dead node
+         * that the NodeSelector is OK with. We do this by sorting the dead
+         * nodes by their revival time and passing them through the
+         * NodeSelector so it can have its say in which nodes are ok and their
+         * ordering. If the selector is ok with any of the nodes then use just
+         * the first one in the list because we only want to revive a single
+         * node.
+         */
+        if (false == deadNodes.isEmpty()) {
+            final List<DeadNode> selectedDeadNodes = new ArrayList<>(deadNodes);
+            /*
+             * We'd like NodeSelectors to remove items directly from deadNodes
+             * so we can find the minimum after it is filtered without having
+             * to compare many things. This saves us a sort on the unfiltered
+             * list.
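+             * As a worked example: with dead nodes [d1 until t=30, d2 until t=10,
+             * d3 until t=20] and a selector that removes d2, the Collections.min
+             * call below returns d3, the accepted node that revives soonest.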
+ */ + nodeSelector.select(new Iterable() { + @Override + public Iterator iterator() { + return new DeadNodeIteratorAdapter(selectedDeadNodes.iterator()); } - } else { - List rotatedHosts = new ArrayList<>(filteredHosts); - Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement()); - nextHosts = rotatedHosts; + }); + if (false == selectedDeadNodes.isEmpty()) { + return singletonList(Collections.min(selectedDeadNodes).node); } - } while(nextHosts.isEmpty()); - return new HostTuple<>(nextHosts.iterator(), hostTuple.authCache); + } + throw new IOException("NodeSelector [" + nodeSelector + "] rejected all nodes, " + + "living " + livingNodes + " and dead " + deadNodes); } /** * Called after each successful request call. * Receives as an argument the host that was used for the successful request. */ - private void onResponse(HttpHost host) { - DeadHostState removedHost = this.blacklist.remove(host); + private void onResponse(Node node) { + DeadHostState removedHost = this.blacklist.remove(node.getHost()); if (logger.isDebugEnabled() && removedHost != null) { - logger.debug("removed host [" + host + "] from blacklist"); + logger.debug("removed [" + node + "] from blacklist"); } } @@ -615,20 +707,25 @@ private void onResponse(HttpHost host) { * Called after each failed attempt. * Receives as an argument the host that was used for the failed attempt. */ - private void onFailure(HttpHost host) { + private void onFailure(Node node) { while(true) { - DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, new DeadHostState(DeadHostState.TimeSupplier.DEFAULT)); + DeadHostState previousDeadHostState = + blacklist.putIfAbsent(node.getHost(), new DeadHostState(TimeSupplier.DEFAULT)); if (previousDeadHostState == null) { - logger.debug("added host [" + host + "] to blacklist"); + if (logger.isDebugEnabled()) { + logger.debug("added [" + node + "] to blacklist"); + } break; } - if (blacklist.replace(host, previousDeadHostState, - new DeadHostState(previousDeadHostState, DeadHostState.TimeSupplier.DEFAULT))) { - logger.debug("updated host [" + host + "] already in blacklist"); + if (blacklist.replace(node.getHost(), previousDeadHostState, + new DeadHostState(previousDeadHostState))) { + if (logger.isDebugEnabled()) { + logger.debug("updated [" + node + "] already in blacklist"); + } break; } } - failureListener.onFailure(host); + failureListener.onFailure(node); } @Override @@ -840,6 +937,11 @@ Response get() throws IOException { e.initCause(exception); throw e; } + if (exception instanceof ConnectException) { + ConnectException e = new ConnectException(exception.getMessage()); + e.initCause(exception); + throw e; + } if (exception instanceof IOException) { throw new IOException(exception.getMessage(), exception); } @@ -862,27 +964,76 @@ Response get() throws IOException { */ public static class FailureListener { /** - * Notifies that the host provided as argument has just failed + * Notifies that the node provided as argument has just failed */ - public void onFailure(HttpHost host) { - - } + public void onFailure(Node node) {} } /** - * {@code HostTuple} enables the {@linkplain HttpHost}s and {@linkplain AuthCache} to be set together in a thread - * safe, volatile way. + * {@link NodeTuple} enables the {@linkplain Node}s and {@linkplain AuthCache} + * to be set together in a thread safe, volatile way. 
     */
-    private static class HostTuple<T> {
-        final T hosts;
+    static class NodeTuple<T> {
+        final T nodes;
         final AuthCache authCache;

-        HostTuple(final T hosts, final AuthCache authCache) {
-            this.hosts = hosts;
+        NodeTuple(final T nodes, final AuthCache authCache) {
+            this.nodes = nodes;
             this.authCache = authCache;
         }
     }

+    /**
+     * Contains a reference to a blacklisted node and the time until it is
+     * revived. We use this so we can do a single pass over the blacklist.
+     */
+    private static class DeadNode implements Comparable<DeadNode> {
+        final Node node;
+        final DeadHostState deadness;
+
+        DeadNode(Node node, DeadHostState deadness) {
+            this.node = node;
+            this.deadness = deadness;
+        }
+
+        @Override
+        public String toString() {
+            return node.toString();
+        }
+
+        @Override
+        public int compareTo(DeadNode rhs) {
+            return deadness.compareTo(rhs.deadness);
+        }
+    }
+
+    /**
+     * Adapts an {@code Iterator<DeadNode>} into an
+     * {@code Iterator<Node>}.
+     */
+    private static class DeadNodeIteratorAdapter implements Iterator<Node> {
+        private final Iterator<DeadNode> itr;
+
+        private DeadNodeIteratorAdapter(Iterator<DeadNode> itr) {
+            this.itr = itr;
+        }
+
+        @Override
+        public boolean hasNext() {
+            return itr.hasNext();
+        }
+
+        @Override
+        public Node next() {
+            return itr.next().node;
+        }
+
+        @Override
+        public void remove() {
+            itr.remove();
+        }
+    }
+
     /**
      * Add all headers from the provided varargs argument to a {@link Request}. This only exists
      * to support methods that exist for backwards compatibility.
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java
index 5f7831c67fc28..17d27248dfea9 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.client;

 import org.apache.http.Header;
-import org.apache.http.HttpHost;
 import org.apache.http.client.config.RequestConfig;
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClientBuilder;
@@ -32,6 +31,7 @@
 import java.security.AccessController;
 import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedAction;
+import java.util.List;
 import java.util.Objects;

 /**
@@ -48,7 +48,7 @@ public final class RestClientBuilder {
     private static final Header[] EMPTY_HEADERS = new Header[0];

-    private final HttpHost[] hosts;
+    private final List<Node> nodes;
     private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
     private Header[] defaultHeaders = EMPTY_HEADERS;
     private RestClient.FailureListener failureListener;
@@ -59,18 +59,18 @@ public final class RestClientBuilder {
     /**
      * Creates a new builder instance and sets the hosts that the client will send requests to.
      *
-     * @throws NullPointerException if {@code hosts} or any host is {@code null}.
-     * @throws IllegalArgumentException if {@code hosts} is empty.
+     * @throws IllegalArgumentException if {@code nodes} is {@code null} or empty.
      */
-    RestClientBuilder(HttpHost...
hosts) { - Objects.requireNonNull(hosts, "hosts must not be null"); - if (hosts.length == 0) { - throw new IllegalArgumentException("no hosts provided"); + RestClientBuilder(List nodes) { + if (nodes == null || nodes.isEmpty()) { + throw new IllegalArgumentException("nodes must not be null or empty"); } - for (HttpHost host : hosts) { - Objects.requireNonNull(host, "host cannot be null"); + for (Node node : nodes) { + if (node == null) { + throw new IllegalArgumentException("node cannot be null"); + } } - this.hosts = hosts; + this.nodes = nodes; } /** @@ -186,7 +186,7 @@ public CloseableHttpAsyncClient run() { return createHttpClient(); } }); - RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, pathPrefix, failureListener); + RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, nodes, pathPrefix, failureListener); httpClient.start(); return restClient; } diff --git a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java index 75fbafd88f83c..daea27f896328 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java @@ -21,11 +21,15 @@ import java.util.concurrent.TimeUnit; +import org.elasticsearch.client.DeadHostState.TimeSupplier; + import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; public class DeadHostStateTests extends RestClientTestCase { @@ -42,7 +46,7 @@ public void testDeadHostStateFromPreviousDefaultTimeSupplier() { DeadHostState previous = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); int iters = randomIntBetween(5, 30); for (int i = 0; i < iters; i++) { - DeadHostState deadHostState = new DeadHostState(previous, DeadHostState.TimeSupplier.DEFAULT); + DeadHostState deadHostState = new DeadHostState(previous); assertThat(deadHostState.getDeadUntilNanos(), greaterThan(previous.getDeadUntilNanos())); assertThat(deadHostState.getFailedAttempts(), equalTo(previous.getFailedAttempts() + 1)); previous = deadHostState; @@ -56,7 +60,7 @@ public void testCompareToDefaultTimeSupplier() { if (i == 0) { deadHostStates[i] = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); } else { - deadHostStates[i] = new DeadHostState(deadHostStates[i - 1], DeadHostState.TimeSupplier.DEFAULT); + deadHostStates[i] = new DeadHostState(deadHostStates[i - 1]); } } for (int k = 1; k < deadHostStates.length; k++) { @@ -65,6 +69,17 @@ public void testCompareToDefaultTimeSupplier() { } } + public void testCompareToDifferingTimeSupplier() { + try { + new DeadHostState(TimeSupplier.DEFAULT).compareTo( + new DeadHostState(new ConfigurableTimeSupplier())); + fail("expected failure"); + } catch (IllegalArgumentException e) { + assertEquals("can't compare DeadHostStates with different clocks [nanoTime != configured[0]]", + e.getMessage()); + } + } + public void testShallBeRetried() { ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier(); DeadHostState deadHostState = null; @@ -74,7 +89,7 @@ public void testShallBeRetried() { if (i == 0) { deadHostState = new DeadHostState(timeSupplier); } else { - deadHostState = new DeadHostState(deadHostState, 
timeSupplier); + deadHostState = new DeadHostState(deadHostState); } for (int j = 0; j < expectedTimeoutSecond; j++) { timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1); @@ -94,25 +109,29 @@ public void testDeadHostStateTimeouts() { DeadHostState previous = new DeadHostState(zeroTimeSupplier); for (long expectedTimeoutsSecond : EXPECTED_TIMEOUTS_SECONDS) { assertThat(TimeUnit.NANOSECONDS.toSeconds(previous.getDeadUntilNanos()), equalTo(expectedTimeoutsSecond)); - previous = new DeadHostState(previous, zeroTimeSupplier); + previous = new DeadHostState(previous); } //check that from here on the timeout does not increase int iters = randomIntBetween(5, 30); for (int i = 0; i < iters; i++) { - DeadHostState deadHostState = new DeadHostState(previous, zeroTimeSupplier); + DeadHostState deadHostState = new DeadHostState(previous); assertThat(TimeUnit.NANOSECONDS.toSeconds(deadHostState.getDeadUntilNanos()), equalTo(EXPECTED_TIMEOUTS_SECONDS[EXPECTED_TIMEOUTS_SECONDS.length - 1])); previous = deadHostState; } } - private static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier { - + static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier { long nanoTime; @Override public long nanoTime() { return nanoTime; } + + @Override + public String toString() { + return "configured[" + nanoTime + "]"; + } } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java b/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java index e2f0ba81f6ed7..6c952fcf94759 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java +++ b/client/rest/src/test/java/org/elasticsearch/client/HostsTrackingFailureListener.java @@ -22,6 +22,7 @@ import org.apache.http.HttpHost; import java.util.HashSet; +import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -29,14 +30,22 @@ import static org.junit.Assert.assertThat; /** - * {@link org.elasticsearch.client.RestClient.FailureListener} impl that allows to track when it gets called for which host. + * {@link RestClient.FailureListener} impl that allows to track when it gets called for which host. */ class HostsTrackingFailureListener extends RestClient.FailureListener { private volatile Set hosts = new HashSet<>(); @Override - public void onFailure(HttpHost host) { - hosts.add(host); + public void onFailure(Node node) { + hosts.add(node.getHost()); + } + + void assertCalled(List nodes) { + HttpHost[] hosts = new HttpHost[nodes.size()]; + for (int i = 0 ; i < nodes.size(); i++) { + hosts[i] = nodes.get(i).getHost(); + } + assertCalled(hosts); } void assertCalled(HttpHost... hosts) { @@ -48,4 +57,4 @@ void assertCalled(HttpHost... hosts) { void assertNotCalled() { assertEquals(0, hosts.size()); } -} \ No newline at end of file +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java new file mode 100644 index 0000000000000..d9df001ad437e --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node.Roles; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class NodeSelectorTests extends RestClientTestCase { + public void testAny() { + List nodes = new ArrayList<>(); + int size = between(2, 5); + for (int i = 0; i < size; i++) { + nodes.add(dummyNode(randomBoolean(), randomBoolean(), randomBoolean())); + } + List expected = new ArrayList<>(nodes); + NodeSelector.ANY.select(nodes); + assertEquals(expected, nodes); + } + + public void testNotMasterOnly() { + Node masterOnly = dummyNode(true, false, false); + Node all = dummyNode(true, true, true); + Node masterAndData = dummyNode(true, true, false); + Node masterAndIngest = dummyNode(true, false, true); + Node coordinatingOnly = dummyNode(false, false, false); + Node ingestOnly = dummyNode(false, false, true); + Node data = dummyNode(false, true, randomBoolean()); + List nodes = new ArrayList<>(); + nodes.add(masterOnly); + nodes.add(all); + nodes.add(masterAndData); + nodes.add(masterAndIngest); + nodes.add(coordinatingOnly); + nodes.add(ingestOnly); + nodes.add(data); + Collections.shuffle(nodes, getRandom()); + List expected = new ArrayList<>(nodes); + expected.remove(masterOnly); + NodeSelector.NOT_MASTER_ONLY.select(nodes); + assertEquals(expected, nodes); + } + + private Node dummyNode(boolean master, boolean data, boolean ingest) { + return new Node(new HttpHost("dummy"), Collections.emptySet(), + randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), + new Roles(master, data, ingest)); + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java new file mode 100644 index 0000000000000..c6d60415b88dc --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node.Roles; + +import java.util.Arrays; +import java.util.HashSet; + +import static java.util.Collections.singleton; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class NodeTests extends RestClientTestCase { + public void testToString() { + assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString()); + assertEquals("[host=http://1, roles=mdi]", new Node(new HttpHost("1"), + null, null, null, new Roles(true, true, true)).toString()); + assertEquals("[host=http://1, version=ver]", new Node(new HttpHost("1"), + null, null, "ver", null).toString()); + assertEquals("[host=http://1, name=nam]", new Node(new HttpHost("1"), + null, "nam", null, null).toString()); + assertEquals("[host=http://1, bound=[http://1, http://2]]", new Node(new HttpHost("1"), + new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null).toString()); + assertEquals("[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m]", + new Node(new HttpHost("1"), new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), + "nam", "ver", new Roles(true, false, false)).toString()); + + } + + public void testEqualsAndHashCode() { + HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5)); + Node node = new Node(host, + randomBoolean() ? null : singleton(host), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : new Roles(true, true, true)); + assertFalse(node.equals(null)); + assertTrue(node.equals(node)); + assertEquals(node.hashCode(), node.hashCode()); + Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles()); + assertTrue(node.equals(copy)); + assertEquals(node.hashCode(), copy.hashCode()); + assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(), + node.getName(), node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))), + node.getName(), node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false)))); + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java index 19106792228d9..a78be6c126bae 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java @@ -114,6 +114,10 @@ static RequestOptions.Builder randomBuilder() { } } + if (randomBoolean()) { + builder.setNodeSelector(mock(NodeSelector.class)); + } + if (randomBoolean()) { builder.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); } @@ -127,12 +131,15 @@ private static RequestOptions copy(RequestOptions options) { private static RequestOptions mutate(RequestOptions options) { RequestOptions.Builder mutant 
= options.toBuilder(); - int mutationType = between(0, 1); + int mutationType = between(0, 2); switch (mutationType) { case 0: mutant.addHeader("extra", "m"); return mutant.build(); case 1: + mutant.setNodeSelector(mock(NodeSelector.class)); + return mutant.build(); + case 2: mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); return mutant.build(); default: diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 9657e782bda04..9fcb4978e28a7 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -39,21 +39,42 @@ public void testBuild() throws IOException { try { RestClient.builder((HttpHost[])null); fail("should have failed"); - } catch(NullPointerException e) { - assertEquals("hosts must not be null", e.getMessage()); + } catch(IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + + try { + RestClient.builder(new HttpHost[] {}); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); } try { - RestClient.builder(); + RestClient.builder((Node[])null); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals("no hosts provided", e.getMessage()); + assertEquals("nodes must not be null or empty", e.getMessage()); + } + + try { + RestClient.builder(new Node[] {}); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + + try { + RestClient.builder(new Node(new HttpHost("localhost", 9200)), null); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("node cannot be null", e.getMessage()); } try { RestClient.builder(new HttpHost("localhost", 9200), null); fail("should have failed"); - } catch(NullPointerException e) { + } catch(IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index aa323276404cf..92a960090ad6a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -29,9 +29,11 @@ import org.junit.BeforeClass; import java.io.IOException; +import java.net.ConnectException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -42,6 +44,7 @@ import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. 
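Putting the new pieces together, per-request routing looks roughly like this (an illustrative sketch rather than code from the patch; the host names and endpoint are invented and error handling is omitted):

    RestClient client = RestClient.builder(
            new Node(new HttpHost("node-1", 9200), null, null, null,
                    new Node.Roles(true, false, false)),  // dedicated master
            new Node(new HttpHost("node-2", 9200), null, null, null,
                    new Node.Roles(false, true, true)))   // data + ingest
            .build();
    Request request = new Request("GET", "/_cluster/health");
    RequestOptions.Builder options = request.getOptions().toBuilder();
    options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); // removes node-1
    request.setOptions(options);
    Response response = client.performRequest(request);   // served by node-2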
@@ -50,31 +53,37 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { private static HttpServer[] httpServers; - private static RestClient restClient; + private static HttpHost[] httpHosts; + private static boolean stoppedFirstHost = false; + private static String pathPrefixWithoutLeadingSlash; private static String pathPrefix; + private static RestClient restClient; @BeforeClass public static void startHttpServer() throws Exception { - String pathPrefixWithoutLeadingSlash; if (randomBoolean()) { - pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiOfLengthBetween(1, 5); + pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiLettersOfLengthBetween(1, 5); pathPrefix = "/" + pathPrefixWithoutLeadingSlash; } else { pathPrefix = pathPrefixWithoutLeadingSlash = ""; } int numHttpServers = randomIntBetween(2, 4); httpServers = new HttpServer[numHttpServers]; - HttpHost[] httpHosts = new HttpHost[numHttpServers]; + httpHosts = new HttpHost[numHttpServers]; for (int i = 0; i < numHttpServers; i++) { HttpServer httpServer = createHttpServer(); httpServers[i] = httpServer; httpHosts[i] = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); } + restClient = buildRestClient(); + } + + private static RestClient buildRestClient() { RestClientBuilder restClientBuilder = RestClient.builder(httpHosts); if (pathPrefix.length() > 0) { restClientBuilder.setPathPrefix((randomBoolean() ? "/" : "") + pathPrefixWithoutLeadingSlash); } - restClient = restClientBuilder.build(); + return restClientBuilder.build(); } private static HttpServer createHttpServer() throws Exception { @@ -118,6 +127,9 @@ public void stopRandomHost() { if (httpServers.length > 1 && randomBoolean()) { List updatedHttpServers = new ArrayList<>(httpServers.length - 1); int nodeIndex = randomInt(httpServers.length - 1); + if (0 == nodeIndex) { + stoppedFirstHost = true; + } for (int i = 0; i < httpServers.length; i++) { HttpServer httpServer = httpServers[i]; if (i == nodeIndex) { @@ -182,6 +194,35 @@ public void onFailure(Exception exception) { } } + /** + * Test host selector against a real server and + * test what happens after calling + */ + public void testNodeSelector() throws IOException { + Request request = new Request("GET", "/200"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setNodeSelector(firstPositionNodeSelector()); + request.setOptions(options); + int rounds = between(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. 
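+             * If the first host has been stopped, each attempt is expected to
+             * fail with a connection-refused error; otherwise every response
+             * must come from httpHosts[0].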
+ */ + if (stoppedFirstHost) { + try { + restClient.performRequest(request); + fail("expected to fail to connect"); + } catch (ConnectException e) { + assertEquals("Connection refused", e.getMessage()); + } + } else { + Response response = restClient.performRequest(request); + assertEquals(httpHosts[0], response.getHost()); + } + } + } + private static class TestResponse { private final String method; private final int statusCode; @@ -203,4 +244,17 @@ Response getResponse() { throw new AssertionError("unexpected response " + response.getClass()); } } + + private NodeSelector firstPositionNodeSelector() { + return new NodeSelector() { + @Override + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + if (httpHosts[0] != itr.next().getHost()) { + itr.remove(); + } + } + } + }; + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index a3a834ff3204b..eb591f4ccff3a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -35,6 +35,7 @@ import org.apache.http.message.BasicStatusLine; import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.elasticsearch.client.Node.Roles; import org.junit.After; import org.junit.Before; import org.mockito.invocation.InvocationOnMock; @@ -42,8 +43,11 @@ import java.io.IOException; import java.net.SocketTimeoutException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; +import java.util.Iterator; +import java.util.List; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -71,7 +75,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { private ExecutorService exec = Executors.newFixedThreadPool(1); private RestClient restClient; - private HttpHost[] httpHosts; + private List nodes; private HostsTrackingFailureListener failureListener; @Before @@ -108,13 +112,14 @@ public void run() { return null; } }); - int numHosts = RandomNumbers.randomIntBetween(getRandom(), 2, 5); - httpHosts = new HttpHost[numHosts]; - for (int i = 0; i < numHosts; i++) { - httpHosts[i] = new HttpHost("localhost", 9200 + i); + int numNodes = RandomNumbers.randomIntBetween(getRandom(), 2, 5); + nodes = new ArrayList<>(numNodes); + for (int i = 0; i < numNodes; i++) { + nodes.add(new Node(new HttpHost("localhost", 9200 + i))); } + nodes = Collections.unmodifiableList(nodes); failureListener = new HostsTrackingFailureListener(); - restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, null, failureListener); + restClient = new RestClient(httpClient, 10000, new Header[0], nodes, null, failureListener); } /** @@ -128,9 +133,8 @@ public void shutdownExec() { public void testRoundRobinOkStatusCodes() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); - for (int j = 0; j < httpHosts.length; j++) { + Set hostsSet = hostsSet(); + for (int j = 0; j < nodes.size(); j++) { int statusCode = randomOkStatusCode(getRandom()); Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode); 
assertEquals(statusCode, response.getStatusLine().getStatusCode()); @@ -144,9 +148,8 @@ public void testRoundRobinOkStatusCodes() throws IOException { public void testRoundRobinNoRetryErrors() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); - for (int j = 0; j < httpHosts.length; j++) { + Set hostsSet = hostsSet(); + for (int j = 0; j < nodes.size(); j++) { String method = randomHttpMethod(getRandom()); int statusCode = randomErrorNoRetryStatusCode(getRandom()); try { @@ -185,10 +188,9 @@ public void testRoundRobinRetryErrors() throws IOException { * the caller. It wraps the exception that contains the failed hosts. */ e = (ResponseException) e.getCause(); - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); + Set hostsSet = hostsSet(); //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each - failureListener.assertCalled(httpHosts); + failureListener.assertCalled(nodes); do { Response response = e.getResponse(); assertEquals(Integer.parseInt(retryEndpoint.substring(1)), response.getStatusLine().getStatusCode()); @@ -210,10 +212,9 @@ public void testRoundRobinRetryErrors() throws IOException { * the caller. It wraps the exception that contains the failed hosts. */ e = (IOException) e.getCause(); - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); + Set hostsSet = hostsSet(); //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each - failureListener.assertCalled(httpHosts); + failureListener.assertCalled(nodes); do { HttpHost httpHost = HttpHost.create(e.getMessage()); assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost)); @@ -232,9 +233,8 @@ public void testRoundRobinRetryErrors() throws IOException { int numIters = RandomNumbers.randomIntBetween(getRandom(), 2, 5); for (int i = 1; i <= numIters; i++) { //check that one different host is resurrected at each new attempt - Set hostsSet = new HashSet<>(); - Collections.addAll(hostsSet, httpHosts); - for (int j = 0; j < httpHosts.length; j++) { + Set hostsSet = hostsSet(); + for (int j = 0; j < nodes.size(); j++) { retryEndpoint = randomErrorRetryEndpoint(); try { restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); @@ -308,6 +308,58 @@ public void testRoundRobinRetryErrors() throws IOException { } } + public void testNodeSelector() throws IOException { + NodeSelector firstPositionOnly = new NodeSelector() { + @Override + public void select(Iterable restClientNodes) { + boolean found = false; + for (Iterator itr = restClientNodes.iterator(); itr.hasNext();) { + if (nodes.get(0) == itr.next()) { + found = true; + } else { + itr.remove(); + } + } + assertTrue(found); + } + }; + int rounds = between(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. 
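+             * Because the selector keeps only nodes.get(0), every response
+             * must come from that node, no matter how far the internal
+             * rotation has advanced.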
+ */ + Request request = new Request("GET", "/200"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setNodeSelector(firstPositionOnly); + request.setOptions(options); + Response response = restClient.performRequest(request); + assertEquals(nodes.get(0).getHost(), response.getHost()); + } + } + + public void testSetNodes() throws IOException { + List newNodes = new ArrayList<>(nodes.size()); + for (int i = 0; i < nodes.size(); i++) { + Roles roles = i == 0 ? new Roles(false, true, true) : new Roles(true, false, false); + newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles)); + } + restClient.setNodes(newNodes); + int rounds = between(1, 10); + for (int i = 0; i < rounds; i++) { + /* + * Run the request more than once to verify that the + * NodeSelector overrides the round robin behavior. + */ + Request request = new Request("GET", "/200"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + request.setOptions(options); + Response response = restClient.performRequest(request); + assertEquals(newNodes.get(0).getHost(), response.getHost()); + } + } + private static String randomErrorRetryEndpoint() { switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) { case 0: @@ -321,4 +373,16 @@ private static String randomErrorRetryEndpoint() { } throw new UnsupportedOperationException(); } + + /** + * Build a mutable {@link Set} containing all the {@link Node#getHost() hosts} + * in use by the test. + */ + private Set hostsSet() { + Set hosts = new HashSet<>(); + for (Node node : nodes) { + hosts.add(node.getHost()); + } + return hosts; + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 634929c5de156..5987fe7dd9849 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -65,6 +65,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; +import static java.util.Collections.singletonList; import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes; @@ -94,7 +95,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { private ExecutorService exec = Executors.newFixedThreadPool(1); private RestClient restClient; private Header[] defaultHeaders; - private HttpHost httpHost; + private Node node; private CloseableHttpAsyncClient httpClient; private HostsTrackingFailureListener failureListener; @@ -108,7 +109,7 @@ public void createRestClient() { public Future answer(InvocationOnMock invocationOnMock) throws Throwable { HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; HttpClientContext context = (HttpClientContext) invocationOnMock.getArguments()[2]; - assertThat(context.getAuthCache().get(httpHost), instanceOf(BasicScheme.class)); + assertThat(context.getAuthCache().get(node.getHost()), instanceOf(BasicScheme.class)); final FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest(); @@ -146,9 +147,10 @@ public void run() { }); defaultHeaders = 
RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); - httpHost = new HttpHost("localhost", 9200); + node = new Node(new HttpHost("localhost", 9200)); failureListener = new HostsTrackingFailureListener(); - restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener); + restClient = new RestClient(httpClient, 10000, defaultHeaders, + singletonList(node), null, failureListener); } /** @@ -244,7 +246,7 @@ public void testErrorStatusCodes() throws IOException { if (errorStatusCode <= 500 || expectedIgnores.contains(errorStatusCode)) { failureListener.assertNotCalled(); } else { - failureListener.assertCalled(httpHost); + failureListener.assertCalled(singletonList(node)); } } } @@ -259,14 +261,14 @@ public void testIOExceptions() { } catch(IOException e) { assertThat(e, instanceOf(ConnectTimeoutException.class)); } - failureListener.assertCalled(httpHost); + failureListener.assertCalled(singletonList(node)); try { performRequest(method, "/soe"); fail("request should have failed"); } catch(IOException e) { assertThat(e, instanceOf(SocketTimeoutException.class)); } - failureListener.assertCalled(httpHost); + failureListener.assertCalled(singletonList(node)); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 5fe5fcae78fee..01f6f308f6227 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -22,14 +22,23 @@ import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import org.elasticsearch.client.DeadHostStateTests.ConfigurableTimeSupplier; +import org.elasticsearch.client.RestClient.NodeTuple; import java.io.IOException; import java.net.URI; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.TimeUnit; +import static java.util.Collections.singletonList; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; @@ -43,9 +52,9 @@ public class RestClientTests extends RestClientTestCase { public void testCloseIsIdempotent() throws IOException { - HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)}; + List nodes = singletonList(new Node(new HttpHost("localhost", 9200))); CloseableHttpAsyncClient closeableHttpAsyncClient = mock(CloseableHttpAsyncClient.class); - RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], hosts, null, null); + RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], nodes, null, null); restClient.close(); verify(closeableHttpAsyncClient, times(1)).close(); restClient.close(); @@ -225,6 +234,7 @@ public void testBuildUriLeavesPathUntouched() { } } + @Deprecated public void testSetHostsWrongArguments() throws IOException { try (RestClient restClient = createRestClient()) { restClient.setHosts((HttpHost[]) null); @@ -241,45 +251,75 @@ public void testSetHostsWrongArguments() throws IOException { try (RestClient restClient = createRestClient()) { restClient.setHosts((HttpHost) null); 
fail("setHosts should have failed"); - } catch (NullPointerException e) { + } catch (IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } try (RestClient restClient = createRestClient()) { restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); fail("setHosts should have failed"); - } catch (NullPointerException e) { + } catch (IllegalArgumentException e) { assertEquals("host cannot be null", e.getMessage()); } } - public void testSetHostsPreservesOrdering() throws Exception { + public void testSetNodesWrongArguments() throws IOException { + try (RestClient restClient = createRestClient()) { + restClient.setNodes(null); + fail("setNodes should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setNodes(Collections.emptyList()); + fail("setNodes should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("nodes must not be null or empty", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setNodes(Collections.singletonList((Node) null)); + fail("setNodes should have failed"); + } catch (NullPointerException e) { + assertEquals("node cannot be null", e.getMessage()); + } + try (RestClient restClient = createRestClient()) { + restClient.setNodes(Arrays.asList( + new Node(new HttpHost("localhost", 9200)), + null, + new Node(new HttpHost("localhost", 9201)))); + fail("setNodes should have failed"); + } catch (NullPointerException e) { + assertEquals("node cannot be null", e.getMessage()); + } + } + + public void testSetNodesPreservesOrdering() throws Exception { try (RestClient restClient = createRestClient()) { - HttpHost[] hosts = randomHosts(); - restClient.setHosts(hosts); - assertEquals(Arrays.asList(hosts), restClient.getHosts()); + List nodes = randomNodes(); + restClient.setNodes(nodes); + assertEquals(nodes, restClient.getNodes()); } } - private static HttpHost[] randomHosts() { - int numHosts = randomIntBetween(1, 10); - HttpHost[] hosts = new HttpHost[numHosts]; - for (int i = 0; i < hosts.length; i++) { - hosts[i] = new HttpHost("host-" + i, 9200); + private static List randomNodes() { + int numNodes = randomIntBetween(1, 10); + List nodes = new ArrayList<>(numNodes); + for (int i = 0; i < numNodes; i++) { + nodes.add(new Node(new HttpHost("host-" + i, 9200))); } - return hosts; + return nodes; } - public void testSetHostsDuplicatedHosts() throws Exception { + public void testSetNodesDuplicatedHosts() throws Exception { try (RestClient restClient = createRestClient()) { - int numHosts = randomIntBetween(1, 10); - HttpHost[] hosts = new HttpHost[numHosts]; - HttpHost host = new HttpHost("host", 9200); - for (int i = 0; i < hosts.length; i++) { - hosts[i] = host; + int numNodes = randomIntBetween(1, 10); + List nodes = new ArrayList<>(numNodes); + Node node = new Node(new HttpHost("host", 9200)); + for (int i = 0; i < numNodes; i++) { + nodes.add(node); } - restClient.setHosts(hosts); - assertEquals(1, restClient.getHosts().size()); - assertEquals(host, restClient.getHosts().get(0)); + restClient.setNodes(nodes); + assertEquals(1, restClient.getNodes().size()); + assertEquals(node, restClient.getNodes().get(0)); } } @@ -300,8 +340,143 @@ public void testNullPath() throws IOException { } } + public void testSelectHosts() throws IOException { + Node n1 = new Node(new HttpHost("1"), null, null, "1", null); + Node n2 = new 
Node(new HttpHost("2"), null, null, "2", null); + Node n3 = new Node(new HttpHost("3"), null, null, "3", null); + + NodeSelector not1 = new NodeSelector() { + @Override + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + if ("1".equals(itr.next().getVersion())) { + itr.remove(); + } + } + } + + @Override + public String toString() { + return "NOT 1"; + } + }; + NodeSelector noNodes = new NodeSelector() { + @Override + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + itr.next(); + itr.remove(); + } + } + + @Override + public String toString() { + return "NONE"; + } + }; + + NodeTuple> nodeTuple = new NodeTuple<>(Arrays.asList(n1, n2, n3), null); + + Map emptyBlacklist = Collections.emptyMap(); + + // Normal cases where the node selector doesn't reject all living nodes + assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyBlacklist, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyBlacklist, not1); + + /* + * Try a NodeSelector that excludes all nodes. This should + * throw an exception + */ + { + String message = "NodeSelector [NONE] rejected all nodes, living [" + + "[host=http://1, version=1], [host=http://2, version=2], " + + "[host=http://3, version=3]] and dead []"; + assertEquals(message, assertSelectAllRejected(nodeTuple, emptyBlacklist, noNodes)); + } + + // Mark all the nodes dead for a few test cases + { + ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier(); + Map blacklist = new HashMap<>(); + blacklist.put(n1.getHost(), new DeadHostState(timeSupplier)); + blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier))); + blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier)))); + + /* + * selectHosts will revive a single host if regardless of + * blacklist time. It'll revive the node that is closest + * to being revived that the NodeSelector is ok with. + */ + assertEquals(singletonList(n1), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), NodeSelector.ANY)); + assertEquals(singletonList(n2), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), not1)); + + /* + * Try a NodeSelector that excludes all nodes. This should + * return a failure, but a different failure than when the + * blacklist is empty so that the caller knows that all of + * their nodes are blacklisted AND blocked. + */ + String message = "NodeSelector [NONE] rejected all nodes, living [] and dead [" + + "[host=http://1, version=1], [host=http://2, version=2], " + + "[host=http://3, version=3]]"; + assertEquals(message, assertSelectAllRejected(nodeTuple, blacklist, noNodes)); + + /* + * Now lets wind the clock forward, past the timeout for one of + * the dead nodes. We should return it. + */ + timeSupplier.nanoTime = new DeadHostState(timeSupplier).getDeadUntilNanos(); + assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, blacklist, NodeSelector.ANY); + + /* + * But if the NodeSelector rejects that node then we'll pick the + * first on that the NodeSelector doesn't reject. + */ + assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, blacklist, not1); + + /* + * If we wind the clock way into the future, past any of the + * blacklist timeouts then we function as though the nodes aren't + * in the blacklist at all. 
+             */
+            timeSupplier.nanoTime += DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS;
+            assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, blacklist, NodeSelector.ANY);
+            assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, blacklist, not1);
+        }
+    }
+
+    private void assertSelectLivingHosts(List<Node> expectedNodes, NodeTuple<List<Node>> nodeTuple,
+            Map<HttpHost, DeadHostState> blacklist, NodeSelector nodeSelector) throws IOException {
+        int iterations = 1000;
+        AtomicInteger lastNodeIndex = new AtomicInteger(0);
+        assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector));
+        // Calling it again rotates the set of results
+        for (int i = 1; i < iterations; i++) {
+            Collections.rotate(expectedNodes, 1);
+            assertEquals("iteration " + i, expectedNodes,
+                    RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector));
+        }
+    }
+
+    /**
+     * Assert that {@link RestClient#selectHosts} fails on the provided arguments.
+     * @return the message in the exception thrown by the failure
+     */
+    private String assertSelectAllRejected(NodeTuple<List<Node>> nodeTuple,
+            Map<HttpHost, DeadHostState> blacklist, NodeSelector nodeSelector) {
+        try {
+            RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(0), nodeSelector);
+            throw new AssertionError("expected selectHosts to fail");
+        } catch (IOException e) {
+            return e.getMessage();
+        }
+    }
+
     private static RestClient createRestClient() {
-        HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)};
-        return new RestClient(mock(CloseableHttpAsyncClient.class), randomIntBetween(1_000, 30_000), new Header[]{}, hosts, null, null);
+        List<Node> nodes = Collections.singletonList(new Node(new HttpHost("localhost", 9200)));
+        return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000),
+                new Header[] {}, nodes, null, null);
     }
+
 }
diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java
index d73c29bd91bc4..0cc41b078b8d6 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java
@@ -36,7 +36,9 @@
 import org.apache.http.ssl.SSLContextBuilder;
 import org.apache.http.ssl.SSLContexts;
 import org.apache.http.util.EntityUtils;
-import org.elasticsearch.client.HttpAsyncResponseConsumerFactory;
+import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;
+import org.elasticsearch.client.Node;
+import org.elasticsearch.client.NodeSelector;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
@@ -72,6 +74,19 @@
  */
 @SuppressWarnings("unused")
 public class RestClientDocumentation {
+    private static final String TOKEN = "DUMMY";
+
+    // tag::rest-client-options-singleton
+    private static final RequestOptions COMMON_OPTIONS;
+    static {
+        RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
+        builder.addHeader("Authorization", "Bearer " + TOKEN); // <1>
+        builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); // <2>
+        builder.setHttpAsyncResponseConsumerFactory( // <3>
+            new HeapBufferedResponseConsumerFactory(30 * 1024 * 1024)); // 30MB buffer limit
+        COMMON_OPTIONS = builder.build();
+    }
+    // end::rest-client-options-singleton

     @SuppressWarnings("unused")
     public void testUsage() throws IOException, InterruptedException {
@@ -104,7 +119,7 @@ public
void testUsage() throws IOException, InterruptedException { RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http")); builder.setFailureListener(new RestClient.FailureListener() { @Override - public void onFailure(HttpHost host) { + public void onFailure(Node node) { // <1> } }); @@ -172,22 +187,14 @@ public void onFailure(Exception exception) { //tag::rest-client-body-shorter request.setJsonEntity("{\"json\":\"text\"}"); //end::rest-client-body-shorter - { - //tag::rest-client-headers - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.addHeader("Accept", "text/plain"); - options.addHeader("Cache-Control", "no-cache"); - request.setOptions(options); - //end::rest-client-headers - } - { - //tag::rest-client-response-consumer - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.setHttpAsyncResponseConsumerFactory( - new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024)); - request.setOptions(options); - //end::rest-client-response-consumer - } + //tag::rest-client-options-set-singleton + request.setOptions(COMMON_OPTIONS); + //end::rest-client-options-set-singleton + //tag::rest-client-options-customize + RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); + options.addHeader("cats", "knock things off of other things"); + request.setOptions(options); + //end::rest-client-options-customize } { HttpEntity[] documents = new HttpEntity[10]; diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchHostsSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java similarity index 50% rename from client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchHostsSniffer.java rename to client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java index 34a4988358653..da7ef4700fd2f 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchHostsSniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java @@ -26,31 +26,34 @@ import org.apache.commons.logging.LogFactory; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.Node.Roles; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.util.ArrayList; -import java.util.Collections; +import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.TimeUnit; /** * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back. - * Compatible with elasticsearch 5.x and 2.x. + * Compatible with elasticsearch 2.x+. 
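+ * It calls the nodes info api at {@code /_nodes/http} and, for every node
+ * that has http enabled, reads the publish address, the bound addresses,
+ * the name, the version and the roles into a {@link Node}.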
*/ -public final class ElasticsearchHostsSniffer implements HostsSniffer { +public final class ElasticsearchNodesSniffer implements NodesSniffer { - private static final Log logger = LogFactory.getLog(ElasticsearchHostsSniffer.class); + private static final Log logger = LogFactory.getLog(ElasticsearchNodesSniffer.class); public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1); private final RestClient restClient; - private final Map sniffRequestParams; + private final Request request; private final Scheme scheme; private final JsonFactory jsonFactory = new JsonFactory(); @@ -62,8 +65,8 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer { * that is also provided to {@link Sniffer#builder(RestClient)}, so that the hosts are set to the same * client that was used to fetch them. */ - public ElasticsearchHostsSniffer(RestClient restClient) { - this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchHostsSniffer.Scheme.HTTP); + public ElasticsearchNodesSniffer(RestClient restClient) { + this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchNodesSniffer.Scheme.HTTP); } /** @@ -77,30 +80,32 @@ public ElasticsearchHostsSniffer(RestClient restClient) { * that have responded within this timeout will be returned. * @param scheme the scheme to associate sniffed nodes with (as it is not returned by elasticsearch) */ - public ElasticsearchHostsSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) { + public ElasticsearchNodesSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) { this.restClient = Objects.requireNonNull(restClient, "restClient cannot be null"); if (sniffRequestTimeoutMillis < 0) { throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0"); } - this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeoutMillis + "ms"); + this.request = new Request("GET", "/_nodes/http"); + request.addParameter("timeout", sniffRequestTimeoutMillis + "ms"); this.scheme = Objects.requireNonNull(scheme, "scheme cannot be null"); } /** * Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts */ - public List sniffHosts() throws IOException { - Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams); - return readHosts(response.getEntity()); + @Override + public List sniff() throws IOException { + Response response = restClient.performRequest(request); + return readHosts(response.getEntity(), scheme, jsonFactory); } - private List readHosts(HttpEntity entity) throws IOException { + static List readHosts(HttpEntity entity, Scheme scheme, JsonFactory jsonFactory) throws IOException { try (InputStream inputStream = entity.getContent()) { JsonParser parser = jsonFactory.createParser(inputStream); if (parser.nextToken() != JsonToken.START_OBJECT) { throw new IOException("expected data to start with an object"); } - List hosts = new ArrayList<>(); + List nodes = new ArrayList<>(); while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.START_OBJECT) { if ("nodes".equals(parser.getCurrentName())) { @@ -108,10 +113,9 @@ private List readHosts(HttpEntity entity) throws IOException { JsonToken token = parser.nextToken(); assert token == JsonToken.START_OBJECT; String nodeId = parser.getCurrentName(); - HttpHost sniffedHost = readHost(nodeId, parser, this.scheme); - if (sniffedHost != null) { - logger.trace("adding node [" + nodeId + "]"); - 
hosts.add(sniffedHost); + Node node = readNode(nodeId, parser, scheme); + if (node != null) { + nodes.add(node); } } } else { @@ -119,13 +123,31 @@ private List readHosts(HttpEntity entity) throws IOException { } } } - return hosts; + return nodes; } } - private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException { - HttpHost httpHost = null; + private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) throws IOException { + HttpHost publishedHost = null; + /* + * We sniff the bound hosts so we can look up the node based on any + * address on which it is listening. This is useful in Elasticsearch's + * test framework where we sometimes publish ipv6 addresses but the + * tests contact the node on ipv4. + */ + Set boundHosts = new HashSet<>(); + String name = null; + String version = null; String fieldName = null; + // Used to read roles from 5.0+ + boolean sawRoles = false; + boolean master = false; + boolean data = false; + boolean ingest = false; + // Used to read roles from 2.x + Boolean masterAttribute = null; + Boolean dataAttribute = null; + boolean clientAttribute = false; while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.FIELD_NAME) { fieldName = parser.getCurrentName(); @@ -133,9 +155,27 @@ private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme if ("http".equals(fieldName)) { while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) { - URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); - httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), - boundAddressAsURI.getScheme()); + URI publishAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); + publishedHost = new HttpHost(publishAddressAsURI.getHost(), publishAddressAsURI.getPort(), + publishAddressAsURI.getScheme()); + } else if (parser.currentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) { + while (parser.nextToken() != JsonToken.END_ARRAY) { + URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); + boundHosts.add(new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), + boundAddressAsURI.getScheme())); + } + } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + parser.skipChildren(); + } + } + } else if ("attributes".equals(fieldName)) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "master".equals(parser.getCurrentName())) { + masterAttribute = toBoolean(parser.getValueAsString()); + } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "data".equals(parser.getCurrentName())) { + dataAttribute = toBoolean(parser.getValueAsString()); + } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "client".equals(parser.getCurrentName())) { + clientAttribute = toBoolean(parser.getValueAsString()); } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { parser.skipChildren(); } @@ -143,14 +183,55 @@ private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme } else { parser.skipChildren(); } + } else if (parser.currentToken() == JsonToken.START_ARRAY) { + if ("roles".equals(fieldName)) { + sawRoles = true; + while (parser.nextToken() != JsonToken.END_ARRAY) { + switch (parser.getText()) { + case "master": + master = true; + 
break; + case "data": + data = true; + break; + case "ingest": + ingest = true; + break; + default: + logger.warn("unknown role [" + parser.getText() + "] on node [" + nodeId + "]"); + } + } + } else { + parser.skipChildren(); + } + } else if (parser.currentToken().isScalarValue()) { + if ("version".equals(fieldName)) { + version = parser.getText(); + } else if ("name".equals(fieldName)) { + name = parser.getText(); + } } } //http section is not present if http is not enabled on the node, ignore such nodes - if (httpHost == null) { + if (publishedHost == null) { logger.debug("skipping node [" + nodeId + "] with http disabled"); return null; + } else { + logger.trace("adding node [" + nodeId + "]"); + if (version.startsWith("2.")) { + /* + * 2.x doesn't send roles, instead we try to read them from + * attributes. + */ + master = masterAttribute == null ? false == clientAttribute : masterAttribute; + data = dataAttribute == null ? false == clientAttribute : dataAttribute; + } else { + assert sawRoles : "didn't see roles for [" + nodeId + "]"; + } + assert boundHosts.contains(publishedHost) : + "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts"; + return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest)); } - return httpHost; } public enum Scheme { @@ -167,4 +248,15 @@ public String toString() { return name; } } + + private static boolean toBoolean(String string) { + switch (string) { + case "true": + return true; + case "false": + return false; + default: + throw new IllegalArgumentException("[" + string + "] is not a valid boolean"); + } + } } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/NodesSniffer.java similarity index 85% rename from client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java rename to client/sniffer/src/main/java/org/elasticsearch/client/sniff/NodesSniffer.java index 9eb7b34425944..c22c18f6eae32 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/NodesSniffer.java @@ -19,7 +19,7 @@ package org.elasticsearch.client.sniff; -import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import java.io.IOException; import java.util.List; @@ -27,9 +27,9 @@ /** * Responsible for sniffing the http hosts */ -public interface HostsSniffer { +public interface NodesSniffer { /** - * Returns the sniffed http hosts + * Returns the sniffed Elasticsearch nodes. 
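+     * @throws IOException if fetching the nodes fails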
*/ - List sniffHosts() throws IOException; + List sniff() throws IOException; } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java index 41051555bae2c..9d5627922823d 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java @@ -19,7 +19,7 @@ package org.elasticsearch.client.sniff; -import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import java.util.Objects; @@ -54,7 +54,7 @@ public void setSniffer(Sniffer sniffer) { } @Override - public void onFailure(HttpHost host) { + public void onFailure(Node node) { if (sniffer == null) { throw new IllegalStateException("sniffer was not set, unable to sniff on failure"); } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index dc873ccd44e10..73780586e7617 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -21,7 +21,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; @@ -29,6 +29,7 @@ import java.io.IOException; import java.security.AccessController; import java.security.PrivilegedAction; +import java.util.Collection; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.Future; @@ -43,7 +44,7 @@ /** * Class responsible for sniffing nodes from some source (default is elasticsearch itself) and setting them to a provided instance of * {@link RestClient}. Must be created via {@link SnifferBuilder}, which allows to set all of the different options or rely on defaults. - * A background task fetches the nodes through the {@link HostsSniffer} and sets them to the {@link RestClient} instance. + * A background task fetches the nodes through the {@link NodesSniffer} and sets them to the {@link RestClient} instance. * It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to * {@link RestClientBuilder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation needs to be lazily set to the * previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}. 
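+ * <p>
+ * A minimal wiring sketch, assuming a node reachable on localhost:9200 and
+ * using only the APIs named above:
+ * <pre>{@code
+ * SniffOnFailureListener listener = new SniffOnFailureListener();
+ * RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
+ *     .setFailureListener(listener)
+ *     .build();
+ * Sniffer sniffer = Sniffer.builder(restClient)
+ *     .setSniffAfterFailureDelayMillis(30000)
+ *     .build();
+ * listener.setSniffer(sniffer);
+ * // on shutdown, close the sniffer first, then the client
+ * sniffer.close();
+ * restClient.close();
+ * }</pre>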
@@ -53,7 +54,7 @@ public class Sniffer implements Closeable { private static final Log logger = LogFactory.getLog(Sniffer.class); private static final String SNIFFER_THREAD_NAME = "es_rest_client_sniffer"; - private final HostsSniffer hostsSniffer; + private final NodesSniffer nodesSniffer; private final RestClient restClient; private final long sniffIntervalMillis; private final long sniffAfterFailureDelayMillis; @@ -61,12 +62,12 @@ public class Sniffer implements Closeable { private final AtomicBoolean initialized = new AtomicBoolean(false); private volatile ScheduledTask nextScheduledTask; - Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) { - this(restClient, hostsSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay); + Sniffer(RestClient restClient, NodesSniffer nodesSniffer, long sniffInterval, long sniffAfterFailureDelay) { + this(restClient, nodesSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay); } - Sniffer(RestClient restClient, HostsSniffer hostsSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) { - this.hostsSniffer = hostsSniffer; + Sniffer(RestClient restClient, NodesSniffer nodesSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) { + this.nodesSniffer = nodesSniffer; this.restClient = restClient; this.sniffIntervalMillis = sniffInterval; this.sniffAfterFailureDelayMillis = sniffAfterFailureDelay; @@ -205,14 +206,14 @@ boolean skip() { } final void sniff() throws IOException { - List sniffedHosts = hostsSniffer.sniffHosts(); + List sniffedNodes = nodesSniffer.sniff(); if (logger.isDebugEnabled()) { - logger.debug("sniffed hosts: " + sniffedHosts); + logger.debug("sniffed nodes: " + sniffedNodes); } - if (sniffedHosts.isEmpty()) { - logger.warn("no hosts to set, hosts will be updated at the next sniffing round"); + if (sniffedNodes.isEmpty()) { + logger.warn("no nodes to set, nodes will be updated at the next sniffing round"); } else { - restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()])); + restClient.setNodes(sniffedNodes); } } @@ -227,7 +228,8 @@ public void close() { /** * Returns a new {@link SnifferBuilder} to help with {@link Sniffer} creation. 
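+     * <p>
+     * For example, {@code Sniffer.builder(restClient).build()} creates a
+     * sniffer that uses the default sniff interval and a default
+     * {@link ElasticsearchNodesSniffer}.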
 *
- * @param restClient the client that gets its hosts set (via {@link RestClient#setHosts(HttpHost...)}) once they are fetched
+ * @param restClient the client that gets its hosts set (via
+ *                   {@link RestClient#setNodes(Collection)}) once they are fetched
  * @return a new instance of {@link SnifferBuilder}
  */
 public static SnifferBuilder builder(RestClient restClient) {
diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java
index 010a8a4a78d20..48ca52d423012 100644
--- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java
+++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SnifferBuilder.java
@@ -34,7 +34,7 @@ public final class SnifferBuilder {
     private final RestClient restClient;
     private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL;
     private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY;
-    private HostsSniffer hostsSniffer;
+    private NodesSniffer nodesSniffer;

     /**
      * Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch
@@ -69,13 +69,13 @@ public SnifferBuilder setSniffAfterFailureDelayMillis(int sniffAfterFailureDelay
     }

     /**
-     * Sets the {@link HostsSniffer} to be used to read hosts. A default instance of {@link ElasticsearchHostsSniffer}
-     * is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchHostsSniffer},
+     * Sets the {@link NodesSniffer} to be used to read hosts. A default instance of {@link ElasticsearchNodesSniffer}
+     * is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchNodesSniffer},
      * or to provide a different implementation (e.g. in case hosts need to be taken from a different source).
      */
-    public SnifferBuilder setHostsSniffer(HostsSniffer hostsSniffer) {
-        Objects.requireNonNull(hostsSniffer, "hostsSniffer cannot be null");
-        this.hostsSniffer = hostsSniffer;
+    public SnifferBuilder setNodesSniffer(NodesSniffer nodesSniffer) {
+        Objects.requireNonNull(nodesSniffer, "nodesSniffer cannot be null");
+        this.nodesSniffer = nodesSniffer;
         return this;
     }

@@ -83,9 +83,9 @@ public SnifferBuilder setHostsSniffer(HostsSniffer hostsSniffer)
     /**
      * Creates the {@link Sniffer} based on the provided configuration.
      */
     public Sniffer build() {
-        if (hostsSniffer == null) {
-            this.hostsSniffer = new ElasticsearchHostsSniffer(restClient);
+        if (nodesSniffer == null) {
+            this.nodesSniffer = new ElasticsearchNodesSniffer(restClient);
         }
-        return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis);
+        return new Sniffer(restClient, nodesSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis);
     }
 }
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java
new file mode 100644
index 0000000000000..712a836a17b8a
--- /dev/null
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.sniff; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.InputStreamEntity; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.RestClientTestCase; +import org.elasticsearch.client.Node.Roles; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer.Scheme; + +import java.io.IOException; +import java.io.InputStream; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.fasterxml.jackson.core.JsonFactory; + +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.junit.Assert.assertThat; + +/** + * Test parsing the response from the {@code /_nodes/http} API from fixed + * versions of Elasticsearch. + */ +public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase { + private void checkFile(String file, Node... expected) throws IOException { + InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream(file); + if (in == null) { + throw new IllegalArgumentException("Couldn't find [" + file + "]"); + } + try { + HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON); + List nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory()); + // Use these assertions because the error messages are nicer than hasItems. 
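+            // (hasSize plus one hasItem per expected node checks the same
+            // membership, but a failure prints the whole actual list)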
+ assertThat(nodes, hasSize(expected.length)); + for (Node expectedNode : expected) { + assertThat(nodes, hasItem(expectedNode)); + } + } finally { + in.close(); + } + } + + public void test2x() throws IOException { + checkFile("2.0.0_nodes_http.json", + node(9200, "m1", "2.0.0", true, false, false), + node(9202, "m2", "2.0.0", true, true, false), + node(9201, "m3", "2.0.0", true, false, false), + node(9205, "d1", "2.0.0", false, true, false), + node(9204, "d2", "2.0.0", false, true, false), + node(9203, "d3", "2.0.0", false, true, false), + node(9207, "c1", "2.0.0", false, false, false), + node(9206, "c2", "2.0.0", false, false, false)); + } + + public void test5x() throws IOException { + checkFile("5.0.0_nodes_http.json", + node(9200, "m1", "5.0.0", true, false, true), + node(9201, "m2", "5.0.0", true, true, true), + node(9202, "m3", "5.0.0", true, false, true), + node(9203, "d1", "5.0.0", false, true, true), + node(9204, "d2", "5.0.0", false, true, true), + node(9205, "d3", "5.0.0", false, true, true), + node(9206, "c1", "5.0.0", false, false, true), + node(9207, "c2", "5.0.0", false, false, true)); + } + + public void test6x() throws IOException { + checkFile("6.0.0_nodes_http.json", + node(9200, "m1", "6.0.0", true, false, true), + node(9201, "m2", "6.0.0", true, true, true), + node(9202, "m3", "6.0.0", true, false, true), + node(9203, "d1", "6.0.0", false, true, true), + node(9204, "d2", "6.0.0", false, true, true), + node(9205, "d3", "6.0.0", false, true, true), + node(9206, "c1", "6.0.0", false, false, true), + node(9207, "c2", "6.0.0", false, false, true)); + } + + private Node node(int port, String name, String version, boolean master, boolean data, boolean ingest) { + HttpHost host = new HttpHost("127.0.0.1", port); + Set boundHosts = new HashSet<>(2); + boundHosts.add(host); + boundHosts.add(new HttpHost("[::1]", port)); + return new Node(host, boundHosts, name, version, new Roles(master, data, ingest)); + } +} diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java similarity index 76% rename from client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java rename to client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java index ed2744df31c61..260832ca90e17 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java @@ -30,6 +30,7 @@ import org.apache.http.Consts; import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Node; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -44,10 +45,10 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -59,17 +60,17 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; -public class ElasticsearchHostsSnifferTests extends RestClientTestCase { +public class ElasticsearchNodesSnifferTests extends RestClientTestCase { private int sniffRequestTimeout; - 
private ElasticsearchHostsSniffer.Scheme scheme; + private ElasticsearchNodesSniffer.Scheme scheme; private SniffResponse sniffResponse; private HttpServer httpServer; @Before public void startHttpServer() throws IOException { this.sniffRequestTimeout = RandomNumbers.randomIntBetween(getRandom(), 1000, 10000); - this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchHostsSniffer.Scheme.values()); + this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchNodesSniffer.Scheme.values()); if (rarely()) { this.sniffResponse = SniffResponse.buildFailure(); } else { @@ -86,7 +87,7 @@ public void stopHttpServer() throws IOException { public void testConstructorValidation() throws IOException { try { - new ElasticsearchHostsSniffer(null, 1, ElasticsearchHostsSniffer.Scheme.HTTP); + new ElasticsearchNodesSniffer(null, 1, ElasticsearchNodesSniffer.Scheme.HTTP); fail("should have failed"); } catch(NullPointerException e) { assertEquals("restClient cannot be null", e.getMessage()); @@ -94,14 +95,14 @@ public void testConstructorValidation() throws IOException { HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); try (RestClient restClient = RestClient.builder(httpHost).build()) { try { - new ElasticsearchHostsSniffer(restClient, 1, null); + new ElasticsearchNodesSniffer(restClient, 1, null); fail("should have failed"); } catch (NullPointerException e) { assertEquals(e.getMessage(), "scheme cannot be null"); } try { - new ElasticsearchHostsSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0), - ElasticsearchHostsSniffer.Scheme.HTTP); + new ElasticsearchNodesSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0), + ElasticsearchNodesSniffer.Scheme.HTTP); fail("should have failed"); } catch (IllegalArgumentException e) { assertEquals(e.getMessage(), "sniffRequestTimeoutMillis must be greater than 0"); @@ -112,17 +113,13 @@ public void testConstructorValidation() throws IOException { public void testSniffNodes() throws IOException { HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); try (RestClient restClient = RestClient.builder(httpHost).build()) { - ElasticsearchHostsSniffer sniffer = new ElasticsearchHostsSniffer(restClient, sniffRequestTimeout, scheme); + ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer(restClient, sniffRequestTimeout, scheme); try { - List sniffedHosts = sniffer.sniffHosts(); + List sniffedNodes = sniffer.sniff(); if (sniffResponse.isFailure) { fail("sniffNodes should have failed"); } - assertThat(sniffedHosts.size(), equalTo(sniffResponse.hosts.size())); - Iterator responseHostsIterator = sniffResponse.hosts.iterator(); - for (HttpHost sniffedHost : sniffedHosts) { - assertEquals(sniffedHost, responseHostsIterator.next()); - } + assertEquals(sniffResponse.result, sniffedNodes); } catch(ResponseException e) { Response response = e.getResponse(); if (sniffResponse.isFailure) { @@ -173,9 +170,9 @@ public void handle(HttpExchange httpExchange) throws IOException { } } - private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme scheme) throws IOException { + private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme scheme) throws IOException { int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5); - List hosts = new ArrayList<>(numNodes); + List nodes = new ArrayList<>(numNodes); JsonFactory jsonFactory = new JsonFactory(); 
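        // The generated body mirrors a real /_nodes/http response, roughly:
        //   {"nodes": {"<id>": {"http": {"bound_address": [...],
        //       "publish_address": "host:port"}, "roles": [...],
        //       "version": "...", "name": "...", "attributes": {...}}}}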
StringWriter writer = new StringWriter(); JsonGenerator generator = jsonFactory.createGenerator(writer); @@ -190,6 +187,23 @@ private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme generator.writeObjectFieldStart("nodes"); for (int i = 0; i < numNodes; i++) { String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10); + String host = "host" + i; + int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); + HttpHost publishHost = new HttpHost(host, port, scheme.toString()); + Set boundHosts = new HashSet<>(); + boundHosts.add(publishHost); + + if (randomBoolean()) { + int bound = between(1, 5); + for (int b = 0; b < bound; b++) { + boundHosts.add(new HttpHost(host + b, port, scheme.toString())); + } + } + + Node node = new Node(publishHost, boundHosts, randomAsciiAlphanumOfLength(5), + randomAsciiAlphanumOfLength(5), + new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean())); + generator.writeObjectFieldStart(nodeId); if (getRandom().nextBoolean()) { generator.writeObjectFieldStart("bogus_object"); @@ -203,44 +217,45 @@ private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme } boolean isHttpEnabled = rarely() == false; if (isHttpEnabled) { - String host = "host" + i; - int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); - HttpHost httpHost = new HttpHost(host, port, scheme.toString()); - hosts.add(httpHost); + nodes.add(node); generator.writeObjectFieldStart("http"); - if (getRandom().nextBoolean()) { - generator.writeArrayFieldStart("bound_address"); - generator.writeString("[fe80::1]:" + port); - generator.writeString("[::1]:" + port); - generator.writeString("127.0.0.1:" + port); - generator.writeEndArray(); + generator.writeArrayFieldStart("bound_address"); + for (HttpHost bound : boundHosts) { + generator.writeString(bound.toHostString()); } + generator.writeEndArray(); if (getRandom().nextBoolean()) { generator.writeObjectFieldStart("bogus_object"); generator.writeEndObject(); } - generator.writeStringField("publish_address", httpHost.toHostString()); + generator.writeStringField("publish_address", publishHost.toHostString()); if (getRandom().nextBoolean()) { generator.writeNumberField("max_content_length_in_bytes", 104857600); } generator.writeEndObject(); } - if (getRandom().nextBoolean()) { - String[] roles = {"master", "data", "ingest"}; - int numRoles = RandomNumbers.randomIntBetween(getRandom(), 0, 3); - Set nodeRoles = new HashSet<>(numRoles); - for (int j = 0; j < numRoles; j++) { - String role; - do { - role = RandomPicks.randomFrom(getRandom(), roles); - } while(nodeRoles.add(role) == false); + + List roles = Arrays.asList(new String[] {"master", "data", "ingest"}); + Collections.shuffle(roles, getRandom()); + generator.writeArrayFieldStart("roles"); + for (String role : roles) { + if ("master".equals(role) && node.getRoles().isMasterEligible()) { + generator.writeString("master"); } - generator.writeArrayFieldStart("roles"); - for (String nodeRole : nodeRoles) { - generator.writeString(nodeRole); + if ("data".equals(role) && node.getRoles().isData()) { + generator.writeString("data"); + } + if ("ingest".equals(role) && node.getRoles().isIngest()) { + generator.writeString("ingest"); } - generator.writeEndArray(); } + generator.writeEndArray(); + + generator.writeFieldName("version"); + generator.writeString(node.getVersion()); + generator.writeFieldName("name"); + generator.writeString(node.getName()); + int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 
3); Map attributes = new HashMap<>(numAttributes); for (int j = 0; j < numAttributes; j++) { @@ -260,18 +275,18 @@ private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme generator.writeEndObject(); generator.writeEndObject(); generator.close(); - return SniffResponse.buildResponse(writer.toString(), hosts); + return SniffResponse.buildResponse(writer.toString(), nodes); } private static class SniffResponse { private final String nodesInfoBody; private final int nodesInfoResponseCode; - private final List hosts; + private final List result; private final boolean isFailure; - SniffResponse(String nodesInfoBody, List hosts, boolean isFailure) { + SniffResponse(String nodesInfoBody, List result, boolean isFailure) { this.nodesInfoBody = nodesInfoBody; - this.hosts = hosts; + this.result = result; this.isFailure = isFailure; if (isFailure) { this.nodesInfoResponseCode = randomErrorResponseCode(); @@ -281,11 +296,11 @@ private static class SniffResponse { } static SniffResponse buildFailure() { - return new SniffResponse("", Collections.emptyList(), true); + return new SniffResponse("", Collections.emptyList(), true); } - static SniffResponse buildResponse(String nodesInfoBody, List hosts) { - return new SniffResponse(nodesInfoBody, hosts, false); + static SniffResponse buildResponse(String nodesInfoBody, List nodes) { + return new SniffResponse(nodesInfoBody, nodes, false); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockNodesSniffer.java similarity index 78% rename from client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java rename to client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockNodesSniffer.java index 7550459e9ea50..8acd929498e1b 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockNodesSniffer.java @@ -20,16 +20,17 @@ package org.elasticsearch.client.sniff; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import java.util.Collections; import java.util.List; /** - * Mock implementation of {@link HostsSniffer}. Useful to prevent any connection attempt while testing builders etc. + * Mock implementation of {@link NodesSniffer}. Useful to prevent any connection attempt while testing builders etc. 
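+ * Always returns a single node, {@code http://localhost:9200}.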
*/ -class MockHostsSniffer implements HostsSniffer { +class MockNodesSniffer implements NodesSniffer { @Override - public List sniffHosts() { - return Collections.singletonList(new HttpHost("localhost", 9200)); + public List sniff() { + return Collections.singletonList(new Node(new HttpHost("localhost", 9200))); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java index 1fece270ffe0d..225bdb9a0097e 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.client.sniff; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientTestCase; @@ -46,7 +47,7 @@ public void testSetSniffer() throws Exception { } try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) { - try (Sniffer sniffer = Sniffer.builder(restClient).setHostsSniffer(new MockHostsSniffer()).build()) { + try (Sniffer sniffer = Sniffer.builder(restClient).setNodesSniffer(new MockNodesSniffer()).build()) { listener.setSniffer(sniffer); try { listener.setSniffer(sniffer); @@ -54,7 +55,7 @@ public void testSetSniffer() throws Exception { } catch(IllegalStateException e) { assertEquals("sniffer can only be set once", e.getMessage()); } - listener.onFailure(new HttpHost("localhost", 9200)); + listener.onFailure(new Node(new HttpHost("localhost", 9200))); } } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index 9a7359e9c7215..f924a9fbebc81 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -61,10 +61,10 @@ public void testBuild() throws Exception { try { - Sniffer.builder(client).setHostsSniffer(null); + Sniffer.builder(client).setNodesSniffer(null); fail("should have failed"); } catch(NullPointerException e) { - assertEquals("hostsSniffer cannot be null", e.getMessage()); + assertEquals("nodesSniffer cannot be null", e.getMessage()); } @@ -80,7 +80,7 @@ public void testBuild() throws Exception { builder.setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); } if (getRandom().nextBoolean()) { - builder.setHostsSniffer(new MockHostsSniffer()); + builder.setNodesSniffer(new MockNodesSniffer()); } try (Sniffer sniffer = builder.build()) { diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index 8172774a77d80..00c5eb31d17e8 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -20,11 +20,11 @@ package org.elasticsearch.client.sniff; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientTestCase; import org.elasticsearch.client.sniff.Sniffer.DefaultScheduler; import 
org.elasticsearch.client.sniff.Sniffer.Scheduler; -import org.mockito.Matchers; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -62,6 +62,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyCollectionOf; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -71,12 +72,12 @@ public class SnifferTests extends RestClientTestCase { /** - * Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link HostsSniffer} implementation + * Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link NodesSniffer} implementation * to retrieve nodes and set them (when not empty) to the provided {@link RestClient} instance. */ public void testSniff() throws IOException { - HttpHost initialHost = new HttpHost("localhost", 9200); - try (RestClient restClient = RestClient.builder(initialHost).build()) { + Node initialNode = new Node(new HttpHost("localhost", 9200)); + try (RestClient restClient = RestClient.builder(initialNode).build()) { Scheduler noOpScheduler = new Scheduler() { @Override public Future schedule(Sniffer.Task task, long delayMillis) { @@ -88,53 +89,53 @@ public void shutdown() { } }; - CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); int iters = randomIntBetween(5, 30); - try (Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 1000L, -1)){ + try (Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 1000L, -1)){ { - assertEquals(1, restClient.getHosts().size()); - HttpHost httpHost = restClient.getHosts().get(0); - assertEquals("localhost", httpHost.getHostName()); - assertEquals(9200, httpHost.getPort()); + assertEquals(1, restClient.getNodes().size()); + Node node = restClient.getNodes().get(0); + assertEquals("localhost", node.getHost().getHostName()); + assertEquals(9200, node.getHost().getPort()); } int emptyList = 0; int failures = 0; int runs = 0; - List lastHosts = Collections.singletonList(initialHost); + List lastNodes = Collections.singletonList(initialNode); for (int i = 0; i < iters; i++) { try { runs++; sniffer.sniff(); - if (hostsSniffer.failures.get() > failures) { + if (nodesSniffer.failures.get() > failures) { failures++; - fail("should have failed given that hostsSniffer says it threw an exception"); - } else if (hostsSniffer.emptyList.get() > emptyList) { + fail("should have failed given that nodesSniffer says it threw an exception"); + } else if (nodesSniffer.emptyList.get() > emptyList) { emptyList++; - assertEquals(lastHosts, restClient.getHosts()); + assertEquals(lastNodes, restClient.getNodes()); } else { - assertNotEquals(lastHosts, restClient.getHosts()); - List expectedHosts = CountingHostsSniffer.buildHosts(runs); - assertEquals(expectedHosts, restClient.getHosts()); - lastHosts = restClient.getHosts(); + assertNotEquals(lastNodes, restClient.getNodes()); + List expectedNodes = CountingNodesSniffer.buildNodes(runs); + assertEquals(expectedNodes, restClient.getNodes()); + lastNodes = restClient.getNodes(); } } catch(IOException e) { - if (hostsSniffer.failures.get() > failures) { + if (nodesSniffer.failures.get() > failures) { failures++; assertEquals("communication breakdown", e.getMessage()); } } } - assertEquals(hostsSniffer.emptyList.get(), emptyList); - 
assertEquals(hostsSniffer.failures.get(), failures); - assertEquals(hostsSniffer.runs.get(), runs); + assertEquals(nodesSniffer.emptyList.get(), emptyList); + assertEquals(nodesSniffer.failures.get(), failures); + assertEquals(nodesSniffer.runs.get(), runs); } } } /** - * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}. + * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link NodesSniffer}. * Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled. - * The {@link CountingHostsSniffer} doesn't make any network connection but may throw exception or return no hosts, which makes + * The {@link CountingNodesSniffer} doesn't make any network connection but may throw an exception or return no nodes, which makes * it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling. * The {@link Scheduler} implementation submits rather than scheduling tasks, meaning that it doesn't respect the requested sniff * delays while allowing to assert that the requested delays for each requested run and the following one are the expected values. @@ -143,7 +144,7 @@ public void testOrdinarySniffRounds() throws Exception { final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); RestClient restClient = mock(RestClient.class); - CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); final int iters = randomIntBetween(30, 100); final Set<Future<?>> futures = new CopyOnWriteArraySet<>(); final CountDownLatch completionLatch = new CountDownLatch(1); @@ -185,7 +186,7 @@ public void shutdown() { } }; try { - new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS)); assertEquals(iters, futures.size()); //the last future is the only one that may not be completed yet, as the count down happens @@ -200,10 +201,10 @@ public void shutdown() { executor.shutdown(); assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS)); } - int totalRuns = hostsSniffer.runs.get(); + int totalRuns = nodesSniffer.runs.get(); assertEquals(iters, totalRuns); - int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get(); - verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg()); + int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); + verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class)); verifyNoMoreInteractions(restClient); } @@ -234,7 +235,7 @@ public void shutdown() { } }; - Sniffer sniffer = new Sniffer(restClient, new MockHostsSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay); + Sniffer sniffer = new Sniffer(restClient, new MockNodesSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay); assertEquals(0, shutdown.get()); int iters = randomIntBetween(3, 10); for (int i = 1; i <= iters; i++) { @@ -246,7 +247,7 @@ public void shutdown() { public void testSniffOnFailureNotInitialized() { RestClient restClient = mock(RestClient.class); - CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); long
sniffInterval = randomLongBetween(1, Long.MAX_VALUE); long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); final AtomicInteger scheduleCalls = new AtomicInteger(0); @@ -262,15 +263,15 @@ public void shutdown() { } }; - Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + Sniffer sniffer = new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); for (int i = 0; i < 10; i++) { sniffer.sniffOnFailure(); } assertEquals(1, scheduleCalls.get()); - int totalRuns = hostsSniffer.runs.get(); + int totalRuns = nodesSniffer.runs.get(); assertEquals(0, totalRuns); - int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get(); - verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg()); + int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); + verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class)); verifyNoMoreInteractions(restClient); } @@ -281,7 +282,7 @@ public void shutdown() { */ public void testSniffOnFailure() throws Exception { RestClient restClient = mock(RestClient.class); - CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + CountingNodesSniffer nodesSniffer = new CountingNodesSniffer(); final AtomicBoolean initializing = new AtomicBoolean(true); final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); @@ -351,7 +352,7 @@ private Future scheduleOrSubmit(Sniffer.Task task) { public void shutdown() { } }; - final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + final Sniffer sniffer = new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); assertTrue("timeout waiting for sniffer to get initialized", initializingLatch.await(1000, TimeUnit.MILLISECONDS)); ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20)); @@ -413,9 +414,9 @@ public void run() { } assertEquals(onFailureTasks.size(), cancelledTasks); - assertEquals(completedTasks, hostsSniffer.runs.get()); - int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get(); - verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg()); + assertEquals(completedTasks, nodesSniffer.runs.get()); + int setNodesRuns = nodesSniffer.runs.get() - nodesSniffer.failures.get() - nodesSniffer.emptyList.get(); + verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class)); verifyNoMoreInteractions(restClient); } finally { executor.shutdown(); @@ -446,7 +447,7 @@ private static boolean assertTaskCancelledOrCompleted(Sniffer.ScheduledTask task public void testTaskCancelling() throws Exception { RestClient restClient = mock(RestClient.class); - HostsSniffer hostsSniffer = mock(HostsSniffer.class); + NodesSniffer nodesSniffer = mock(NodesSniffer.class); Scheduler noOpScheduler = new Scheduler() { @Override public Future schedule(Sniffer.Task task, long delayMillis) { @@ -457,7 +458,7 @@ public Future schedule(Sniffer.Task task, long delayMillis) { public void shutdown() { } }; - Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L); + Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 0L, 0L); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); try { int numIters = randomIntBetween(50, 100); @@ 
-540,18 +541,18 @@ boolean await() throws InterruptedException { } /** - * Mock {@link HostsSniffer} implementation used for testing, which most of the times return a fixed host. - * It rarely throws exception or return an empty list of hosts, to make sure that such situations are properly handled. + * Mock {@link NodesSniffer} implementation used for testing, which most of the time returns a fixed node. + * It rarely throws an exception or returns an empty list of nodes, to make sure that such situations are properly handled. * It also asserts that it never gets called concurrently, based on the assumption that only one sniff run can be run * at a given point in time. */ - private static class CountingHostsSniffer implements HostsSniffer { + private static class CountingNodesSniffer implements NodesSniffer { private final AtomicInteger runs = new AtomicInteger(0); private final AtomicInteger failures = new AtomicInteger(0); private final AtomicInteger emptyList = new AtomicInteger(0); @Override - public List<HttpHost> sniffHosts() throws IOException { + public List<Node> sniff() throws IOException { int run = runs.incrementAndGet(); if (rarely()) { failures.incrementAndGet(); @@ -562,24 +563,23 @@ public List<HttpHost> sniffHosts() throws IOException { emptyList.incrementAndGet(); return Collections.emptyList(); } - return buildHosts(run); + return buildNodes(run); } - private static List<HttpHost> buildHosts(int run) { + private static List<Node> buildNodes(int run) { int size = run % 5 + 1; assert size > 0; - List<HttpHost> hosts = new ArrayList<>(size); + List<Node> nodes = new ArrayList<>(size); for (int i = 0; i < size; i++) { - hosts.add(new HttpHost("sniffed-" + run, 9200 + i)); + nodes.add(new Node(new HttpHost("sniffed-" + run, 9200 + i))); } - return hosts; + return nodes; } } - @SuppressWarnings("unchecked") public void testDefaultSchedulerSchedule() { RestClient restClient = mock(RestClient.class); - HostsSniffer hostsSniffer = mock(HostsSniffer.class); + NodesSniffer nodesSniffer = mock(NodesSniffer.class); Scheduler noOpScheduler = new Scheduler() { @Override public Future<?> schedule(Sniffer.Task task, long delayMillis) { @@ -591,7 +591,7 @@ public void shutdown() { } }; - Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L); + Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 0L, 0L); Sniffer.Task task = sniffer.new Task(randomLongBetween(1, Long.MAX_VALUE)); ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class); diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java index 199632d478f81..5f305024dba20 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/documentation/SnifferDocumentation.java @@ -20,9 +20,10 @@ package org.elasticsearch.client.sniff.documentation; import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer; -import org.elasticsearch.client.sniff.HostsSniffer; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; +import org.elasticsearch.client.sniff.NodesSniffer; import org.elasticsearch.client.sniff.SniffOnFailureListener; import org.elasticsearch.client.sniff.Sniffer; @@ -91,12 +92,12 @@ public void testUsage() throws
IOException { RestClient restClient = RestClient.builder( new HttpHost("localhost", 9200, "http")) .build(); - HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer( + NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer( restClient, - ElasticsearchHostsSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, - ElasticsearchHostsSniffer.Scheme.HTTPS); + ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, + ElasticsearchNodesSniffer.Scheme.HTTPS); Sniffer sniffer = Sniffer.builder(restClient) - .setHostsSniffer(hostsSniffer).build(); + .setNodesSniffer(nodesSniffer).build(); //end::sniffer-https } { @@ -104,28 +105,28 @@ public void testUsage() throws IOException { RestClient restClient = RestClient.builder( new HttpHost("localhost", 9200, "http")) .build(); - HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer( + NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer( restClient, TimeUnit.SECONDS.toMillis(5), - ElasticsearchHostsSniffer.Scheme.HTTP); + ElasticsearchNodesSniffer.Scheme.HTTP); Sniffer sniffer = Sniffer.builder(restClient) - .setHostsSniffer(hostsSniffer).build(); + .setNodesSniffer(nodesSniffer).build(); //end::sniff-request-timeout } { - //tag::custom-hosts-sniffer + //tag::custom-nodes-sniffer RestClient restClient = RestClient.builder( new HttpHost("localhost", 9200, "http")) .build(); - HostsSniffer hostsSniffer = new HostsSniffer() { + NodesSniffer nodesSniffer = new NodesSniffer() { @Override - public List sniffHosts() throws IOException { + public List sniff() throws IOException { return null; // <1> } }; Sniffer sniffer = Sniffer.builder(restClient) - .setHostsSniffer(hostsSniffer).build(); - //end::custom-hosts-sniffer + .setNodesSniffer(nodesSniffer).build(); + //end::custom-nodes-sniffer } } } diff --git a/client/sniffer/src/test/resources/2.0.0_nodes_http.json b/client/sniffer/src/test/resources/2.0.0_nodes_http.json new file mode 100644 index 0000000000000..b370e78e16011 --- /dev/null +++ b/client/sniffer/src/test/resources/2.0.0_nodes_http.json @@ -0,0 +1,141 @@ +{ + "cluster_name" : "elasticsearch", + "nodes" : { + "qYUZ_8bTRwODPxukDlFw6Q" : { + "name" : "d2", + "transport_address" : "127.0.0.1:9304", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9204", + "attributes" : { + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9204", "[::1]:9204" ], + "publish_address" : "127.0.0.1:9204", + "max_content_length_in_bytes" : 104857600 + } + }, + "Yej5UVNgR2KgBjUFHOQpCw" : { + "name" : "c1", + "transport_address" : "127.0.0.1:9307", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9207", + "attributes" : { + "data" : "false", + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9207", "[::1]:9207" ], + "publish_address" : "127.0.0.1:9207", + "max_content_length_in_bytes" : 104857600 + } + }, + "mHttJwhwReangKEx9EGuAg" : { + "name" : "m3", + "transport_address" : "127.0.0.1:9301", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9201", + "attributes" : { + "data" : "false", + "master" : "true" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9201", "[::1]:9201" ], + "publish_address" : "127.0.0.1:9201", + "max_content_length_in_bytes" : 104857600 + } + }, + "6Erdptt_QRGLxMiLi9mTkg" : { + "name" : "c2", + "transport_address" : "127.0.0.1:9306", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : 
"2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9206", + "attributes" : { + "data" : "false", + "client" : "true" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9206", "[::1]:9206" ], + "publish_address" : "127.0.0.1:9206", + "max_content_length_in_bytes" : 104857600 + } + }, + "mLRCZBypTiys6e8KY5DMnA" : { + "name" : "m1", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9200", + "attributes" : { + "data" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9200", "[::1]:9200" ], + "publish_address" : "127.0.0.1:9200", + "max_content_length_in_bytes" : 104857600 + } + }, + "pVqOhytXQwetsZVzCBppYw" : { + "name" : "m2", + "transport_address" : "127.0.0.1:9302", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9202", + "http" : { + "bound_address" : [ "127.0.0.1:9202", "[::1]:9202" ], + "publish_address" : "127.0.0.1:9202", + "max_content_length_in_bytes" : 104857600 + } + }, + "ARyzVfpJSw2a9TOIUpbsBA" : { + "name" : "d1", + "transport_address" : "127.0.0.1:9305", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9205", + "attributes" : { + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9205", "[::1]:9205" ], + "publish_address" : "127.0.0.1:9205", + "max_content_length_in_bytes" : 104857600 + } + }, + "2Hpid-g5Sc2BKCevhN6VQw" : { + "name" : "d3", + "transport_address" : "127.0.0.1:9303", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "2.0.0", + "build" : "de54438", + "http_address" : "127.0.0.1:9203", + "attributes" : { + "master" : "false" + }, + "http" : { + "bound_address" : [ "127.0.0.1:9203", "[::1]:9203" ], + "publish_address" : "127.0.0.1:9203", + "max_content_length_in_bytes" : 104857600 + } + } + } +} diff --git a/client/sniffer/src/test/resources/5.0.0_nodes_http.json b/client/sniffer/src/test/resources/5.0.0_nodes_http.json new file mode 100644 index 0000000000000..7a7d143ecaf43 --- /dev/null +++ b/client/sniffer/src/test/resources/5.0.0_nodes_http.json @@ -0,0 +1,169 @@ +{ + "_nodes" : { + "total" : 8, + "successful" : 8, + "failed" : 0 + }, + "cluster_name" : "test", + "nodes" : { + "DXz_rhcdSF2xJ96qyjaLVw" : { + "name" : "m1", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9200", + "127.0.0.1:9200" + ], + "publish_address" : "127.0.0.1:9200", + "max_content_length_in_bytes" : 104857600 + } + }, + "53Mi6jYdRgeR1cdyuoNfQQ" : { + "name" : "m2", + "transport_address" : "127.0.0.1:9301", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "master", + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9201", + "127.0.0.1:9201" + ], + "publish_address" : "127.0.0.1:9201", + "max_content_length_in_bytes" : 104857600 + } + }, + "XBIghcHiRlWP9c4vY6rETw" : { + "name" : "c2", + "transport_address" : "127.0.0.1:9307", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address" : "127.0.0.1:9207", + "max_content_length_in_bytes" : 104857600 + } + }, + 
"cFM30FlyS8K1njH_bovwwQ" : { + "name" : "d1", + "transport_address" : "127.0.0.1:9303", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address" : "127.0.0.1:9203", + "max_content_length_in_bytes" : 104857600 + } + }, + "eoVUVRGNRDyyOapqIcrsIA" : { + "name" : "d2", + "transport_address" : "127.0.0.1:9304", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address" : "127.0.0.1:9204", + "max_content_length_in_bytes" : 104857600 + } + }, + "xPN76uDcTP-DyXaRzPg2NQ" : { + "name" : "c1", + "transport_address" : "127.0.0.1:9306", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address" : "127.0.0.1:9206", + "max_content_length_in_bytes" : 104857600 + } + }, + "RY0oW2d7TISEqazk-U4Kcw" : { + "name" : "d3", + "transport_address" : "127.0.0.1:9305", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address" : "127.0.0.1:9205", + "max_content_length_in_bytes" : 104857600 + } + }, + "tU0rXEZmQ9GsWfn2TQ4kow" : { + "name" : "m3", + "transport_address" : "127.0.0.1:9302", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9202", + "127.0.0.1:9202" + ], + "publish_address" : "127.0.0.1:9202", + "max_content_length_in_bytes" : 104857600 + } + } + } +} diff --git a/client/sniffer/src/test/resources/6.0.0_nodes_http.json b/client/sniffer/src/test/resources/6.0.0_nodes_http.json new file mode 100644 index 0000000000000..5a8905da64c89 --- /dev/null +++ b/client/sniffer/src/test/resources/6.0.0_nodes_http.json @@ -0,0 +1,169 @@ +{ + "_nodes" : { + "total" : 8, + "successful" : 8, + "failed" : 0 + }, + "cluster_name" : "test", + "nodes" : { + "FX9npqGQSL2mOGF8Zkf3hw" : { + "name" : "m2", + "transport_address" : "127.0.0.1:9301", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "master", + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9201", + "127.0.0.1:9201" + ], + "publish_address" : "127.0.0.1:9201", + "max_content_length_in_bytes" : 104857600 + } + }, + "jmUqzYLGTbWCg127kve3Tg" : { + "name" : "d1", + "transport_address" : "127.0.0.1:9303", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9203", + "127.0.0.1:9203" + ], + "publish_address" : "127.0.0.1:9203", + "max_content_length_in_bytes" : 104857600 + } + }, + "soBU6bzvTOqdLxPstSbJ2g" : { + "name" : "d3", + "transport_address" : "127.0.0.1:9305", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9205", + "127.0.0.1:9205" + ], + "publish_address" : "127.0.0.1:9205", + "max_content_length_in_bytes" : 104857600 + } 
+ }, + "mtYDAhURTP6twdmNAkMnOg" : { + "name" : "m3", + "transport_address" : "127.0.0.1:9302", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9202", + "127.0.0.1:9202" + ], + "publish_address" : "127.0.0.1:9202", + "max_content_length_in_bytes" : 104857600 + } + }, + "URxHiUQPROOt1G22Ev6lXw" : { + "name" : "c2", + "transport_address" : "127.0.0.1:9307", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9207", + "127.0.0.1:9207" + ], + "publish_address" : "127.0.0.1:9207", + "max_content_length_in_bytes" : 104857600 + } + }, + "_06S_kWoRqqFR8Z8CS3JRw" : { + "name" : "c1", + "transport_address" : "127.0.0.1:9306", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9206", + "127.0.0.1:9206" + ], + "publish_address" : "127.0.0.1:9206", + "max_content_length_in_bytes" : 104857600 + } + }, + "QZE5Bd6DQJmnfVs2dglOvA" : { + "name" : "d2", + "transport_address" : "127.0.0.1:9304", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "data", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9204", + "127.0.0.1:9204" + ], + "publish_address" : "127.0.0.1:9204", + "max_content_length_in_bytes" : 104857600 + } + }, + "_3mTXg6dSweZn5ReB2fQqw" : { + "name" : "m1", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "6.0.0", + "build_hash" : "8f0685b", + "roles" : [ + "master", + "ingest" + ], + "http" : { + "bound_address" : [ + "[::1]:9200", + "127.0.0.1:9200" + ], + "publish_address" : "127.0.0.1:9200", + "max_content_length_in_bytes" : 104857600 + } + } + } +} diff --git a/client/sniffer/src/test/resources/readme.txt b/client/sniffer/src/test/resources/readme.txt new file mode 100644 index 0000000000000..ccb9bb15edb55 --- /dev/null +++ b/client/sniffer/src/test/resources/readme.txt @@ -0,0 +1,4 @@ +`*_node_http.json` contains files created by spinning up toy clusters with a +few nodes in different configurations locally at various versions. They are +for testing `ElasticsearchNodesSniffer` against different versions of +Elasticsearch. diff --git a/docs/java-rest/high-level/getting-started.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc index 14a5058eb7272..3e9b9fa7ea08f 100644 --- a/docs/java-rest/high-level/getting-started.asciidoc +++ b/docs/java-rest/high-level/getting-started.asciidoc @@ -144,3 +144,13 @@ include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[rest-high-level-cl In the rest of this documentation about the Java High Level Client, the `RestHighLevelClient` instance will be referenced as `client`. + +[[java-rest-hight-getting-started-request-options]] +=== RequestOptions + +All APIs in the `RestHighLevelClient` accept a `RequestOptions` which you can +use to customize the request in ways that won't change how Elasticsearch +executes the request. For example, this is the place where you'd specify a +`NodeSelector` to control which node receives the request. See the +<> for +more examples of customizing the options. 
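The new section above describes `RequestOptions` without an inline example, and the `include-tagged` snippets live in test sources that are not part of this patch. A minimal, hypothetical sketch of the usage being described might look like the following, assuming the `get` overload that accepts `RequestOptions` (per the statement above that all `RestHighLevelClient` APIs accept one); the index, type and id are purely illustrative:

["source","java"]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class RequestOptionsSketch {
    public static void main(String[] args) throws Exception {
        RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")));

        // Options shared by every request: steer requests away from
        // dedicated master nodes.
        RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
        builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
        RequestOptions options = builder.build();

        GetRequest request = new GetRequest("posts", "doc", "1"); // illustrative
        GetResponse response = client.get(request, options);
        System.out.println(response.getSourceAsString());

        client.close();
    }
}
--------------------------------------------------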
diff --git a/docs/java-rest/low-level/sniffer.asciidoc b/docs/java-rest/low-level/sniffer.asciidoc index 4f846847615ea..1ffaa519cfb50 100644 --- a/docs/java-rest/low-level/sniffer.asciidoc +++ b/docs/java-rest/low-level/sniffer.asciidoc @@ -55,7 +55,7 @@ dependencies { Once a `RestClient` instance has been created as shown in <>, a `Sniffer` can be associated to it. The `Sniffer` will make use of the provided `RestClient` to periodically (every 5 minutes by default) fetch the list of current nodes from the cluster -and update them by calling `RestClient#setHosts`. +and update them by calling `RestClient#setNodes`. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -105,7 +105,7 @@ on failure is not enabled like explained above. The Elasticsearch Nodes Info api doesn't return the protocol to use when connecting to the nodes but only their `host:port` key-pair, hence `http` is used by default. In case `https` should be used instead, the -`ElasticsearchHostsSniffer` instance has to be manually created and provided +`ElasticsearchNodesSniffer` instance has to be manually created and provided as follows: ["source","java",subs="attributes,callouts,macros"] @@ -125,12 +125,12 @@ cluster, the ones that have responded until then. include-tagged::{doc-tests}/SnifferDocumentation.java[sniff-request-timeout] -------------------------------------------------- -Also, a custom `HostsSniffer` implementation can be provided for advanced -use-cases that may require fetching the hosts from external sources rather +Also, a custom `NodesSniffer` implementation can be provided for advanced +use-cases that may require fetching the `Node`s from external sources rather than from Elasticsearch: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/SnifferDocumentation.java[custom-hosts-sniffer] +include-tagged::{doc-tests}/SnifferDocumentation.java[custom-nodes-sniffer] -------------------------------------------------- <1> Fetch the hosts from the external source diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 012ce418226cd..407947000de35 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -271,24 +271,51 @@ a `ContentType` of `application/json`. include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body-shorter] -------------------------------------------------- -And you can add one or more headers to send with the request: +[[java-rest-low-usage-request-options]] +==== RequestOptions + +The `RequestOptions` class holds parts of the request that should be shared +between many requests in the same application. You can make a singleton +instance and share it between all requests: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-singleton] -------------------------------------------------- +<1> Add any headers needed by all requests. +<2> Set a `NodeSelector`. +<3> Customize the response consumer. + +`addHeader` is for headers that are required for authorization or to work with +a proxy in front of Elasticsearch. There is no need to set the `Content-Type` +header because the client will automatically set that from the `HttpEntity` +attached to the request. 
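The `rest-client-options-singleton` snippet referenced here is likewise not visible in this patch; a sketch of the kind of singleton being described might look like the one below. The `Authorization` value is a placeholder and the 30MB buffer limit is only an example; the surrounding paragraphs explain each of the three customizations:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.client.RequestOptions;

public final class CommonOptions {
    public static final RequestOptions COMMON_OPTIONS;
    static {
        RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
        // Headers every request needs, e.g. for authorization or a proxy.
        builder.addHeader("Authorization", "Bearer <placeholder-token>");
        // Keep requests away from dedicated master nodes.
        builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
        // Lower the response buffer from the default 100MB to 30MB.
        builder.setHttpAsyncResponseConsumerFactory(
                new HeapBufferedResponseConsumerFactory(30 * 1024 * 1024));
        COMMON_OPTIONS = builder.build();
    }

    private CommonOptions() {}
}
--------------------------------------------------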
+ +You can set the `NodeSelector` which controls which nodes will receive +requests. `NodeSelector.NOT_MASTER_ONLY` is a good choice. You can also customize the response consumer used to buffer the asynchronous responses. The default consumer will buffer up to 100MB of response on the JVM heap. If the response is larger than that, the request will fail. You could, for example, lower the maximum size which might be useful if you are running -in a heap constrained environment: +in a heap constrained environment like the example above. + +Once you've created the singleton you can use it when making requests: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-set-singleton] +-------------------------------------------------- + +You can also customize these options on a per request basis. For example, this +adds an extra header: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize] -------------------------------------------------- + ==== Multiple parallel asynchronous actions The client is quite happy to execute many actions in parallel. The following diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index c93873a5be429..c2259c7b55d14 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -197,6 +197,24 @@ header. The warnings must match exactly. Using it looks like this: id: 1 .... +If the arguments to `do` include `node_selector` then the request is only +sent to nodes that match the `node_selector`. Currently only the `version` +selector is supported and it has the same logic as the `version` field in +`skip`. It looks like this: + +.... +"test id": + - skip: + features: node_selector + - do: + node_selector: + version: " - 6.9.99" + index: + index: test-weird-index-中文 + type: weird.type + id: 1 + body: { foo: bar } +....
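For a Java-based runner, the `version` selector maps naturally onto the low-level client's `NodeSelector`. The sketch below (class name hypothetical) mirrors the logic this patch adds to `DoSection` further on: drop any node whose sniffed version falls outside the requested range.

["source","java"]
--------------------------------------------------
import java.util.Iterator;

import org.elasticsearch.Version;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;

/** Hypothetical selector for `node_selector: version: " - 6.9.99"`. */
class VersionRangeNodeSelector implements NodeSelector {
    private final Version min;
    private final Version max;

    VersionRangeNodeSelector(Version min, Version max) {
        this.min = min;
        this.max = max;
    }

    @Override
    public void select(Iterable<Node> nodes) {
        for (Iterator<Node> itr = nodes.iterator(); itr.hasNext(); ) {
            Node node = itr.next();
            if (node.getVersion() == null) {
                throw new IllegalStateException("expected [version] metadata to be set but got " + node);
            }
            Version version = Version.fromString(node.getVersion());
            // Remove nodes outside [min, max]; selectors receive a mutable
            // collection, so removal filters the candidate nodes.
            if (false == (version.onOrAfter(min) && version.onOrBefore(max))) {
                itr.remove();
            }
        }
    }
}
--------------------------------------------------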
=== `set` diff --git a/test/framework/build.gradle b/test/framework/build.gradle index c497b63469450..39f1b75242880 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -21,6 +21,7 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks; dependencies { compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" + compile "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}" compile "org.elasticsearch:elasticsearch:${version}" compile "org.elasticsearch:elasticsearch-cli:${version}" compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java index ac9b87c8fc6fe..df79b8f4a7add 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java @@ -20,6 +20,8 @@ package org.elasticsearch.test.rest.yaml; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -45,22 +47,29 @@ public ClientYamlDocsTestClient(ClientYamlSuiteRestSpec restSpec, RestClient res super(restSpec, restClient, hosts, esVersion); } - public ClientYamlTestResponse callApi(String apiName, Map params, HttpEntity entity, Map headers) - throws IOException { + @Override + public ClientYamlTestResponse callApi(String apiName, Map params, HttpEntity entity, + Map headers, NodeSelector nodeSelector) throws IOException { if ("raw".equals(apiName)) { - // Raw requests are bit simpler.... + // Raw requests don't use the rest spec at all and are configured entirely by their parameters Map queryStringParams = new HashMap<>(params); String method = Objects.requireNonNull(queryStringParams.remove("method"), "Method must be set to use raw request"); String path = "/" + Objects.requireNonNull(queryStringParams.remove("path"), "Path must be set to use raw request"); - // And everything else is a url parameter! 
+ Request request = new Request(method, path); + // All other parameters are url parameters + for (Map.Entry param : queryStringParams.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setEntity(entity); + setOptions(request, headers, nodeSelector); try { - Response response = restClient.performRequest(method, path, queryStringParams, entity); + Response response = restClient.performRequest(request); return new ClientYamlTestResponse(response); } catch (ResponseException e) { throw new ClientYamlTestResponseException(e); } } - return super.callApi(apiName, params, entity, headers); + return super.callApi(apiName, params, entity, headers, nodeSelector); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 795d99c51ef43..ddf50c193d3b2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -19,18 +19,19 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.RestClient; -import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestPath; @@ -75,8 +76,8 @@ public Version getEsVersion() { /** * Calls an api with the provided parameters and body */ - public ClientYamlTestResponse callApi(String apiName, Map params, HttpEntity entity, Map headers) - throws IOException { + public ClientYamlTestResponse callApi(String apiName, Map params, HttpEntity entity, + Map headers, NodeSelector nodeSelector) throws IOException { ClientYamlSuiteRestApi restApi = restApi(apiName); @@ -161,22 +162,33 @@ public ClientYamlTestResponse callApi(String apiName, Map params requestPath = finalPath.toString(); } - Header[] requestHeaders = new Header[headers.size()]; - int index = 0; - for (Map.Entry header : headers.entrySet()) { - logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); - requestHeaders[index++] = new BasicHeader(header.getKey(), header.getValue()); - } + logger.debug("calling api [{}]", apiName); + Request request = new Request(requestMethod, requestPath); + for (Map.Entry param : queryStringParams.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setEntity(entity); + setOptions(request, headers, nodeSelector); try { - Response response = restClient.performRequest(requestMethod, requestPath, queryStringParams, entity, 
requestHeaders); + Response response = restClient.performRequest(request); return new ClientYamlTestResponse(response); } catch(ResponseException e) { throw new ClientYamlTestResponseException(e); } } + protected static void setOptions(Request request, Map headers, NodeSelector nodeSelector) { + RequestOptions.Builder options = request.getOptions().toBuilder(); + for (Map.Entry header : headers.entrySet()) { + logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); + options.addHeader(header.getKey(), header.getValue()); + } + options.setNodeSelector(nodeSelector); + request.setOptions(options); + } + private static boolean sendBodyAsSourceParam(List supportedMethods, String contentType, long contentLength) { if (false == supportedMethods.contains(HttpGet.METHOD_NAME)) { // The API doesn't claim to support GET anyway diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index ca04c0c53d12a..e1d889a899565 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -25,6 +25,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -68,6 +69,15 @@ public class ClientYamlTestExecutionContext { */ public ClientYamlTestResponse callApi(String apiName, Map params, List> bodies, Map headers) throws IOException { + return callApi(apiName, params, bodies, headers, NodeSelector.ANY); + } + + /** + * Calls an elasticsearch api with the parameters and request body provided as arguments. + * Saves the obtained response in the execution context. 
+ */ + public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies, + Map<String, String> headers, NodeSelector nodeSelector) throws IOException { //makes a copy of the parameters before modifying them for this specific request Map<String, String> requestParams = new HashMap<>(params); requestParams.putIfAbsent("error_trace", "true"); // By default ask for error traces, this may be overridden by params @@ -87,7 +97,7 @@ public ClientYamlTestResponse callApi(String apiName, Map<String, String> params HttpEntity entity = createEntity(bodies, requestHeaders); try { - response = callApiInternal(apiName, requestParams, entity, requestHeaders); + response = callApiInternal(apiName, requestParams, entity, requestHeaders, nodeSelector); return response; } catch(ClientYamlTestResponseException e) { response = e.getRestTestResponse(); @@ -153,9 +163,9 @@ private BytesRef bodyAsBytesRef(Map<String, Object> bodyAsMap, XContentType xCon } // pkg-private for testing - ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params, - HttpEntity entity, Map<String, String> headers) throws IOException { - return clientYamlTestClient.callApi(apiName, params, entity, headers); + ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params, HttpEntity entity, + Map<String, String> headers, NodeSelector nodeSelector) throws IOException { + return clientYamlTestClient.callApi(apiName, params, entity, headers, nodeSelector); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index de4b451807d99..9fa13859042ba 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -22,9 +22,11 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.http.HttpHost; import org.elasticsearch.Version; +import org.elasticsearch.client.Node; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; @@ -47,11 +49,20 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; /** - * Runs a suite of yaml tests shared with all the official Elasticsearch clients against against an elasticsearch cluster. + * Runs a suite of yaml tests shared with all the official Elasticsearch + * clients against an elasticsearch cluster. + *

+ * IMPORTANT: These tests sniff the cluster for metadata + * and hosts on startup and replace the list of hosts that they are + * configured to use with the list sniffed from the cluster. So you can't + * control which nodes receive the request by providing the right list of + * nodes in the tests.rest.cluster system property. Instead + * the tests must explicitly use `node_selector`s. */ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { @@ -102,6 +113,11 @@ protected ESClientYamlSuiteTestCase(ClientYamlTestCandidate testCandidate) { @Before public void initAndResetContext() throws Exception { if (restTestExecutionContext == null) { + // Sniff host metadata in case we need it in the yaml tests + List<Node> nodesWithMetadata = sniffHostMetadata(adminClient()); + client().setNodes(nodesWithMetadata); + adminClient().setNodes(nodesWithMetadata); + assert adminExecutionContext == null; assert blacklistPathMatchers == null; ClientYamlSuiteRestSpec restSpec = ClientYamlSuiteRestSpec.load(SPEC_PATH); @@ -376,4 +392,15 @@ private String errorMessage(ExecutableSection executableSection, Throwable t) { protected boolean randomizeContentType() { return true; } + + /** + * Sniff the cluster for host metadata. + */ + private List<Node> sniffHostMetadata(RestClient client) throws IOException { + ElasticsearchNodesSniffer.Scheme scheme = + ElasticsearchNodesSniffer.Scheme.valueOf(getProtocol().toUpperCase(Locale.ROOT)); + ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer( + adminClient(), ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); + return sniffer.sniff(); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index 3168543b5554b..f3201f3ae60bb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -37,6 +37,7 @@ public final class Features { "catch_unauthorized", "embedded_stash_key", "headers", + "node_selector", "stash_in_key", "stash_in_path", "stash_path_replace", diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java deleted file mode 100644 index de63b46eff313..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Parses YAML test {@link org.elasticsearch.test.rest.yaml.section.ClientYamlTestSuite}s containing - * {@link org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection}s.
- */ -package org.elasticsearch.test.rest.yaml.parser; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java index 4553845458541..de73fefaea776 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java @@ -24,6 +24,8 @@ import java.util.List; import java.util.Map; +import org.elasticsearch.client.NodeSelector; + import static java.util.Collections.unmodifiableMap; /** @@ -35,6 +37,7 @@ public class ApiCallSection { private final Map params = new HashMap<>(); private final Map headers = new HashMap<>(); private final List> bodies = new ArrayList<>(); + private NodeSelector nodeSelector = NodeSelector.ANY; public ApiCallSection(String api) { this.api = api; @@ -76,4 +79,18 @@ public void addBody(Map body) { public boolean hasBody() { return bodies.size() > 0; } + + /** + * Selects the node on which to run this request. + */ + public NodeSelector getNodeSelector() { + return nodeSelector; + } + + /** + * Set the selector that decides which node can run this request. + */ + public void setNodeSelector(NodeSelector nodeSelector) { + this.nodeSelector = nodeSelector; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java index 321d22ed70aa7..1ec2382fac596 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -91,6 +92,12 @@ public void addExecutableSection(ExecutableSection executableSection) { + "runners that do not support the [warnings] section can skip the test at line [" + doSection.getLocation().lineNumber + "]"); } + if (NodeSelector.ANY != doSection.getApiCallSection().getNodeSelector() + && false == skipSection.getFeatures().contains("node_selector")) { + throw new IllegalArgumentException("Attempted to add a [do] with a [node_selector] section without a corresponding " + + "[skip] so runners that do not support the [node_selector] section can skip the test at line [" + + doSection.getLocation().lineNumber + "]"); + } } this.executableSections.add(executableSection); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 81d5c1d32a94b..4754ea0fc4d66 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -20,6 +20,9 @@ package org.elasticsearch.test.rest.yaml.section; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.collect.Tuple; @@ -37,9 +40,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.TreeMap; import java.util.regex.Matcher; @@ -84,6 +89,7 @@ public static DoSection parse(XContentParser parser) throws IOException { DoSection doSection = new DoSection(parser.getTokenLocation()); ApiCallSection apiCallSection = null; + NodeSelector nodeSelector = NodeSelector.ANY; Map headers = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); List expectedWarnings = new ArrayList<>(); @@ -120,6 +126,18 @@ public static DoSection parse(XContentParser parser) throws IOException { headers.put(headerName, parser.text()); } } + } else if ("node_selector".equals(currentFieldName)) { + String selectorName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + selectorName = parser.currentName(); + } else if (token.isValue()) { + NodeSelector newSelector = buildNodeSelector( + parser.getTokenLocation(), selectorName, parser.text()); + nodeSelector = nodeSelector == NodeSelector.ANY ? + newSelector : new ComposeNodeSelector(nodeSelector, newSelector); + } + } } else if (currentFieldName != null) { // must be part of API call then apiCallSection = new ApiCallSection(currentFieldName); String paramName = null; @@ -152,6 +170,7 @@ public static DoSection parse(XContentParser parser) throws IOException { throw new IllegalArgumentException("client call section is mandatory within a do section"); } apiCallSection.addHeaders(headers); + apiCallSection.setNodeSelector(nodeSelector); doSection.setApiCallSection(apiCallSection); doSection.setExpectedWarningHeaders(unmodifiableList(expectedWarnings)); } finally { @@ -221,7 +240,7 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx try { ClientYamlTestResponse response = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), - apiCallSection.getBodies(), apiCallSection.getHeaders()); + apiCallSection.getBodies(), apiCallSection.getHeaders(), apiCallSection.getNodeSelector()); if (Strings.hasLength(catchParam)) { String catchStatusCode; if (catches.containsKey(catchParam)) { @@ -346,4 +365,61 @@ private String formatStatusCodeMessage(ClientYamlTestResponse restTestResponse, not(equalTo(408)), not(equalTo(409))))); } + + private static NodeSelector buildNodeSelector(XContentLocation location, String name, String value) { + switch (name) { + case "version": + Version[] range = SkipSection.parseVersionRange(value); + return new NodeSelector() { + @Override + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + Node node = itr.next(); + if (node.getVersion() == null) { + throw new IllegalStateException("expected [version] metadata to be set but got " + + node); + } + Version version = Version.fromString(node.getVersion()); + if (false == (version.onOrAfter(range[0]) && version.onOrBefore(range[1]))) { + itr.remove(); + } + } + } + + @Override + public String toString() { + return "version between [" + range[0] + "] and [" + range[1] + "]"; + } + }; + default: + throw new IllegalArgumentException("unknown node_selector [" + name + "]"); + } + } + + /** + * Selector that composes two selectors, running the "right" most selector + * first and then running the "left" selector on the 
results of the "right" + * selector. + */ + private static class ComposeNodeSelector implements NodeSelector { + private final NodeSelector lhs; + private final NodeSelector rhs; + + private ComposeNodeSelector(NodeSelector lhs, NodeSelector rhs) { + this.lhs = Objects.requireNonNull(lhs, "lhs is required"); + this.rhs = Objects.requireNonNull(rhs, "rhs is required"); + } + + @Override + public void select(Iterable nodes) { + rhs.select(nodes); + lhs.select(nodes); + } + + @Override + public String toString() { + // . as in haskell's "compose" operator + return lhs + "." + rhs; + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java index eb1fea4b79aed..e487f8e74da3b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java @@ -153,7 +153,7 @@ public boolean isEmpty() { return EMPTY.equals(this); } - private Version[] parseVersionRange(String versionRange) { + static Version[] parseVersionRange(String versionRange) { if (versionRange == null) { return new Version[] { null, null }; } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 2150baf59eab0..fbf7f10e5e186 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.rest.yaml; import org.apache.http.HttpEntity; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -36,8 +37,7 @@ public void testHeadersSupportStashedValueReplacement() throws IOException { new ClientYamlTestExecutionContext(null, randomBoolean()) { @Override ClientYamlTestResponse callApiInternal(String apiName, Map params, - HttpEntity entity, - Map headers) { + HttpEntity entity, Map headers, NodeSelector nodeSelector) { headersRef.set(headers); return null; } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index ecee131c7a28e..87f2d7f9a53f8 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.rest.yaml.section; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -35,11 +36,12 @@ import static org.hamcrest.Matchers.nullValue; public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentParserTestCase { - public void testAddingDoWithoutWarningWithoutSkip() { + public void testAddingDoWithoutSkips() { int lineNumber = between(1, 10000); ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); 
section.setSkipSection(SkipSection.EMPTY); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + doSection.setApiCallSection(new ApiCallSection("test")); section.addExecutableSection(doSection); } @@ -49,6 +51,7 @@ public void testAddingDoWithWarningWithSkip() { section.setSkipSection(new SkipSection(null, singletonList("warnings"), null)); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeaders(singletonList("foo")); + doSection.setApiCallSection(new ApiCallSection("test")); section.addExecutableSection(doSection); } @@ -58,11 +61,37 @@ public void testAddingDoWithWarningWithSkipButNotWarnings() { section.setSkipSection(new SkipSection(null, singletonList("yaml"), null)); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeaders(singletonList("foo")); + doSection.setApiCallSection(new ApiCallSection("test")); Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection)); assertEquals("Attempted to add a [do] with a [warnings] section without a corresponding [skip] so runners that do not support the" + " [warnings] section can skip the test at line [" + lineNumber + "]", e.getMessage()); } + public void testAddingDoWithNodeSelectorWithSkip() { + int lineNumber = between(1, 10000); + ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); + section.setSkipSection(new SkipSection(null, singletonList("node_selector"), null)); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + ApiCallSection apiCall = new ApiCallSection("test"); + apiCall.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + doSection.setApiCallSection(apiCall); + section.addExecutableSection(doSection); + } + + public void testAddingDoWithNodeSelectorWithSkipButNotWarnings() { + int lineNumber = between(1, 10000); + ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); + section.setSkipSection(new SkipSection(null, singletonList("yaml"), null)); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + ApiCallSection apiCall = new ApiCallSection("test"); + apiCall.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); + doSection.setApiCallSection(apiCall); + Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection)); + assertEquals("Attempted to add a [do] with a [node_selector] section without a corresponding" + + " [skip] so runners that do not support the [node_selector] section can skip the test at" + + " line [" + lineNumber + "]", e.getMessage()); + } + public void testWrongIndentation() throws Exception { { XContentParser parser = createParser(YamlXContent.yamlXContent, diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index 982eac4b80274..d5ee934bc531d 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -19,23 +19,35 @@ package org.elasticsearch.test.rest.yaml.section; +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContent; import 
org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.yaml.YamlXContent; +import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import java.util.Map; import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class DoSectionTests extends AbstractClientYamlTestFragmentParserTestCase { @@ -496,7 +508,40 @@ public void testParseDoSectionExpectedWarnings() throws Exception { assertThat(doSection.getApiCallSection(), notNullValue()); assertThat(doSection.getExpectedWarningHeaders(), equalTo(singletonList( "just one entry this time"))); + } + + public void testNodeSelector() throws IOException { + parser = createParser(YamlXContent.yamlXContent, + "node_selector:\n" + + " version: 5.2.0-6.0.0\n" + + "indices.get_field_mapping:\n" + + " index: test_index" + ); + + DoSection doSection = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node v170 = nodeWithVersion("1.7.0"); + Node v521 = nodeWithVersion("5.2.1"); + Node v550 = nodeWithVersion("5.5.0"); + Node v612 = nodeWithVersion("6.1.2"); + List nodes = new ArrayList<>(); + nodes.add(v170); + nodes.add(v521); + nodes.add(v550); + nodes.add(v612); + doSection.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(Arrays.asList(v521, v550), nodes); + ClientYamlTestExecutionContext context = mock(ClientYamlTestExecutionContext.class); + ClientYamlTestResponse mockResponse = mock(ClientYamlTestResponse.class); + when(context.callApi("indices.get_field_mapping", singletonMap("index", "test_index"), + emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector())).thenReturn(mockResponse); + doSection.execute(context); + verify(context).callApi("indices.get_field_mapping", singletonMap("index", "test_index"), + emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector()); + } + private Node nodeWithVersion(String version) { + return new Node(new HttpHost("dummy"), null, null, version, null); } private void assertJsonEquals(Map actual, String expected) throws IOException { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java index af0c543678048..e2ca3516159a0 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java @@ -17,8 +17,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; -import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer; -import 
org.elasticsearch.client.sniff.ElasticsearchHostsSniffer.Scheme; +import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -303,11 +302,12 @@ static Sniffer createSniffer(final Config config, final RestClient client, final if (sniffingEnabled) { final List hosts = HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings()); // createHosts(config) ensures that all schemes are the same for all hosts! - final Scheme scheme = hosts.get(0).startsWith("https") ? Scheme.HTTPS : Scheme.HTTP; - final ElasticsearchHostsSniffer hostsSniffer = - new ElasticsearchHostsSniffer(client, ElasticsearchHostsSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); + final ElasticsearchNodesSniffer.Scheme scheme = hosts.get(0).startsWith("https") ? + ElasticsearchNodesSniffer.Scheme.HTTPS : ElasticsearchNodesSniffer.Scheme.HTTP; + final ElasticsearchNodesSniffer hostsSniffer = + new ElasticsearchNodesSniffer(client, ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme); - sniffer = Sniffer.builder(client).setHostsSniffer(hostsSniffer).build(); + sniffer = Sniffer.builder(client).setNodesSniffer(hostsSniffer).build(); // inform the sniffer whenever there's a node failure listener.setSniffer(sniffer); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java index 92febdf3561f8..aa8d2da070eb5 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java @@ -8,6 +8,7 @@ import org.apache.http.HttpHost; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.client.Node; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.common.Nullable; @@ -76,7 +77,8 @@ public void setResource(@Nullable final HttpResource resource) { } @Override - public void onFailure(final HttpHost host) { + public void onFailure(final Node node) { + HttpHost host = node.getHost(); logger.warn("connection failed to node at [{}://{}:{}]", host.getSchemeName(), host.getHostName(), host.getPort()); final HttpResource resource = this.resource.get(); @@ -90,4 +92,4 @@ public void onFailure(final HttpHost host) { } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java index 52eed801b3273..2c8c700fcf615 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java @@ -8,6 +8,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.sniff.Sniffer; @@ -44,8 +45,6 @@ 
import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyMapOf; -import static org.mockito.Matchers.eq; import static org.mockito.Mockito.atMost; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.inOrder;
@@ -300,7 +299,7 @@ public void testCreateSniffer() throws IOException { final StringEntity entity = new StringEntity("{}", ContentType.APPLICATION_JSON); when(response.getEntity()).thenReturn(entity); - when(client.performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class))).thenReturn(response); + when(client.performRequest(any(Request.class))).thenReturn(response); try (Sniffer sniffer = HttpExporter.createSniffer(config, client, listener)) { assertThat(sniffer, not(nullValue()));
@@ -309,7 +308,7 @@ public void testCreateSniffer() throws IOException { } // it's a race whether it triggers this at all - verify(client, atMost(1)).performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class)); + verify(client, atMost(1)).performRequest(any(Request.class)); verifyNoMoreInteractions(client, listener); }
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java index 08512e82e145d..a81874b7fa2c1 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java
@@ -7,6 +7,7 @@ import org.apache.http.HttpHost; import org.apache.lucene.util.SetOnce.AlreadySetException; +import org.elasticsearch.client.Node; import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.test.ESTestCase;
@@ -21,7 +22,7 @@ public class NodeFailureListenerTests extends ESTestCase { private final Sniffer sniffer = mock(Sniffer.class); private final HttpResource resource = new MockHttpResource(getTestName(), false); - private final HttpHost host = new HttpHost("localhost", 9200); + private final Node node = new Node(new HttpHost("localhost", 9200)); private final NodeFailureListener listener = new NodeFailureListener();
@@ -44,7 +45,7 @@ public void testSetResourceTwiceFails() { public void testSnifferNotifiedOnFailure() { listener.setSniffer(sniffer); - listener.onFailure(host); + listener.onFailure(node); verify(sniffer).sniffOnFailure(); }
@@ -52,7 +53,7 @@ public void testSnifferNotifiedOnFailure() { public void testResourceNotifiedOnFailure() { listener.setResource(resource); - listener.onFailure(host); + listener.onFailure(node); assertTrue(resource.isDirty()); }
@@ -64,7 +65,7 @@ public void testResourceAndSnifferNotifiedOnFailure() { listener.setResource(optionalResource); listener.setSniffer(optionalSniffer); - listener.onFailure(host); + listener.onFailure(node); if (optionalResource != null) { assertTrue(resource.isDirty());
From 3c0a375f87673ccd002923177d971c2c5489a691 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 15 Jun 2018 08:02:47 -0400 Subject: [PATCH 05/41] LLClient: Fix assertion on Windows

On Windows the exception message is ever so slightly different than on Linux and OS X. That is fine. We'll just catch either.
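For illustration, this is the shape of the fix (a sketch; the exact Windows wording is an assumption here, commonly "Connection refused: connect" as opposed to the plain "Connection refused" seen on Linux and OS X):

    } catch (ConnectException e) {
        // Match the shared prefix of both platforms' messages instead of
        // asserting on the full, platform-specific text.
        assertThat(e.getMessage(), startsWith("Connection refused"));
    }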
--- .../client/RestClientMultipleHostsIntegTests.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index 92a960090ad6a..d09741ea25b6c 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java
@@ -42,7 +42,9 @@ import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail;
@@ -214,7 +216,8 @@ public void testNodeSelector() throws IOException { restClient.performRequest(request); fail("expected to fail to connect"); } catch (ConnectException e) { - assertEquals("Connection refused", e.getMessage()); + // This is different on Windows and Linux but this matches both. + assertThat(e.getMessage(), startsWith("Connection refused")); } } else { Response response = restClient.performRequest(request);
From f03de827c0f58d90ce96db3d417a78c2d12912d6 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 15 Jun 2018 08:04:54 -0400 Subject: [PATCH 06/41] REST Client: NodeSelector for node attributes (#31296)

Add a `NodeSelector` so that users can filter the nodes that receive requests based on node attributes. I believe we'll need this to backport #30523 and we want it anyway. I also added a bash script to help with rebuilding the sniffer parsing test documents.
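As a sketch of the intended usage (the attribute name/value and the endpoint are placeholders; COMMON_OPTIONS is the shared RequestOptions instance from the low-level client docs snippet updated below):

    RequestOptions.Builder options = COMMON_OPTIONS.toBuilder();
    // Only send this request to nodes started with node.attr.rack: c12
    options.setNodeSelector(new HasAttributeNodeSelector("rack", "c12"));
    Request request = new Request("GET", "/_cluster/health");
    request.setOptions(options);
    Response response = restClient.performRequest(request);

The selector filters the set of known nodes per request, so it composes with sniffing rather than changing which nodes the client tracks.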
--- .../client/HasAttributeNodeSelector.java | 56 ++++ .../java/org/elasticsearch/client/Node.java | 27 +- .../client/HasAttributeNodeSelectorTests.java | 59 ++++ .../client/NodeSelectorTests.java | 5 +- .../org/elasticsearch/client/NodeTests.java | 50 ++- .../client/RestClientMultipleHostsTests.java | 2 +- .../elasticsearch/client/RestClientTests.java | 6 +- .../RestClientDocumentation.java | 20 +- .../sniff/ElasticsearchNodesSniffer.java | 126 +++++--- .../ElasticsearchNodesSnifferParseTests.java | 34 +- .../sniff/ElasticsearchNodesSnifferTests.java | 33 +- .../src/test/resources/2.0.0_nodes_http.json | 290 ++++++++++------- .../src/test/resources/5.0.0_nodes_http.json | 272 +++++++++------- .../src/test/resources/6.0.0_nodes_http.json | 294 ++++++++++-------- .../resources/create_test_nodes_info.bash | 107 +++++++ client/sniffer/src/test/resources/readme.txt | 2 + docs/java-rest/low-level/usage.asciidoc | 10 +- .../rest-api-spec/test/README.asciidoc | 17 +- .../test/rest/yaml/section/DoSection.java | 87 ++++-- .../rest/yaml/section/DoSectionTests.java | 89 +++++- 20 files changed, 1110 insertions(+), 476 deletions(-) create mode 100644 client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java create mode 100644 client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java create mode 100644 client/sniffer/src/test/resources/create_test_nodes_info.bash diff --git a/client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java new file mode 100644 index 0000000000000..e4bb43458648b --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * A {@link NodeSelector} that selects nodes that have a particular value + * for an attribute. 
+ */
+public final class HasAttributeNodeSelector implements NodeSelector {
+ private final String key;
+ private final String value;
+
+ public HasAttributeNodeSelector(String key, String value) {
+ this.key = key;
+ this.value = value;
+ }
+
+ @Override
+ public void select(Iterable<Node> nodes) {
+ Iterator<Node> itr = nodes.iterator();
+ while (itr.hasNext()) {
+ Map<String, List<String>> allAttributes = itr.next().getAttributes();
+ if (allAttributes == null) continue;
+ List<String> values = allAttributes.get(key);
+ if (values == null || false == values.contains(value)) {
+ itr.remove();
+ }
+ }
+ }
+
+ @Override
+ public String toString() {
+ return key + "=" + value;
+ }
+}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java index d66d0773016e6..f180b52927545 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Node.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Node.java
@@ -19,6 +19,8 @@ package org.elasticsearch.client; +import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set;
@@ -52,13 +54,18 @@ public class Node { * if we don't know what roles the node has. */ private final Roles roles; + /** + * Attributes declared on the node. + */ + private final Map<String, List<String>> attributes;
/** * Create a {@linkplain Node} with metadata. All parameters except * {@code host} are nullable and implementations of {@link NodeSelector} * need to decide what to do in their absence. */ - public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version, Roles roles) { + public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version, + Roles roles, Map<String, List<String>> attributes) { if (host == null) { throw new IllegalArgumentException("host cannot be null"); }
@@ -67,13 +74,14 @@ public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version this.name = name; this.version = version; this.roles = roles; + this.attributes = attributes; } /** * Create a {@linkplain Node} without any metadata. */ public Node(HttpHost host) { - this(host, null, null, null, null); + this(host, null, null, null, null, null); } /**
@@ -115,6 +123,13 @@ public Roles getRoles() { return roles; } + /** + * Attributes declared on the node. + */ + public Map<String, List<String>> getAttributes() { + return attributes; + } + @Override public String toString() { StringBuilder b = new StringBuilder();
@@ -131,6 +146,9 @@ public String toString() { if (roles != null) { b.append(", roles=").append(roles); } + if (attributes != null) { + b.append(", attributes=").append(attributes); + } return b.append(']').toString(); }
@@ -144,12 +162,13 @@ public boolean equals(Object obj) { && Objects.equals(boundHosts, other.boundHosts) && Objects.equals(name, other.name) && Objects.equals(version, other.version) - && Objects.equals(roles, other.roles); + && Objects.equals(roles, other.roles) + && Objects.equals(attributes, other.attributes); } @Override public int hashCode() { - return Objects.hash(host, boundHosts, name, version, roles); + return Objects.hash(host, boundHosts, name, version, roles, attributes); } /**
diff --git a/client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java new file mode 100644 index 0000000000000..8a7c12e8c62de --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java
@@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Node.Roles; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.junit.Assert.assertEquals; + +public class HasAttributeNodeSelectorTests extends RestClientTestCase { + public void testHasAttribute() { + Node hasAttributeValue = dummyNode(singletonMap("attr", singletonList("val"))); + Node hasAttributeButNotValue = dummyNode(singletonMap("attr", singletonList("notval"))); + Node hasAttributeValueInList = dummyNode(singletonMap("attr", Arrays.asList("val", "notval"))); + Node notHasAttribute = dummyNode(singletonMap("notattr", singletonList("val"))); + List nodes = new ArrayList<>(); + nodes.add(hasAttributeValue); + nodes.add(hasAttributeButNotValue); + nodes.add(hasAttributeValueInList); + nodes.add(notHasAttribute); + List expected = new ArrayList<>(); + expected.add(hasAttributeValue); + expected.add(hasAttributeValueInList); + new HasAttributeNodeSelector("attr", "val").select(nodes); + assertEquals(expected, nodes); + } + + private static Node dummyNode(Map> attributes) { + return new Node(new HttpHost("dummy"), Collections.emptySet(), + randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), + new Roles(randomBoolean(), randomBoolean(), randomBoolean()), + attributes); + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java index d9df001ad437e..868ccdcab757d 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java @@ -63,9 +63,10 @@ public void testNotMasterOnly() { assertEquals(expected, nodes); } - private Node dummyNode(boolean master, boolean data, boolean ingest) { + private static Node dummyNode(boolean master, boolean data, boolean ingest) { return new Node(new HttpHost("dummy"), Collections.emptySet(), randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), - new Roles(master, data, ingest)); + new Roles(master, data, ingest), + Collections.>emptyMap()); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java index c6d60415b88dc..9eeeb1144f485 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java @@ -23,49 +23,67 @@ import org.elasticsearch.client.Node.Roles; import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; +import 
java.util.List; +import java.util.Map; import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; public class NodeTests extends RestClientTestCase { public void testToString() { + Map> attributes = new HashMap<>(); + attributes.put("foo", singletonList("bar")); + attributes.put("baz", Arrays.asList("bort", "zoom")); assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString()); + assertEquals("[host=http://1, attributes={foo=[bar], baz=[bort, zoom]}]", + new Node(new HttpHost("1"), null, null, null, null, attributes).toString()); assertEquals("[host=http://1, roles=mdi]", new Node(new HttpHost("1"), - null, null, null, new Roles(true, true, true)).toString()); + null, null, null, new Roles(true, true, true), null).toString()); assertEquals("[host=http://1, version=ver]", new Node(new HttpHost("1"), - null, null, "ver", null).toString()); + null, null, "ver", null, null).toString()); assertEquals("[host=http://1, name=nam]", new Node(new HttpHost("1"), - null, "nam", null, null).toString()); + null, "nam", null, null, null).toString()); assertEquals("[host=http://1, bound=[http://1, http://2]]", new Node(new HttpHost("1"), - new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null).toString()); - assertEquals("[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m]", + new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null, null).toString()); + assertEquals( + "[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m, attributes={foo=[bar], baz=[bort, zoom]}]", new Node(new HttpHost("1"), new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), - "nam", "ver", new Roles(true, false, false)).toString()); + "nam", "ver", new Roles(true, false, false), attributes).toString()); } public void testEqualsAndHashCode() { HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5)); Node node = new Node(host, - randomBoolean() ? null : singleton(host), - randomBoolean() ? null : randomAsciiAlphanumOfLength(5), - randomBoolean() ? null : randomAsciiAlphanumOfLength(5), - randomBoolean() ? null : new Roles(true, true, true)); + randomBoolean() ? null : singleton(host), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : new Roles(true, true, true), + randomBoolean() ? 
null : singletonMap("foo", singletonList("bar"))); assertFalse(node.equals(null)); assertTrue(node.equals(node)); assertEquals(node.hashCode(), node.hashCode()); - Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles()); + Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), + node.getRoles(), node.getAttributes()); assertTrue(node.equals(copy)); assertEquals(node.hashCode(), copy.hashCode()); assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(), - node.getName(), node.getVersion(), node.getRoles()))); + node.getName(), node.getVersion(), node.getRoles(), node.getAttributes()))); assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))), - node.getName(), node.getVersion(), node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false)))); + node.getName(), node.getVersion(), node.getRoles(), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", + node.getVersion(), node.getRoles(), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), + node.getVersion() + "changed", node.getRoles(), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), + node.getVersion(), new Roles(false, false, false), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), + node.getVersion(), node.getRoles(), singletonMap("bort", singletonList("bing"))))); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index eb591f4ccff3a..d04b3cbb7554e 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -342,7 +342,7 @@ public void testSetNodes() throws IOException { List newNodes = new ArrayList<>(nodes.size()); for (int i = 0; i < nodes.size(); i++) { Roles roles = i == 0 ? 
new Roles(false, true, true) : new Roles(true, false, false); - newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles)); + newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles, null)); } restClient.setNodes(newNodes); int rounds = between(1, 10); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 01f6f308f6227..04742ccab4f32 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -341,9 +341,9 @@ public void testNullPath() throws IOException { } public void testSelectHosts() throws IOException { - Node n1 = new Node(new HttpHost("1"), null, null, "1", null); - Node n2 = new Node(new HttpHost("2"), null, null, "2", null); - Node n3 = new Node(new HttpHost("3"), null, null, "3", null); + Node n1 = new Node(new HttpHost("1"), null, null, "1", null, null); + Node n2 = new Node(new HttpHost("2"), null, null, "2", null, null); + Node n3 = new Node(new HttpHost("3"), null, null, "3", null, null); NodeSelector not1 = new NodeSelector() { @Override diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 0cc41b078b8d6..d3a0202747d25 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -36,6 +36,7 @@ import org.apache.http.ssl.SSLContextBuilder; import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.HasAttributeNodeSelector; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; @@ -190,11 +191,20 @@ public void onFailure(Exception exception) { //tag::rest-client-options-set-singleton request.setOptions(COMMON_OPTIONS); //end::rest-client-options-set-singleton - //tag::rest-client-options-customize - RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); - options.addHeader("cats", "knock things off of other things"); - request.setOptions(options); - //end::rest-client-options-customize + { + //tag::rest-client-options-customize-header + RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); + options.addHeader("cats", "knock things off of other things"); + request.setOptions(options); + //end::rest-client-options-customize-header + } + { + //tag::rest-client-options-customize-attribute + RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); + options.setNodeSelector(new HasAttributeNodeSelector("rack", "c12")); // <1> + request.setOptions(options); + //end::rest-client-options-customize-attribute + } } { HttpEntity[] documents = new HttpEntity[10]; diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java index da7ef4700fd2f..5c947f5625ba0 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java @@ -36,12 +36,18 @@ import java.io.InputStream; import 
java.net.URI; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; +import static java.util.Collections.singletonList; +import static java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableMap; + /** * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back. * Compatible with elasticsearch 2.x+. @@ -138,16 +144,19 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th Set boundHosts = new HashSet<>(); String name = null; String version = null; - String fieldName = null; - // Used to read roles from 5.0+ + /* + * Multi-valued attributes come with key = `real_key.index` and we + * unflip them after reading them because we can't rely on the order + * that they arive. + */ + final Map protoAttributes = new HashMap(); + boolean sawRoles = false; boolean master = false; boolean data = false; boolean ingest = false; - // Used to read roles from 2.x - Boolean masterAttribute = null; - Boolean dataAttribute = null; - boolean clientAttribute = false; + + String fieldName = null; while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.FIELD_NAME) { fieldName = parser.getCurrentName(); @@ -170,13 +179,12 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th } } else if ("attributes".equals(fieldName)) { while (parser.nextToken() != JsonToken.END_OBJECT) { - if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "master".equals(parser.getCurrentName())) { - masterAttribute = toBoolean(parser.getValueAsString()); - } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "data".equals(parser.getCurrentName())) { - dataAttribute = toBoolean(parser.getValueAsString()); - } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "client".equals(parser.getCurrentName())) { - clientAttribute = toBoolean(parser.getValueAsString()); - } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + if (parser.getCurrentToken() == JsonToken.VALUE_STRING) { + String oldValue = protoAttributes.put(parser.getCurrentName(), parser.getValueAsString()); + if (oldValue != null) { + throw new IOException("repeated attribute key [" + parser.getCurrentName() + "]"); + } + } else { parser.skipChildren(); } } @@ -216,21 +224,74 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th if (publishedHost == null) { logger.debug("skipping node [" + nodeId + "] with http disabled"); return null; - } else { - logger.trace("adding node [" + nodeId + "]"); - if (version.startsWith("2.")) { - /* - * 2.x doesn't send roles, instead we try to read them from - * attributes. - */ - master = masterAttribute == null ? false == clientAttribute : masterAttribute; - data = dataAttribute == null ? false == clientAttribute : dataAttribute; - } else { - assert sawRoles : "didn't see roles for [" + nodeId + "]"; + } + + Map> realAttributes = new HashMap<>(protoAttributes.size()); + List keys = new ArrayList<>(protoAttributes.keySet()); + for (String key : keys) { + if (key.endsWith(".0")) { + String realKey = key.substring(0, key.length() - 2); + List values = new ArrayList<>(); + int i = 0; + while (true) { + String value = protoAttributes.remove(realKey + "." 
+ i); + if (value == null) { + break; + } + values.add(value); + i++; + } + realAttributes.put(realKey, unmodifiableList(values)); } - assert boundHosts.contains(publishedHost) : - "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts"; - return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest)); + } + for (Map.Entry entry : protoAttributes.entrySet()) { + realAttributes.put(entry.getKey(), singletonList(entry.getValue())); + } + + if (version.startsWith("2.")) { + /* + * 2.x doesn't send roles, instead we try to read them from + * attributes. + */ + boolean clientAttribute = v2RoleAttributeValue(realAttributes, "client", false); + Boolean masterAttribute = v2RoleAttributeValue(realAttributes, "master", null); + Boolean dataAttribute = v2RoleAttributeValue(realAttributes, "data", null); + master = masterAttribute == null ? false == clientAttribute : masterAttribute; + data = dataAttribute == null ? false == clientAttribute : dataAttribute; + } else { + assert sawRoles : "didn't see roles for [" + nodeId + "]"; + } + assert boundHosts.contains(publishedHost) : + "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts"; + logger.trace("adding node [" + nodeId + "]"); + return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest), + unmodifiableMap(realAttributes)); + } + + /** + * Returns {@code defaultValue} if the attribute didn't come back, + * {@code true} or {@code false} if it did come back as + * either of those, or throws an IOException if the attribute + * came back in a strange way. + */ + private static Boolean v2RoleAttributeValue(Map> attributes, + String name, Boolean defaultValue) throws IOException { + List valueList = attributes.remove(name); + if (valueList == null) { + return defaultValue; + } + if (valueList.size() != 1) { + throw new IOException("expected only a single attribute value for [" + name + "] but got " + + valueList); + } + switch (valueList.get(0)) { + case "true": + return true; + case "false": + return false; + default: + throw new IOException("expected [" + name + "] to be either [true] or [false] but was [" + + valueList.get(0) + "]"); } } @@ -248,15 +309,4 @@ public String toString() { return name; } } - - private static boolean toBoolean(String string) { - switch (string) { - case "true": - return true; - case "false": - return false; - default: - throw new IllegalArgumentException("[" + string + "] is not a valid boolean"); - } - } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java index 712a836a17b8a..edc7330c13074 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java @@ -30,14 +30,18 @@ import java.io.IOException; import java.io.InputStream; +import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import com.fasterxml.jackson.core.JsonFactory; -import static org.hamcrest.Matchers.hasItem; +import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.hasSize; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; /** @@ -53,10 +57,14 @@ private void checkFile(String file, 
Node... expected) throws IOException { try { HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON); List nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory()); - // Use these assertions because the error messages are nicer than hasItems. + /* + * Use these assertions because the error messages are nicer + * than hasItems and we know the results are in order because + * that is how we generated the file. + */ assertThat(nodes, hasSize(expected.length)); - for (Node expectedNode : expected) { - assertThat(nodes, hasItem(expectedNode)); + for (int i = 0; i < expected.length; i++) { + assertEquals(expected[i], nodes.get(i)); } } finally { in.close(); @@ -66,13 +74,13 @@ private void checkFile(String file, Node... expected) throws IOException { public void test2x() throws IOException { checkFile("2.0.0_nodes_http.json", node(9200, "m1", "2.0.0", true, false, false), - node(9202, "m2", "2.0.0", true, true, false), - node(9201, "m3", "2.0.0", true, false, false), - node(9205, "d1", "2.0.0", false, true, false), + node(9201, "m2", "2.0.0", true, true, false), + node(9202, "m3", "2.0.0", true, false, false), + node(9203, "d1", "2.0.0", false, true, false), node(9204, "d2", "2.0.0", false, true, false), - node(9203, "d3", "2.0.0", false, true, false), - node(9207, "c1", "2.0.0", false, false, false), - node(9206, "c2", "2.0.0", false, false, false)); + node(9205, "d3", "2.0.0", false, true, false), + node(9206, "c1", "2.0.0", false, false, false), + node(9207, "c2", "2.0.0", false, false, false)); } public void test5x() throws IOException { @@ -104,6 +112,10 @@ private Node node(int port, String name, String version, boolean master, boolean Set boundHosts = new HashSet<>(2); boundHosts.add(host); boundHosts.add(new HttpHost("[::1]", port)); - return new Node(host, boundHosts, name, version, new Roles(master, data, ingest)); + Map> attributes = new HashMap<>(); + attributes.put("dummy", singletonList("everyone_has_me")); + attributes.put("number", singletonList(name.substring(1))); + attributes.put("array", Arrays.asList(name.substring(0, 1), name.substring(1))); + return new Node(host, boundHosts, name, version, new Roles(master, data, ingest), attributes); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java index 260832ca90e17..3d2a74685afcd 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java @@ -200,9 +200,21 @@ private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme } } + int numAttributes = between(0, 5); + Map> attributes = new HashMap<>(numAttributes); + for (int j = 0; j < numAttributes; j++) { + int numValues = frequently() ? 
1 : between(2, 5); + List values = new ArrayList<>(); + for (int v = 0; v < numValues; v++) { + values.add(j + "value" + v); + } + attributes.put("attr" + j, values); + } + Node node = new Node(publishHost, boundHosts, randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), - new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean())); + new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean()), + attributes); generator.writeObjectFieldStart(nodeId); if (getRandom().nextBoolean()) { @@ -256,18 +268,17 @@ private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme generator.writeFieldName("name"); generator.writeString(node.getName()); - int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3); - Map attributes = new HashMap<>(numAttributes); - for (int j = 0; j < numAttributes; j++) { - attributes.put("attr" + j, "value" + j); - } if (numAttributes > 0) { generator.writeObjectFieldStart("attributes"); - } - for (Map.Entry entry : attributes.entrySet()) { - generator.writeStringField(entry.getKey(), entry.getValue()); - } - if (numAttributes > 0) { + for (Map.Entry> entry : attributes.entrySet()) { + if (entry.getValue().size() == 1) { + generator.writeStringField(entry.getKey(), entry.getValue().get(0)); + } else { + for (int v = 0; v < entry.getValue().size(); v++) { + generator.writeStringField(entry.getKey() + "." + v, entry.getValue().get(v)); + } + } + } generator.writeEndObject(); } generator.writeEndObject(); diff --git a/client/sniffer/src/test/resources/2.0.0_nodes_http.json b/client/sniffer/src/test/resources/2.0.0_nodes_http.json index b370e78e16011..22dc4ec13ed51 100644 --- a/client/sniffer/src/test/resources/2.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/2.0.0_nodes_http.json @@ -1,140 +1,200 @@ { - "cluster_name" : "elasticsearch", - "nodes" : { - "qYUZ_8bTRwODPxukDlFw6Q" : { - "name" : "d2", - "transport_address" : "127.0.0.1:9304", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9204", - "attributes" : { - "master" : "false" + "cluster_name": "elasticsearch", + "nodes": { + "qr-SOrELSaGW8SlU8nflBw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9200", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "data": "false", + "array.1": "1", + "master": "true" }, - "http" : { - "bound_address" : [ "127.0.0.1:9204", "[::1]:9204" ], - "publish_address" : "127.0.0.1:9204", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9200", + "[::1]:9200" + ], + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 } }, - "Yej5UVNgR2KgBjUFHOQpCw" : { - "name" : "c1", - "transport_address" : "127.0.0.1:9307", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9207", - "attributes" : { - "data" : "false", - "master" : "false" + "osfiXxUOQzCVIs-eepgSCA": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9201", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2", + "master": "true" }, - "http" : { - "bound_address" : [ "127.0.0.1:9207", "[::1]:9207" ], - "publish_address" : 
"127.0.0.1:9207", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9201", + "[::1]:9201" + ], + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 } }, - "mHttJwhwReangKEx9EGuAg" : { - "name" : "m3", - "transport_address" : "127.0.0.1:9301", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9201", - "attributes" : { - "data" : "false", - "master" : "true" + "lazeJFiIQ8eHHV4GeIdMPg": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9202", + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "data": "false", + "array.1": "3", + "master": "true" }, - "http" : { - "bound_address" : [ "127.0.0.1:9201", "[::1]:9201" ], - "publish_address" : "127.0.0.1:9201", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9202", + "[::1]:9202" + ], + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 } }, - "6Erdptt_QRGLxMiLi9mTkg" : { - "name" : "c2", - "transport_address" : "127.0.0.1:9306", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9206", - "attributes" : { - "data" : "false", - "client" : "true" + "t9WxK-fNRsqV5G0Mm09KpQ": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9203", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9206", "[::1]:9206" ], - "publish_address" : "127.0.0.1:9206", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9203", + "[::1]:9203" + ], + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 } }, - "mLRCZBypTiys6e8KY5DMnA" : { - "name" : "m1", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9200", - "attributes" : { - "data" : "false" + "wgoDzluvTViwUjEsmVesKw": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9204", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9200", "[::1]:9200" ], - "publish_address" : "127.0.0.1:9200", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9204", + "[::1]:9204" + ], + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 } }, - "pVqOhytXQwetsZVzCBppYw" : { - "name" : "m2", - "transport_address" : "127.0.0.1:9302", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9202", - "http" : { - "bound_address" : [ "127.0.0.1:9202", "[::1]:9202" ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 + "6j_t3pPhSm-oRTyypTzu5g": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + 
"http_address": "127.0.0.1:9205", + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3", + "master": "false" + }, + "http": { + "bound_address": [ + "127.0.0.1:9205", + "[::1]:9205" + ], + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 } }, - "ARyzVfpJSw2a9TOIUpbsBA" : { - "name" : "d1", - "transport_address" : "127.0.0.1:9305", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9205", - "attributes" : { - "master" : "false" + "PaEkm0z7Ssiuyfkh3aASag": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9206", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "data": "false", + "array.1": "1", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9205", "[::1]:9205" ], - "publish_address" : "127.0.0.1:9205", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9206", + "[::1]:9206" + ], + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 } }, - "2Hpid-g5Sc2BKCevhN6VQw" : { - "name" : "d3", - "transport_address" : "127.0.0.1:9303", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9203", - "attributes" : { - "master" : "false" + "LAFKr2K_QmupqnM_atJqkQ": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9207", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "data": "false", + "array.1": "2", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9203", "[::1]:9203" ], - "publish_address" : "127.0.0.1:9203", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9207", + "[::1]:9207" + ], + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 } } } diff --git a/client/sniffer/src/test/resources/5.0.0_nodes_http.json b/client/sniffer/src/test/resources/5.0.0_nodes_http.json index 7a7d143ecaf43..1358438237fc8 100644 --- a/client/sniffer/src/test/resources/5.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/5.0.0_nodes_http.json @@ -1,168 +1,216 @@ { - "_nodes" : { - "total" : 8, - "successful" : 8, - "failed" : 0 + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 }, - "cluster_name" : "test", - "nodes" : { - "DXz_rhcdSF2xJ96qyjaLVw" : { - "name" : "m1", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "cluster_name": "elasticsearch", + "nodes": { + "0S4r3NurTYSFSb8R9SxwWA": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "master", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "array.1": "1" + }, + "http": { + "bound_address": [ "[::1]:9200", "127.0.0.1:9200" ], - "publish_address" : "127.0.0.1:9200", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 } }, - "53Mi6jYdRgeR1cdyuoNfQQ" : { - "name" : "m2", - 
"transport_address" : "127.0.0.1:9301", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "k_CBrMXARkS57Qb5-3Mw5g": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "master", "data", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2" + }, + "http": { + "bound_address": [ "[::1]:9201", "127.0.0.1:9201" ], - "publish_address" : "127.0.0.1:9201", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 } }, - "XBIghcHiRlWP9c4vY6rETw" : { - "name" : "c2", - "transport_address" : "127.0.0.1:9307", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "6eynRPQ1RleJTeGDuTR9mw": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "master", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9207", - "127.0.0.1:9207" + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9202", + "127.0.0.1:9202" ], - "publish_address" : "127.0.0.1:9207", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 } }, - "cFM30FlyS8K1njH_bovwwQ" : { - "name" : "d1", - "transport_address" : "127.0.0.1:9303", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "cbGC-ay1QNWaESvEh5513w": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "data", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1" + }, + "http": { + "bound_address": [ "[::1]:9203", "127.0.0.1:9203" ], - "publish_address" : "127.0.0.1:9203", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 } }, - "eoVUVRGNRDyyOapqIcrsIA" : { - "name" : "d2", - "transport_address" : "127.0.0.1:9304", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "LexndPpXR2ytYsU5fTElnQ": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "data", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2" + }, + "http": { + "bound_address": [ "[::1]:9204", "127.0.0.1:9204" ], - "publish_address" : "127.0.0.1:9204", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 } }, - "xPN76uDcTP-DyXaRzPg2NQ" : { - "name" : "c1", - "transport_address" : "127.0.0.1:9306", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "SbNG1DKYSBu20zfOz2gDZQ": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + 
"roles": [ + "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9206", - "127.0.0.1:9206" + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9205", + "127.0.0.1:9205" ], - "publish_address" : "127.0.0.1:9206", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 } }, - "RY0oW2d7TISEqazk-U4Kcw" : { - "name" : "d3", - "transport_address" : "127.0.0.1:9305", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ - "data", + "fM4H-m2WTDWmsGsL7jIJew": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9205", - "127.0.0.1:9205" + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9206", + "127.0.0.1:9206" ], - "publish_address" : "127.0.0.1:9205", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 } }, - "tU0rXEZmQ9GsWfn2TQ4kow" : { - "name" : "m3", - "transport_address" : "127.0.0.1:9302", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ - "master", + "pFoh7d0BTbqqI3HKd9na5A": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9202", - "127.0.0.1:9202" + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9207", + "127.0.0.1:9207" ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 } } } diff --git a/client/sniffer/src/test/resources/6.0.0_nodes_http.json b/client/sniffer/src/test/resources/6.0.0_nodes_http.json index 5a8905da64c89..f0535dfdfb00f 100644 --- a/client/sniffer/src/test/resources/6.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/6.0.0_nodes_http.json @@ -1,168 +1,216 @@ { - "_nodes" : { - "total" : 8, - "successful" : 8, - "failed" : 0 + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 }, - "cluster_name" : "test", - "nodes" : { - "FX9npqGQSL2mOGF8Zkf3hw" : { - "name" : "m2", - "transport_address" : "127.0.0.1:9301", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ + "cluster_name": "elasticsearch", + "nodes": { + "ikXK_skVTfWkhONhldnbkw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ "master", - "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9201", - "127.0.0.1:9201" + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9200", + "127.0.0.1:9200" ], - "publish_address" : "127.0.0.1:9201", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 } }, - "jmUqzYLGTbWCg127kve3Tg" : { - "name" : "d1", - 
"transport_address" : "127.0.0.1:9303", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ + "TMHa34w4RqeuYoHCfJGXZg": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "master", "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9203", - "127.0.0.1:9203" + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9201", + "127.0.0.1:9201" ], - "publish_address" : "127.0.0.1:9203", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 } }, - "soBU6bzvTOqdLxPstSbJ2g" : { - "name" : "d3", - "transport_address" : "127.0.0.1:9305", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "data", + "lzaMRJTVT166sgVZdQ5thA": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "master", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9205", - "127.0.0.1:9205" + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9202", + "127.0.0.1:9202" ], - "publish_address" : "127.0.0.1:9205", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 } }, - "mtYDAhURTP6twdmNAkMnOg" : { - "name" : "m3", - "transport_address" : "127.0.0.1:9302", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "master", + "tGP5sUecSd6BLTWk1NWF8Q": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9202", - "127.0.0.1:9202" + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9203", + "127.0.0.1:9203" ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 } }, - "URxHiUQPROOt1G22Ev6lXw" : { - "name" : "c2", - "transport_address" : "127.0.0.1:9307", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ + "c1UgW5ROTkSa2YnM_T56tw": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9207", - "127.0.0.1:9207" + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9204", + "127.0.0.1:9204" ], - "publish_address" : "127.0.0.1:9207", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 } }, - "_06S_kWoRqqFR8Z8CS3JRw" : { - "name" : "c1", - "transport_address" : "127.0.0.1:9306", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ + "QM9yjqjmS72MstpNYV_trg": { + 
"name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9206", - "127.0.0.1:9206" + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9205", + "127.0.0.1:9205" ], - "publish_address" : "127.0.0.1:9206", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 } }, - "QZE5Bd6DQJmnfVs2dglOvA" : { - "name" : "d2", - "transport_address" : "127.0.0.1:9304", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "data", + "wLtzAssoQYeX_4TstgCj0Q": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9204", - "127.0.0.1:9204" + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9206", + "127.0.0.1:9206" ], - "publish_address" : "127.0.0.1:9204", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 } }, - "_3mTXg6dSweZn5ReB2fQqw" : { - "name" : "m1", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "master", + "ONOzpst8TH-ZebG7fxGwaA": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9200", - "127.0.0.1:9200" + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9207", + "127.0.0.1:9207" ], - "publish_address" : "127.0.0.1:9200", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 } } } diff --git a/client/sniffer/src/test/resources/create_test_nodes_info.bash b/client/sniffer/src/test/resources/create_test_nodes_info.bash new file mode 100644 index 0000000000000..f4f1c09882ea8 --- /dev/null +++ b/client/sniffer/src/test/resources/create_test_nodes_info.bash @@ -0,0 +1,107 @@ +#!/bin/bash + +# Recreates the v_nodes_http.json files in this directory. This is +# meant to be an "every once in a while" thing that we do only when +# we want to add a new version of Elasticsearch or configure the +# nodes differently. That is why we don't do this in gradle. It also +# allows us to play fast and loose with error handling. If something +# goes wrong you have to manually clean up which is good because it +# leaves around the kinds of things that we need to debug the failure. + +# I built this file so the next time I have to regenerate these +# v_nodes_http.json files I won't have to reconfigure Elasticsearch +# from scratch. While I was at it I took the time to make sure that +# when we do rebuild the files they don't jump around too much. That +# way the diffs are smaller. 
+ +set -e + +script_path="$( cd "$(dirname "$0")" ; pwd -P )" +work=$(mktemp -d) +pushd ${work} >> /dev/null +echo Working in ${work} + +wget https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/2.0.0/elasticsearch-2.0.0.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.0.0.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.0.0.tar.gz +sha1sum -c - << __SHAs +e369d8579bd3a2e8b5344278d5043f19f14cac88 elasticsearch-2.0.0.tar.gz +d25f6547bccec9f0b5ea7583815f96a6f50849e0 elasticsearch-5.0.0.tar.gz +__SHAs +sha512sum -c - << __SHAs +25bb622d2fc557d8b8eded634a9b333766f7b58e701359e1bcfafee390776eb323cb7ea7a5e02e8803e25d8b1d3aabec0ec1b0cf492d0bab5689686fe440181c elasticsearch-6.0.0.tar.gz +__SHAs + + +function do_version() { + local version=$1 + local nodes='m1 m2 m3 d1 d2 d3 c1 c2' + rm -rf ${version} + mkdir -p ${version} + pushd ${version} >> /dev/null + + tar xf ../elasticsearch-${version}.tar.gz + local http_port=9200 + for node in ${nodes}; do + mkdir ${node} + cp -r elasticsearch-${version}/* ${node} + local master=$([[ "$node" =~ ^m.* ]] && echo true || echo false) + local data=$([[ "$node" =~ ^d.* ]] && echo true || echo false) + # m2 is always master and data for these test just so we have a node like that + data=$([[ "$node" == 'm2' ]] && echo true || echo ${data}) + local attr=$([ ${version} == '2.0.0' ] && echo '' || echo '.attr') + local transport_port=$((http_port+100)) + + cat >> ${node}/config/elasticsearch.yml << __ES_YML +node.name: ${node} +node.master: ${master} +node.data: ${data} +node${attr}.dummy: everyone_has_me +node${attr}.number: ${node:1} +node${attr}.array: [${node:0:1}, ${node:1}] +http.port: ${http_port} +transport.tcp.port: ${transport_port} +discovery.zen.minimum_master_nodes: 3 +discovery.zen.ping.unicast.hosts: ['localhost:9300','localhost:9301','localhost:9302'] +__ES_YML + + if [ ${version} != '2.0.0' ]; then + perl -pi -e 's/-Xm([sx]).+/-Xm${1}512m/g' ${node}/config/jvm.options + fi + + echo "starting ${version}/${node}..." + ${node}/bin/elasticsearch -d -p ${node}/pidfile + + ((http_port++)) + done + + echo "waiting for cluster to form" + # got to wait for all the nodes + until curl -s localhost:9200; do + sleep .25 + done + + echo "waiting for all nodes to join" + until [ $(echo ${nodes} | wc -w) -eq $(curl -s localhost:9200/_cat/nodes | wc -l) ]; do + sleep .25 + done + + # jq sorts the nodes by their http host so the file doesn't jump around when we regenerate it + curl -s localhost:9200/_nodes/http?pretty \ + | jq '[to_entries[] | ( select(.key == "nodes").value|to_entries|sort_by(.value.http.publish_address)|from_entries|{"key": "nodes", "value": .} ) // .] | from_entries' \ + > ${script_path}/${version}_nodes_http.json + + for node in ${nodes}; do + echo "stopping ${version}/${node}..." + kill $(cat ${node}/pidfile) + done + + popd >> /dev/null +} + +JAVA_HOME=$JAVA8_HOME do_version 2.0.0 +JAVA_HOME=$JAVA8_HOME do_version 5.0.0 +JAVA_HOME=$JAVA8_HOME do_version 6.0.0 + +popd >> /dev/null +rm -rf ${work} diff --git a/client/sniffer/src/test/resources/readme.txt b/client/sniffer/src/test/resources/readme.txt index ccb9bb15edb55..c6dd32a0410a5 100644 --- a/client/sniffer/src/test/resources/readme.txt +++ b/client/sniffer/src/test/resources/readme.txt @@ -2,3 +2,5 @@ few nodes in different configurations locally at various versions. They are for testing `ElasticsearchNodesSniffer` against different versions of Elasticsearch. 
+
+See create_test_nodes_info.bash for how to create these.
diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc
index 407947000de35..1f8b302715f42 100644
--- a/docs/java-rest/low-level/usage.asciidoc
+++ b/docs/java-rest/low-level/usage.asciidoc
@@ -312,9 +312,17 @@ adds an extra header:
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
-include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize]
+include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize-header]
 --------------------------------------------------
 
+Or you can send requests to nodes with a particular attribute:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize-attribute]
+--------------------------------------------------
+<1> Replace the node selector with one that selects nodes on a particular rack.
+
 ==== Multiple parallel asynchronous actions
 
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc
index c2259c7b55d14..3ee0340387496 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc
@@ -198,9 +198,7 @@ header. The warnings must match exactly. Using it looks like this:
 ....
 
 If the arguments to `do` include `node_selector` then the request is only
-sent to nodes that match the `node_selector`. Currently only the `version`
-selector is supported and it has the same logic as the `version` field in
-`skip`. It looks like this:
+sent to nodes that match the `node_selector`. It looks like this:
 
 ....
 "test id":
@@ -216,6 +214,19 @@ selector is supported and it has the same logic as the `version` field in
       body: { foo: bar }
 ....
 
+If you list multiple selectors then the request will only go to nodes that
+match all of those selectors. The following selectors are supported:
+* `version`: Only nodes whose version is within the range will receive the
+request. The syntax for the pattern is the same as when `version` is within
+`skip`.
+* `attribute`: Only nodes that have an attribute matching the provided name
+and value receive the request. It looks like:
+....
+    node_selector:
+        attribute:
+            name: value
+....
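Both the YAML runner selector described above and the client-side
`rest-client-options-customize-attribute` snippet referenced earlier boil down
to the same client API. As a rough sketch (not the actual included snippet),
assuming the `HasAttributeNodeSelector` and `RequestOptions#setNodeSelector`
introduced in this patch series, and with a purely illustrative `rack_id`
attribute and an existing `RestClient` named `restClient`:

[source,java]
--------------------------------------------------
// Sketch: only nodes advertising rack_id=rack_one are eligible for this request.
RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder();
options.setNodeSelector(new HasAttributeNodeSelector("rack_id", "rack_one"));

Request request = new Request("GET", "/_cat/nodes");
request.setOptions(options.build());
Response response = restClient.performRequest(request);
--------------------------------------------------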
+ === `set` For some tests, it is necessary to extract a value from the previous `response`, in diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 4754ea0fc4d66..b1357522082f8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; +import org.elasticsearch.client.HasAttributeNodeSelector; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; @@ -31,6 +32,7 @@ import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; @@ -131,11 +133,10 @@ public static DoSection parse(XContentParser parser) throws IOException { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { selectorName = parser.currentName(); - } else if (token.isValue()) { - NodeSelector newSelector = buildNodeSelector( - parser.getTokenLocation(), selectorName, parser.text()); - nodeSelector = nodeSelector == NodeSelector.ANY ? - newSelector : new ComposeNodeSelector(nodeSelector, newSelector); + } else { + NodeSelector newSelector = buildNodeSelector(selectorName, parser); + nodeSelector = nodeSelector == NodeSelector.ANY ? 
+ newSelector : new ComposeNodeSelector(nodeSelector, newSelector); } } } else if (currentFieldName != null) { // must be part of API call then @@ -366,34 +367,64 @@ private String formatStatusCodeMessage(ClientYamlTestResponse restTestResponse, not(equalTo(409))))); } - private static NodeSelector buildNodeSelector(XContentLocation location, String name, String value) { + private static NodeSelector buildNodeSelector(String name, XContentParser parser) throws IOException { switch (name) { + case "attribute": + return parseAttributeValuesSelector(parser); case "version": - Version[] range = SkipSection.parseVersionRange(value); - return new NodeSelector() { - @Override - public void select(Iterable nodes) { - for (Iterator itr = nodes.iterator(); itr.hasNext();) { - Node node = itr.next(); - if (node.getVersion() == null) { - throw new IllegalStateException("expected [version] metadata to be set but got " - + node); - } - Version version = Version.fromString(node.getVersion()); - if (false == (version.onOrAfter(range[0]) && version.onOrBefore(range[1]))) { - itr.remove(); - } + return parseVersionSelector(parser); + default: + throw new XContentParseException(parser.getTokenLocation(), "unknown node_selector [" + name + "]"); + } + } + + private static NodeSelector parseAttributeValuesSelector(XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new XContentParseException(parser.getTokenLocation(), "expected START_OBJECT"); + } + String key = null; + XContentParser.Token token; + NodeSelector result = NodeSelector.ANY; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + key = parser.currentName(); + } else if (token.isValue()) { + NodeSelector newSelector = new HasAttributeNodeSelector(key, parser.text()); + result = result == NodeSelector.ANY ? 
+ newSelector : new ComposeNodeSelector(result, newSelector); + } else { + throw new XContentParseException(parser.getTokenLocation(), "expected [" + key + "] to be a value"); + } + } + return result; + } + + private static NodeSelector parseVersionSelector(XContentParser parser) throws IOException { + if (false == parser.currentToken().isValue()) { + throw new XContentParseException(parser.getTokenLocation(), "expected [version] to be a value"); + } + Version[] range = SkipSection.parseVersionRange(parser.text()); + return new NodeSelector() { + @Override + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + Node node = itr.next(); + if (node.getVersion() == null) { + throw new IllegalStateException("expected [version] metadata to be set but got " + + node); + } + Version version = Version.fromString(node.getVersion()); + if (false == (version.onOrAfter(range[0]) && version.onOrBefore(range[1]))) { + itr.remove(); } } + } - @Override - public String toString() { - return "version between [" + range[0] + "] and [" + range[1] + "]"; - } - }; - default: - throw new IllegalArgumentException("unknown node_selector [" + name + "]"); - } + @Override + public String toString() { + return "version between [" + range[0] + "] and [" + range[1] + "]"; + } + }; } /** diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index d5ee934bc531d..e36ddc5f1c2df 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -34,6 +34,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -510,7 +511,7 @@ public void testParseDoSectionExpectedWarnings() throws Exception { "just one entry this time"))); } - public void testNodeSelector() throws IOException { + public void testNodeSelectorByVersion() throws IOException { parser = createParser(YamlXContent.yamlXContent, "node_selector:\n" + " version: 5.2.0-6.0.0\n" + @@ -540,8 +541,90 @@ public void testNodeSelector() throws IOException { emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector()); } - private Node nodeWithVersion(String version) { - return new Node(new HttpHost("dummy"), null, null, version, null); + private static Node nodeWithVersion(String version) { + return new Node(new HttpHost("dummy"), null, null, version, null, null); + } + + public void testNodeSelectorByAttribute() throws IOException { + parser = createParser(YamlXContent.yamlXContent, + "node_selector:\n" + + " attribute:\n" + + " attr: val\n" + + "indices.get_field_mapping:\n" + + " index: test_index" + ); + + DoSection doSection = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node hasAttr = nodeWithAttributes(singletonMap("attr", singletonList("val"))); + Node hasAttrWrongValue = nodeWithAttributes(singletonMap("attr", singletonList("notval"))); + Node notHasAttr = nodeWithAttributes(singletonMap("notattr", singletonList("val"))); + { + List nodes = new ArrayList<>(); + nodes.add(hasAttr); + nodes.add(hasAttrWrongValue); + nodes.add(notHasAttr); + doSection.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(Arrays.asList(hasAttr), nodes); + } + + parser = 
createParser(YamlXContent.yamlXContent,
+                "node_selector:\n" +
+                "    attribute:\n" +
+                "        attr: val\n" +
+                "        attr2: val2\n" +
+                "indices.get_field_mapping:\n" +
+                "    index: test_index"
+        );
+
+        DoSection doSectionWithTwoAttributes = DoSection.parse(parser);
+        assertNotSame(NodeSelector.ANY, doSectionWithTwoAttributes.getApiCallSection().getNodeSelector());
+        Node hasAttr2 = nodeWithAttributes(singletonMap("attr2", singletonList("val2")));
+        Map<String, List<String>> bothAttributes = new HashMap<>();
+        bothAttributes.put("attr", singletonList("val"));
+        bothAttributes.put("attr2", singletonList("val2"));
+        Node hasBoth = nodeWithAttributes(bothAttributes);
+        {
+            List<Node> nodes = new ArrayList<>();
+            nodes.add(hasAttr);
+            nodes.add(hasAttrWrongValue);
+            nodes.add(notHasAttr);
+            nodes.add(hasAttr2);
+            nodes.add(hasBoth);
+            doSectionWithTwoAttributes.getApiCallSection().getNodeSelector().select(nodes);
+            assertEquals(Arrays.asList(hasBoth), nodes);
+        }
+    }
+
+    private static Node nodeWithAttributes(Map<String, List<String>> attributes) {
+        return new Node(new HttpHost("dummy"), null, null, null, null, attributes);
+    }
+
+    public void testNodeSelectorByTwoThings() throws IOException {
+        parser = createParser(YamlXContent.yamlXContent,
+                "node_selector:\n" +
+                "    version: 5.2.0-6.0.0\n" +
+                "    attribute:\n" +
+                "        attr: val\n" +
+                "indices.get_field_mapping:\n" +
+                "    index: test_index"
+        );
+
+        DoSection doSection = DoSection.parse(parser);
+        assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector());
+        Node both = nodeWithVersionAndAttributes("5.2.1", singletonMap("attr", singletonList("val")));
+        Node badVersion = nodeWithVersionAndAttributes("5.1.1", singletonMap("attr", singletonList("val")));
+        Node badAttr = nodeWithVersionAndAttributes("5.2.1", singletonMap("notattr", singletonList("val")));
+        List<Node> nodes = new ArrayList<>();
+        nodes.add(both);
+        nodes.add(badVersion);
+        nodes.add(badAttr);
+        doSection.getApiCallSection().getNodeSelector().select(nodes);
+        assertEquals(Arrays.asList(both), nodes);
+    }
+
+    private static Node nodeWithVersionAndAttributes(String version, Map<String, List<String>> attributes) {
+        return new Node(new HttpHost("dummy"), null, null, version, null, attributes);
     }
 
     private void assertJsonEquals(Map<String, Object> actual, String expected) throws IOException {

From f11fb61d3af459ee5cc522e885171c0a53c64e31 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Fri, 15 Jun 2018 09:37:26 -0400
Subject: [PATCH 07/41] QA: Fix tribe tests to use node selector

Note that we sniff before running the yaml tests, so we need to use a
node selector in the tribe tests or else they might contact a tribe
node instead of a local node, which would break the test.
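A quick aside on the selector machinery the tests above exercise: when a
`node_selector` lists both `version` and `attribute`, `DoSection` chains the
selectors so that a node must survive both. A minimal sketch of that
composition follows, assuming two already-built selectors named
`versionSelector` and `attributeSelector` (the real `ComposeNodeSelector`
lives inside `DoSection`; this lambda form is only illustrative):

[source,java]
--------------------------------------------------
// Sketch: a node stays in the list only if both selectors keep it.
// NodeSelector.select mutates the passed collection in place.
NodeSelector composed = nodes -> {
    versionSelector.select(nodes);    // first drop nodes outside the version range
    attributeSelector.select(nodes);  // then drop nodes missing the attribute
};
--------------------------------------------------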
---
 qa/smoke-test-tribe-node/build.gradle                        | 1 +
 .../src/test/resources/rest-api-spec/test/tribe/10_basic.yml | 5 +++++
 2 files changed, 6 insertions(+)

diff --git a/qa/smoke-test-tribe-node/build.gradle b/qa/smoke-test-tribe-node/build.gradle
index 5b6ddd2f9f3f6..91947d2862a76 100644
--- a/qa/smoke-test-tribe-node/build.gradle
+++ b/qa/smoke-test-tribe-node/build.gradle
@@ -44,6 +44,7 @@ integTestCluster {
   setting 'http.port', '40200-40249'
   setting 'transport.tcp.port', '40300-40349'
   setting 'node.name', 'quest'
+  setting 'node.attr.role', 'client'
   setting 'tribe.one.cluster.name', 'one'
   setting 'tribe.one.discovery.zen.ping.unicast.hosts', "'${-> oneNodes.get(0).transportUri()}'"
   setting 'tribe.one.http.enabled', 'true'
diff --git a/qa/smoke-test-tribe-node/src/test/resources/rest-api-spec/test/tribe/10_basic.yml b/qa/smoke-test-tribe-node/src/test/resources/rest-api-spec/test/tribe/10_basic.yml
index d70a355ac6274..4035a66606bf6 100644
--- a/qa/smoke-test-tribe-node/src/test/resources/rest-api-spec/test/tribe/10_basic.yml
+++ b/qa/smoke-test-tribe-node/src/test/resources/rest-api-spec/test/tribe/10_basic.yml
@@ -1,6 +1,11 @@
 ---
 "Tribe node test":
+  - skip:
+      features: node_selector
   - do:
+      node_selector:
+        attribute:
+          role: client
       cat.nodes:
         h: name
         s: name

From 19969c7b026fe3ad3e6b0cd95de502f6ea003c0e Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Fri, 15 Jun 2018 14:16:30 -0400
Subject: [PATCH 08/41] Docs: Document changes in rest client

Document the breaking changes and deprecations we made in the REST
client in 6.4.
---
 docs/reference/migration/migrate_6_4.asciidoc | 25 ++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/docs/reference/migration/migrate_6_4.asciidoc b/docs/reference/migration/migrate_6_4.asciidoc
index d7a77102c3b75..7d2b89dab9ee5 100644
--- a/docs/reference/migration/migrate_6_4.asciidoc
+++ b/docs/reference/migration/migrate_6_4.asciidoc
@@ -37,4 +37,27 @@ Stored script formats that don't use top level `script` object have been depreca
 support for these formats will be removed in the next major release. This
 includes `template` stored scripts format and
-formats that do not encapsulate the script inside a `script` json object.
\ No newline at end of file
+formats that do not encapsulate the script inside a `script` json object.
+
+[[breaking_64_rest_client_changes]]
+=== REST Client
+
+==== Old low level ++performRequest++s deprecated
+The versions of `performRequest` and `performRequestAsync` that were in the
+low level client in 6.3 have been deprecated in favor of
+`performRequest(Request)` and `performRequestAsync(Request, ActionListener)`.
+These will allow us to add more features to the client without adding more
+variants of `performRequest`.
+
+==== Old high level request methods deprecated
+All request methods on the high level client have been deprecated in favor
+of a new variant that takes `RequestOptions`. This allows users of the high
+level client to customize many options on individual requests that were not
+available otherwise.
+
+==== HostSniffer renamed to NodeSniffer and signature changed
+To allow the `Sniffer` to pick up more metadata, we changed it from
+sniffing ++HttpHost++s to sniffing ++Node++s, a new class introduced to contain
+both connection information and metadata like the node's role or any
+attributes defined in elasticsearch.yml. These can be used by the new
+`RequestOptions#setNodeSelector`.
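To make the deprecations above concrete, here is a minimal sketch of the
6.4-style request flow with the `Request` object; the endpoint, parameter, and
body are illustrative, and `lowLevelClient` is assumed to be an existing
`RestClient`:

[source,java]
--------------------------------------------------
// Deprecated style: lowLevelClient.performRequest("GET", "/_search", params, entity)
// 6.4 style: describe the call with a Request object, then execute it.
Request request = new Request("GET", "/_search");
request.addParameter("size", "0");
request.setJsonEntity("{\"query\":{\"match_all\":{}}}");
Response response = lowLevelClient.performRequest(request);
--------------------------------------------------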
From c6c1191dd9b547fa61a10d79051eff9d4b95ca5b Mon Sep 17 00:00:00 2001
From: lcawl
Date: Fri, 15 Jun 2018 11:04:11 -0700
Subject: [PATCH 09/41] [DOCS] Adds links to release notes and highlights

---
 docs/reference/migration/index.asciidoc | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc
index 6c654a4564531..9a034b30f2681 100644
--- a/docs/reference/migration/index.asciidoc
+++ b/docs/reference/migration/index.asciidoc
@@ -17,7 +17,10 @@ As a general rule:
 * Migration between non-consecutive major versions -- e.g. `2.x` to `6.x` --
   is not supported.
 
-See <> for more info.
+For more information, see <>.
+
+See also <> and <>.
+
 --
 include::migrate_6_0.asciidoc[]

From c0de50f0185f309f7fe1f5d2a2c832d1f15029c6 Mon Sep 17 00:00:00 2001
From: lcawl
Date: Fri, 15 Jun 2018 11:39:19 -0700
Subject: [PATCH 10/41] [DOCS] Added links in breaking changes pages

---
 docs/reference/migration/migrate_6_1.asciidoc | 3 +++
 docs/reference/migration/migrate_6_2.asciidoc | 3 +++
 docs/reference/migration/migrate_6_3.asciidoc | 9 +++++++++
 docs/reference/migration/migrate_6_4.asciidoc | 8 ++++++++
 4 files changed, 23 insertions(+)

diff --git a/docs/reference/migration/migrate_6_1.asciidoc b/docs/reference/migration/migrate_6_1.asciidoc
index 0dfb945826987..579d00fa778fc 100644
--- a/docs/reference/migration/migrate_6_1.asciidoc
+++ b/docs/reference/migration/migrate_6_1.asciidoc
@@ -1,6 +1,9 @@
 [[breaking-changes-6.1]]
 == Breaking changes in 6.1
 
+This section discusses the changes that you need to be aware of when migrating
+your application to Elasticsearch 6.1.
+
 [[breaking_61_packaging]]
 [float]
 === Bootstrap checks now apply to link-local addresses
diff --git a/docs/reference/migration/migrate_6_2.asciidoc b/docs/reference/migration/migrate_6_2.asciidoc
index 81dac028ce300..6a78363721f80 100644
--- a/docs/reference/migration/migrate_6_2.asciidoc
+++ b/docs/reference/migration/migrate_6_2.asciidoc
@@ -1,6 +1,9 @@
 [[breaking-changes-6.2]]
 == Breaking changes in 6.2
 
+This section discusses the changes that you need to be aware of when migrating
+your application to Elasticsearch 6.2.
+
 [[breaking_62_packaging]]
 [float]
 === All permission bootstrap check
diff --git a/docs/reference/migration/migrate_6_3.asciidoc b/docs/reference/migration/migrate_6_3.asciidoc
index 07523da849589..ddb5eb5f695ae 100644
--- a/docs/reference/migration/migrate_6_3.asciidoc
+++ b/docs/reference/migration/migrate_6_3.asciidoc
@@ -1,6 +1,15 @@
 [[breaking-changes-6.3]]
 == Breaking changes in 6.3
 
+This section discusses the changes that you need to be aware of when migrating
+your application to Elasticsearch 6.3.
+
+* <>
+* <>
+* <>
+
+See also <> and <>.
+
 [[breaking_63_api_changes]]
 === API changes
diff --git a/docs/reference/migration/migrate_6_4.asciidoc b/docs/reference/migration/migrate_6_4.asciidoc
index 7d2b89dab9ee5..16a78fae80149 100644
--- a/docs/reference/migration/migrate_6_4.asciidoc
+++ b/docs/reference/migration/migrate_6_4.asciidoc
@@ -1,6 +1,14 @@
 [[breaking-changes-6.4]]
 == Breaking changes in 6.4
 
+This section discusses the changes that you need to be aware of when migrating
+your application to Elasticsearch 6.4.
+
+* <>
+* <>
+
+See also <> and <>.
+
 [[breaking_64_api_changes]]
 === API changes

From 7bef382222c62886932bd3a4b439a06c9fcd2c6d Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Fri, 15 Jun 2018 12:39:25 -0600
Subject: [PATCH 11/41] Remove trial status info from start trial doc (#31365)

This is related to #31325.
There is currently information about the get-trial-status api on the
start-trial api documentation page. It also has the incorrect route for
that api. This commit removes that information as the start-trial page
properly links to a page providing documentation about get-trial-status.
---
 .../en/rest-api/license/start-trial.asciidoc | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/x-pack/docs/en/rest-api/license/start-trial.asciidoc b/x-pack/docs/en/rest-api/license/start-trial.asciidoc
index 7754f6feef79c..341c72853fd08 100644
--- a/x-pack/docs/en/rest-api/license/start-trial.asciidoc
+++ b/x-pack/docs/en/rest-api/license/start-trial.asciidoc
@@ -36,24 +36,6 @@ For more information, see
 [float]
 ==== Examples
 
-The following example checks whether you are eligible to start a trial:
-
-[source,js]
-------------------------------------------------------------
-GET _xpack/license/start_trial
-------------------------------------------------------------
-// CONSOLE
-// TEST[skip:license testing issues]
-
-Example response:
-[source,js]
-------------------------------------------------------------
-{
-  "eligible_to_start_trial": true
-}
-------------------------------------------------------------
-// NOTCONSOLE
-
 The following example starts a 30-day trial license. The acknowledge
 parameter is required as you are initiating a license that will expire.

From fd7a0667d59ddff096a1043f9bcaf6aa592736c9 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Fri, 15 Jun 2018 21:15:35 +0100
Subject: [PATCH 12/41] [ML] Put ML filter API response should contain the
 filter (#31362)

---
 .../xpack/core/ml/action/PutFilterAction.java | 51 ++++++++++++++++--
 .../PutCalendarActionResponseTests.java       | 13 ++++-
 .../action/PutFilterActionResponseTests.java  | 31 +++++++++++
 .../ml/action/TransportPutFilterAction.java   |  2 +-
 .../rest-api-spec/test/ml/filter_crud.yml     |  6 ++-
 .../ml/integration/DetectionRulesIT.java      |  7 ++-
 .../MlNativeAutodetectIntegTestCase.java      |  5 +-
 7 files changed, 100 insertions(+), 15 deletions(-)
 create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionResponseTests.java

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java
index 38e4cf9cadb8e..db82e089dc609 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java
@@ -9,7 +9,7 @@
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -120,10 +120,53 @@ public RequestBuilder(ElasticsearchClient client) {
         }
     }
 
-    public static class Response extends AcknowledgedResponse {
+    public static class Response extends ActionResponse implements ToXContentObject {
 
-        public Response() {
-            super(true);
+        private MlFilter filter;
+
+        Response() {
+        }
+
+        public Response(MlFilter filter) {
+            this.filter = filter;
+        }
+
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            super.readFrom(in);
+            filter = new
MlFilter(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + filter.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return filter.toXContent(builder, params); + } + + public MlFilter getFilter() { + return filter; + } + + @Override + public int hashCode() { + return Objects.hash(filter); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(filter, other.filter); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java index 941de884554bf..77d4d788db620 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java @@ -5,10 +5,14 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.calendars.Calendar; import org.elasticsearch.xpack.core.ml.calendars.CalendarTests; -public class PutCalendarActionResponseTests extends AbstractStreamableTestCase { +import java.io.IOException; + +public class PutCalendarActionResponseTests extends AbstractStreamableXContentTestCase { @Override protected PutCalendarAction.Response createTestInstance() { @@ -19,4 +23,9 @@ protected PutCalendarAction.Response createTestInstance() { protected PutCalendarAction.Response createBlankInstance() { return new PutCalendarAction.Response(); } + + @Override + protected PutCalendarAction.Response doParseInstance(XContentParser parser) throws IOException { + return new PutCalendarAction.Response(Calendar.LENIENT_PARSER.parse(parser, null).build()); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionResponseTests.java new file mode 100644 index 0000000000000..1e697f5172a4a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionResponseTests.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.core.ml.job.config.MlFilterTests; + +import java.io.IOException; + +public class PutFilterActionResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected PutFilterAction.Response createTestInstance() { + return new PutFilterAction.Response(MlFilterTests.createRandom()); + } + + @Override + protected PutFilterAction.Response createBlankInstance() { + return new PutFilterAction.Response(); + } + + @Override + protected PutFilterAction.Response doParseInstance(XContentParser parser) throws IOException { + return new PutFilterAction.Response(MlFilter.LENIENT_PARSER.parse(parser, null).build()); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java index f7ac11e2d1aec..fc14ef085dd33 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java @@ -69,7 +69,7 @@ protected void doExecute(PutFilterAction.Request request, ActionListener { + "description": "A newly created filter", "items": ["abc", "xyz"] } - - match: { acknowledged: true } + - match: { filter_id: filter-foo2 } + - match: { description: "A newly created filter" } + - match: { items: ["abc", "xyz"]} - do: xpack.ml.get_filters: @@ -128,6 +131,7 @@ setup: - match: filters.0: filter_id: "filter-foo2" + description: "A newly created filter" items: ["abc", "xyz"] --- diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java index b99170546df3b..fbda8ad716b2c 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java @@ -35,7 +35,6 @@ import java.util.Set; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isOneOf; /** @@ -121,7 +120,7 @@ public void testCondition() throws Exception { public void testScope() throws Exception { MlFilter safeIps = MlFilter.builder("safe_ips").setItems("111.111.111.111", "222.222.222.222").build(); - assertThat(putMlFilter(safeIps), is(true)); + assertThat(putMlFilter(safeIps).getFilter(), equalTo(safeIps)); DetectionRule rule = new DetectionRule.Builder(RuleScope.builder().include("ip", "safe_ips")).build(); @@ -179,7 +178,7 @@ public void testScope() throws Exception { // Now let's update the filter MlFilter updatedFilter = MlFilter.builder(safeIps.getId()).setItems("333.333.333.333").build(); - assertThat(putMlFilter(updatedFilter), is(true)); + assertThat(putMlFilter(updatedFilter).getFilter(), equalTo(updatedFilter)); // Wait until the notification that the process was updated is indexed assertBusy(() -> { @@ -230,7 +229,7 @@ public void testScopeAndCondition() throws IOException { // We have 2 IPs and they're both safe-listed. 
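        // Note (illustrative aside, not part of the original patch): with the
        // response type changed above, putting a filter now echoes the filter
        // back, so a round trip can be asserted directly. The description text
        // below is invented for the example; the assertion pattern matches the
        // tests in this commit:
        //
        //     MlFilter safeIps = MlFilter.builder("safe_ips")
        //             .setDescription("IPs that never indicate an anomaly")
        //             .setItems("111.111.111.111", "222.222.222.222")
        //             .build();
        //     assertThat(putMlFilter(safeIps).getFilter(), equalTo(safeIps));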
List ips = Arrays.asList("111.111.111.111", "222.222.222.222"); MlFilter safeIps = MlFilter.builder("safe_ips").setItems(ips).build(); - assertThat(putMlFilter(safeIps), is(true)); + assertThat(putMlFilter(safeIps).getFilter(), equalTo(safeIps)); // Ignore if ip in safe list AND actual < 10. DetectionRule rule = new DetectionRule.Builder(RuleScope.builder().include("ip", "safe_ips")) diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java index 9057db476ad77..4e6fb03497e6a 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java @@ -419,9 +419,8 @@ protected List getForecasts(String jobId, ForecastRequestStats forecas return forecasts; } - protected boolean putMlFilter(MlFilter filter) { - PutFilterAction.Response response = client().execute(PutFilterAction.INSTANCE, new PutFilterAction.Request(filter)).actionGet(); - return response.isAcknowledged(); + protected PutFilterAction.Response putMlFilter(MlFilter filter) { + return client().execute(PutFilterAction.INSTANCE, new PutFilterAction.Request(filter)).actionGet(); } protected PutCalendarAction.Response putCalendar(String calendarId, List jobIds, String description) { From 127a80c775278a867e72ee874910a55b26e8aaaa Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Fri, 15 Jun 2018 22:14:28 +0200 Subject: [PATCH 13/41] Support for remote path in reindex api Closes #22913 (cherry picked from commit dbc9d60) --- docs/reference/docs/reindex.asciidoc | 10 +-- .../index/reindex/RestReindexAction.java | 9 ++- .../index/reindex/TransportReindexAction.java | 62 ++++++++++--------- ...ReindexFromRemoteBuildRestClientTests.java | 28 +++++---- .../ReindexFromRemoteWhitelistTests.java | 4 +- .../ReindexFromRemoteWithAuthTests.java | 5 +- .../ReindexSourceTargetValidationTests.java | 4 +- .../index/reindex/RestReindexActionTests.java | 23 +++++++ .../index/reindex/RetryTests.java | 5 +- .../index/reindex/RoundTripTests.java | 5 +- .../index/reindex/remote/RemoteInfoTests.java | 16 +++-- .../index/reindex/RemoteInfo.java | 25 +++++++- .../index/reindex/ReindexRequestTests.java | 12 ++-- 13 files changed, 135 insertions(+), 73 deletions(-) diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index f05acab559ce1..bdbffb0a08d5d 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -422,11 +422,11 @@ POST _reindex // TEST[s/"username": "user",//] // TEST[s/"password": "pass"//] -The `host` parameter must contain a scheme, host, and port (e.g. -`https://otherhost:9200`). The `username` and `password` parameters are -optional, and when they are present `_reindex` will connect to the remote -Elasticsearch node using basic auth. Be sure to use `https` when using -basic auth or the password will be sent in plain text. +The `host` parameter must contain a scheme, host, port (e.g. +`https://otherhost:9200`) and optional path (e.g. `https://otherhost:9200/proxy`). +The `username` and `password` parameters are optional, and when they are present `_reindex` +will connect to the remote Elasticsearch node using basic auth. 
Be sure to use `https` when +using basic auth or the password will be sent in plain text. Remote hosts have to be explicitly whitelisted in elasticsearch.yaml using the `reindex.remote.whitelist` property. It can be set to a comma delimited list diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index f1ac681b59fdf..a5520c90b0ff5 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -57,7 +57,7 @@ */ public class RestReindexAction extends AbstractBaseReindexRestHandler { static final ObjectParser PARSER = new ObjectParser<>("reindex"); - private static final Pattern HOST_PATTERN = Pattern.compile("(?[^:]+)://(?[^:]+):(?\\d+)"); + private static final Pattern HOST_PATTERN = Pattern.compile("(?[^:]+)://(?[^:]+):(?\\d+)(?/.*)?"); static { ObjectParser.Parser sourceParser = (parser, request, context) -> { @@ -139,10 +139,12 @@ static RemoteInfo buildRemoteInfo(Map source) throws IOException String hostInRequest = requireNonNull(extractString(remote, "host"), "[host] must be specified to reindex from a remote cluster"); Matcher hostMatcher = HOST_PATTERN.matcher(hostInRequest); if (false == hostMatcher.matches()) { - throw new IllegalArgumentException("[host] must be of the form [scheme]://[host]:[port] but was [" + hostInRequest + "]"); + throw new IllegalArgumentException("[host] must be of the form [scheme]://[host]:[port](/[pathPrefix])? but was [" + + hostInRequest + "]"); } String scheme = hostMatcher.group("scheme"); String host = hostMatcher.group("host"); + String pathPrefix = hostMatcher.group("pathPrefix"); int port = Integer.parseInt(hostMatcher.group("port")); Map headers = extractStringStringMap(remote, "headers"); TimeValue socketTimeout = extractTimeValue(remote, "socket_timeout", RemoteInfo.DEFAULT_SOCKET_TIMEOUT); @@ -151,7 +153,8 @@ static RemoteInfo buildRemoteInfo(Map source) throws IOException throw new IllegalArgumentException( "Unsupported fields in [remote]: [" + Strings.collectionToCommaDelimitedString(remote.keySet()) + "]"); } - return new RemoteInfo(scheme, host, port, queryForRemote(source), username, password, headers, socketTimeout, connectTimeout); + return new RemoteInfo(scheme, host, port, pathPrefix, queryForRemote(source), + username, password, headers, socketTimeout, connectTimeout); } /** diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index 650cf5000a745..16c0669fc74ea 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; +import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; @@ -206,34 +207,39 @@ static RestClient buildRestClient(RemoteInfo remoteInfo, long taskId, List header : remoteInfo.getHeaders().entrySet()) { 
clientHeaders[i++] = new BasicHeader(header.getKey(), header.getValue()); } - return RestClient.builder(new HttpHost(remoteInfo.getHost(), remoteInfo.getPort(), remoteInfo.getScheme())) - .setDefaultHeaders(clientHeaders) - .setRequestConfigCallback(c -> { - c.setConnectTimeout(Math.toIntExact(remoteInfo.getConnectTimeout().millis())); - c.setSocketTimeout(Math.toIntExact(remoteInfo.getSocketTimeout().millis())); - return c; - }) - .setHttpClientConfigCallback(c -> { - // Enable basic auth if it is configured - if (remoteInfo.getUsername() != null) { - UsernamePasswordCredentials creds = new UsernamePasswordCredentials(remoteInfo.getUsername(), - remoteInfo.getPassword()); - CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, creds); - c.setDefaultCredentialsProvider(credentialsProvider); - } - // Stick the task id in the thread name so we can track down tasks from stack traces - AtomicInteger threads = new AtomicInteger(); - c.setThreadFactory(r -> { - String name = "es-client-" + taskId + "-" + threads.getAndIncrement(); - Thread t = new Thread(r, name); - threadCollector.add(t); - return t; - }); - // Limit ourselves to one reactor thread because for now the search process is single threaded. - c.setDefaultIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(1).build()); - return c; - }).build(); + final RestClientBuilder builder = + RestClient.builder(new HttpHost(remoteInfo.getHost(), remoteInfo.getPort(), remoteInfo.getScheme())) + .setDefaultHeaders(clientHeaders) + .setRequestConfigCallback(c -> { + c.setConnectTimeout(Math.toIntExact(remoteInfo.getConnectTimeout().millis())); + c.setSocketTimeout(Math.toIntExact(remoteInfo.getSocketTimeout().millis())); + return c; + }) + .setHttpClientConfigCallback(c -> { + // Enable basic auth if it is configured + if (remoteInfo.getUsername() != null) { + UsernamePasswordCredentials creds = new UsernamePasswordCredentials(remoteInfo.getUsername(), + remoteInfo.getPassword()); + CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, creds); + c.setDefaultCredentialsProvider(credentialsProvider); + } + // Stick the task id in the thread name so we can track down tasks from stack traces + AtomicInteger threads = new AtomicInteger(); + c.setThreadFactory(r -> { + String name = "es-client-" + taskId + "-" + threads.getAndIncrement(); + Thread t = new Thread(r, name); + threadCollector.add(t); + return t; + }); + // Limit ourselves to one reactor thread because for now the search process is single threaded. 
+ c.setDefaultIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(1).build()); + return c; + }); + if (Strings.hasLength(remoteInfo.getPathPrefix()) && "/".equals(remoteInfo.getPathPrefix()) == false) { + builder.setPathPrefix(remoteInfo.getPathPrefix()); + } + return builder.build(); } /** diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteBuildRestClientTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteBuildRestClientTests.java index 14e3142d226c9..db32e4813b316 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteBuildRestClientTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteBuildRestClientTests.java @@ -34,20 +34,22 @@ public class ReindexFromRemoteBuildRestClientTests extends RestClientBuilderTestCase { public void testBuildRestClient() throws Exception { - RemoteInfo remoteInfo = new RemoteInfo("https", "localhost", 9200, new BytesArray("ignored"), null, null, emptyMap(), + for(final String path: new String[]{"", null, "/", "path"}) { + RemoteInfo remoteInfo = new RemoteInfo("https", "localhost", 9200, path, new BytesArray("ignored"), null, null, emptyMap(), RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT); - long taskId = randomLong(); - List threads = synchronizedList(new ArrayList<>()); - RestClient client = TransportReindexAction.buildRestClient(remoteInfo, taskId, threads); - try { - assertBusy(() -> assertThat(threads, hasSize(2))); - int i = 0; - for (Thread thread : threads) { - assertEquals("es-client-" + taskId + "-" + i, thread.getName()); - i++; + long taskId = randomLong(); + List threads = synchronizedList(new ArrayList<>()); + RestClient client = TransportReindexAction.buildRestClient(remoteInfo, taskId, threads); + try { + assertBusy(() -> assertThat(threads, hasSize(2))); + int i = 0; + for (Thread thread : threads) { + assertEquals("es-client-" + taskId + "-" + i, thread.getName()); + i++; + } + } finally { + client.close(); } - } finally { - client.close(); } } @@ -57,7 +59,7 @@ public void testHeaders() throws Exception { for (int i = 0; i < numHeaders; ++i) { headers.put("header" + i, Integer.toString(i)); } - RemoteInfo remoteInfo = new RemoteInfo("https", "localhost", 9200, new BytesArray("ignored"), null, null, + RemoteInfo remoteInfo = new RemoteInfo("https", "localhost", 9200, null, new BytesArray("ignored"), null, null, headers, RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT); long taskId = randomLong(); List threads = synchronizedList(new ArrayList<>()); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java index 128cd4043e283..e32370b166546 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWhitelistTests.java @@ -49,7 +49,7 @@ public void testLocalRequestWithWhitelist() { * Build a {@link RemoteInfo}, defaulting values that we don't care about in this test to values that don't hurt anything. 
*/ private RemoteInfo newRemoteInfo(String host, int port) { - return new RemoteInfo(randomAlphaOfLength(5), host, port, new BytesArray("test"), null, null, emptyMap(), + return new RemoteInfo(randomAlphaOfLength(5), host, port, null, new BytesArray("test"), null, null, emptyMap(), RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT); } @@ -63,7 +63,7 @@ public void testWhitelistedRemote() { public void testWhitelistedByPrefix() { checkRemoteWhitelist(buildRemoteWhitelist(singletonList("*.example.com:9200")), - new RemoteInfo(randomAlphaOfLength(5), "es.example.com", 9200, new BytesArray("test"), null, null, emptyMap(), + new RemoteInfo(randomAlphaOfLength(5), "es.example.com", 9200, null, new BytesArray("test"), null, null, emptyMap(), RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT)); checkRemoteWhitelist(buildRemoteWhitelist(singletonList("*.example.com:9200")), newRemoteInfo("6e134134a1.us-east-1.aws.example.com", 9200)); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java index 31077c405d8e1..1bad1b0719960 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -100,8 +100,9 @@ public void fetchTransportAddress() { * Build a {@link RemoteInfo}, defaulting values that we don't care about in this test to values that don't hurt anything. */ private RemoteInfo newRemoteInfo(String username, String password, Map headers) { - return new RemoteInfo("http", address.getAddress(), address.getPort(), new BytesArray("{\"match_all\":{}}"), username, password, - headers, RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT); + return new RemoteInfo("http", address.getAddress(), address.getPort(), null, + new BytesArray("{\"match_all\":{}}"), username, password, headers, + RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT); } public void testReindexFromRemoteWithAuthentication() throws Exception { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java index 28b9febe1c289..4784d7f5fe546 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java @@ -88,10 +88,10 @@ public void testTargetIsAlias() { public void testRemoteInfoSkipsValidation() { // The index doesn't have to exist - succeeds(new RemoteInfo(randomAlphaOfLength(5), "test", 9200, new BytesArray("test"), null, null, emptyMap(), + succeeds(new RemoteInfo(randomAlphaOfLength(5), "test", 9200, null, new BytesArray("test"), null, null, emptyMap(), RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT), "does_not_exist", "target"); // And it doesn't matter if they are the same index. They are considered to be different because the remote one is, well, remote. 
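        // Note (illustrative aside, not part of the original patch): the widened
        // RemoteInfo constructor these tests now use takes the optional path
        // prefix as its fourth argument; a null prefix preserves the old
        // behaviour. All values below are invented for the example:
        //
        //     RemoteInfo remote = new RemoteInfo("https", "otherhost", 9200, "/proxy",
        //             new BytesArray("{\"match_all\":{}}"), "user", "pass", emptyMap(),
        //             RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT);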
- succeeds(new RemoteInfo(randomAlphaOfLength(5), "test", 9200, new BytesArray("test"), null, null, emptyMap(), + succeeds(new RemoteInfo(randomAlphaOfLength(5), "test", 9200, null, new BytesArray("test"), null, null, emptyMap(), RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT), "target", "target"); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java index 88fa31f423a21..b06948b90581a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -89,6 +89,7 @@ public void testBuildRemoteInfoWithAllHostParts() throws IOException { assertEquals("http", info.getScheme()); assertEquals("example.com", info.getHost()); assertEquals(9200, info.getPort()); + assertNull(info.getPathPrefix()); assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); // Didn't set the timeout so we should get the default assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); // Didn't set the timeout so we should get the default @@ -96,8 +97,30 @@ public void testBuildRemoteInfoWithAllHostParts() throws IOException { assertEquals("https", info.getScheme()); assertEquals("other.example.com", info.getHost()); assertEquals(9201, info.getPort()); + assertNull(info.getPathPrefix()); assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); + + info = buildRemoteInfoHostTestCase("https://other.example.com:9201/"); + assertEquals("https", info.getScheme()); + assertEquals("other.example.com", info.getHost()); + assertEquals(9201, info.getPort()); + assertEquals("/", info.getPathPrefix()); + assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); + assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); + + info = buildRemoteInfoHostTestCase("https://other.example.com:9201/proxy-path/"); + assertEquals("https", info.getScheme()); + assertEquals("other.example.com", info.getHost()); + assertEquals(9201, info.getPort()); + assertEquals("/proxy-path/", info.getPathPrefix()); + assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); + assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); + + final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> buildRemoteInfoHostTestCase("https")); + assertEquals("[host] must be of the form [scheme]://[host]:[port](/[pathPrefix])? 
but was [https]", + exception.getMessage()); } public void testReindexFromRemoteRequestParsing() throws IOException { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index bd9642c2ed2e6..0a9c9df279373 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -122,8 +122,9 @@ public void testReindexFromRemote() throws Exception { assertNotNull(masterNode); TransportAddress address = masterNode.getHttp().getAddress().publishAddress(); - RemoteInfo remote = new RemoteInfo("http", address.getAddress(), address.getPort(), new BytesArray("{\"match_all\":{}}"), null, - null, emptyMap(), RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT); + RemoteInfo remote = new RemoteInfo("http", address.getAddress(), address.getPort(), null, + new BytesArray("{\"match_all\":{}}"), null, null, emptyMap(), + RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT); ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client).source("source").destination("dest") .setRemoteInfo(remote); return request; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java index 946ab030c8285..2dc4b59e8d9f9 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java @@ -63,8 +63,9 @@ public void testReindexRequest() throws IOException { } TimeValue socketTimeout = parseTimeValue(randomPositiveTimeValue(), "socketTimeout"); TimeValue connectTimeout = parseTimeValue(randomPositiveTimeValue(), "connectTimeout"); - reindex.setRemoteInfo(new RemoteInfo(randomAlphaOfLength(5), randomAlphaOfLength(5), port, query, username, password, headers, - socketTimeout, connectTimeout)); + reindex.setRemoteInfo( + new RemoteInfo(randomAlphaOfLength(5), randomAlphaOfLength(5), port, null, + query, username, password, headers, socketTimeout, connectTimeout)); } ReindexRequest tripped = new ReindexRequest(); roundTrip(reindex, tripped); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteInfoTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteInfoTests.java index d6ab599b43c2d..de0ade9c47cc3 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteInfoTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteInfoTests.java @@ -26,17 +26,21 @@ import static java.util.Collections.emptyMap; public class RemoteInfoTests extends ESTestCase { - private RemoteInfo newRemoteInfo(String scheme, String username, String password) { - return new RemoteInfo(scheme, "testhost", 12344, new BytesArray("testquery"), username, password, emptyMap(), + private RemoteInfo newRemoteInfo(String scheme, String prefixPath, String username, String password) { + return new RemoteInfo(scheme, "testhost", 12344, prefixPath, new BytesArray("testquery"), username, password, emptyMap(), RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT); } public void testToString() { - assertEquals("host=testhost port=12344 query=testquery", newRemoteInfo("http", null, null).toString()); - 
assertEquals("host=testhost port=12344 query=testquery username=testuser", newRemoteInfo("http", "testuser", null).toString()); + assertEquals("host=testhost port=12344 query=testquery", + newRemoteInfo("http", null, null, null).toString()); + assertEquals("host=testhost port=12344 query=testquery username=testuser", + newRemoteInfo("http", null, "testuser", null).toString()); assertEquals("host=testhost port=12344 query=testquery username=testuser password=<<>>", - newRemoteInfo("http", "testuser", "testpass").toString()); + newRemoteInfo("http", null, "testuser", "testpass").toString()); assertEquals("scheme=https host=testhost port=12344 query=testquery username=testuser password=<<>>", - newRemoteInfo("https", "testuser", "testpass").toString()); + newRemoteInfo("https", null, "testuser", "testpass").toString()); + assertEquals("scheme=https host=testhost port=12344 pathPrefix=prxy query=testquery username=testuser password=<<>>", + newRemoteInfo("https", "prxy", "testuser", "testpass").toString()); } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java index 8e7a990902631..70f79a9def605 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java @@ -48,6 +48,7 @@ public class RemoteInfo implements Writeable { private final String scheme; private final String host; private final int port; + private final String pathPrefix; private final BytesReference query; private final String username; private final String password; @@ -61,11 +62,12 @@ public class RemoteInfo implements Writeable { */ private final TimeValue connectTimeout; - public RemoteInfo(String scheme, String host, int port, BytesReference query, String username, String password, - Map headers, TimeValue socketTimeout, TimeValue connectTimeout) { + public RemoteInfo(String scheme, String host, int port, String pathPrefix, BytesReference query, String username, String password, + Map headers, TimeValue socketTimeout, TimeValue connectTimeout) { this.scheme = requireNonNull(scheme, "[scheme] must be specified to reindex from a remote cluster"); this.host = requireNonNull(host, "[host] must be specified to reindex from a remote cluster"); this.port = port; + this.pathPrefix = pathPrefix; this.query = requireNonNull(query, "[query] must be specified to reindex from a remote cluster"); this.username = username; this.password = password; @@ -97,6 +99,11 @@ public RemoteInfo(StreamInput in) throws IOException { socketTimeout = DEFAULT_SOCKET_TIMEOUT; connectTimeout = DEFAULT_CONNECT_TIMEOUT; } + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + pathPrefix = in.readOptionalString(); + } else { + pathPrefix = null; + } } @Override @@ -116,6 +123,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeTimeValue(socketTimeout); out.writeTimeValue(connectTimeout); } + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeOptionalString(pathPrefix); + } } public String getScheme() { @@ -130,6 +140,11 @@ public int getPort() { return port; } + @Nullable + public String getPathPrefix() { + return pathPrefix; + } + public BytesReference getQuery() { return query; } @@ -169,7 +184,11 @@ public String toString() { // http is the default so it isn't worth taking up space if it is the scheme b.append("scheme=").append(scheme).append(' '); } - b.append("host=").append(host).append(" port=").append(port).append(" 
query=").append(query.utf8ToString()); + b.append("host=").append(host).append(" port=").append(port); + if (pathPrefix != null) { + b.append(" pathPrefix=").append(pathPrefix); + } + b.append(" query=").append(query.utf8ToString()); if (username != null) { b.append(" username=").append(username); } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java b/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java index 9f4b20ff35ba3..6c1988a1440e9 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java @@ -37,8 +37,9 @@ public class ReindexRequestTests extends AbstractBulkByScrollRequestTestCase Date: Fri, 15 Jun 2018 16:06:00 -0700 Subject: [PATCH 14/41] [DOCS] Fixes small issue in release notes --- docs/reference/release-notes/6.3.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/release-notes/6.3.asciidoc b/docs/reference/release-notes/6.3.asciidoc index 1c14668e099df..8b9658b932024 100644 --- a/docs/reference/release-notes/6.3.asciidoc +++ b/docs/reference/release-notes/6.3.asciidoc @@ -227,8 +227,8 @@ Mapping:: Machine Learning:: * Synchronize long and short tests for periodicity {ml-pull}62[#62] * Improvements to trend modelling and periodicity testing for forecasting {ml-pull}7[#7] (issue: {ml-issue}5[#5]) -* [ML] Clean left behind model state docs {pull}30659[#30659] (issue: {issue}30551[#30551]) -* [ML] Hide internal Job update options from the REST API {pull}30537[#30537] (issue: {issue}30512[#30512]) +* Clean left behind model state docs {pull}30659[#30659] (issue: {issue}30551[#30551]) +* Hide internal Job update options from the REST API {pull}30537[#30537] (issue: {issue}30512[#30512]) Packaging:: * Configure heap dump path for archive packages {pull}29130[#29130] (issue: {issue}26755[#26755]) From b2ed8851f91ed73880970bcb79a6c1f9839e9a4b Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 15 Jun 2018 19:07:47 -0400 Subject: [PATCH 15/41] SQL: Fix rest endpoint names in node stats (#31371) Fixes wrong name for the sql translate endpoint and makes rest endpoint names in stats more consistent. 
--- .../xpack/sql/plugin/RestSqlClearCursorAction.java | 2 +- .../org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java | 2 +- .../elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java index 534d0459180e0..175b78d4f6655 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java @@ -37,6 +37,6 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli @Override public String getName() { - return "sql_translate_action"; + return "xpack_sql_clear_cursor_action"; } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java index 9e34a3fb2e097..a8daa1136d390 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java @@ -114,6 +114,6 @@ public RestResponse buildResponse(SqlQueryResponse response) throws Exception { @Override public String getName() { - return "xpack_sql_action"; + return "xpack_sql_query_action"; } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java index 503ee84314820..74d94e4800606 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java @@ -40,7 +40,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli @Override public String getName() { - return "sql_translate_action"; + return "xpack_sql_translate_action"; } } From 2df907c4397976fa4a1a0345df22a4cecb721152 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Sat, 16 Jun 2018 05:35:11 +0200 Subject: [PATCH 16/41] Add ingest-attachment support for per document `indexed_chars` limit (#31352) Today we support a global `indexed_chars` processor parameter, but in some cases users would like to set this limit depending on the document itself. This used to be supported in the mapper-attachments plugin by extracting the limit value from a meta field in the document sent for indexing. We add an option which reads this limit value from the document itself via a new setting named `indexed_chars_field`, which allows running: ``` PUT _ingest/pipeline/attachment { "description" : "Extract attachment information. Used to parse pdf and office files", "processors" : [ { "attachment" : { "field" : "data", "indexed_chars_field" : "size" } } ] } ``` Then index either: ``` PUT index/doc/1?pipeline=attachment { "data": "BASE64" } ``` which will use the default value (or the one defined by `indexed_chars`), or: ``` PUT index/doc/2?pipeline=attachment { "data": "BASE64", "size": 1000 } ``` Backport of #28977 in 6.x branch (6.4.0) --- docs/plugins/ingest-attachment.asciidoc | 122 +++++++++++++++++- .../attachment/AttachmentProcessor.java | 26 +++- .../attachment/AttachmentProcessorTests.java | 63 +++++++-- .../20_attachment_processor.yml | 74 +++++++++++ 4 files changed, 264 insertions(+), 21 deletions(-) diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc index 443d1fb578a6d..2f9564294d0b8 100644 --- a/docs/plugins/ingest-attachment.asciidoc +++ b/docs/plugins/ingest-attachment.asciidoc @@ -25,6 +25,7 @@ include::install_remove.asciidoc[] | `field` | yes | - | The field to get the base64 encoded field from | `target_field` | no | attachment | The field that will hold the attachment information | `indexed_chars` | no | 100000 | The number of chars being used for extraction to prevent huge fields. Use `-1` for no limit. +| `indexed_chars_field` | no | `null` | Field name from which you can override the number of chars being used for extraction. See `indexed_chars`. | `properties` | no | all properties | Array of properties to select to be stored. Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language` | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document |====== @@ -44,11 +45,11 @@ PUT _ingest/pipeline/attachment } ] } -PUT my_index/my_type/my_id?pipeline=attachment +PUT my_index/_doc/my_id?pipeline=attachment { "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=" } -GET my_index/my_type/my_id +GET my_index/_doc/my_id -------------------------------------------------- // CONSOLE @@ -59,7 +60,7 @@ Returns this: { "found": true, "_index": "my_index", - "_type": "my_type", + "_type": "_doc", "_id": "my_id", "_version": 1, "_source": { @@ -99,6 +100,115 @@ NOTE: Extracting contents from binary data is a resource intensive operation and consumes a lot of resources. It is highly recommended to run pipelines using this processor in a dedicated ingest node. +[[ingest-attachment-extracted-chars]] +==== Limit the number of extracted chars + +To prevent extracting too many chars and overloading the node memory, the number of chars being used for extraction +is limited by default to `100000`. You can change this value by setting `indexed_chars`. Use `-1` for no limit but +ensure when setting this that your node will have enough heap to extract the content of very large documents. + +You can also define this limit per document by reading the limit from a given field of the document. If the document +has that field, its value overrides the `indexed_chars` setting. To set this field, define the `indexed_chars_field` +setting.
+ +For example: + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/attachment +{ + "description" : "Extract attachment information", + "processors" : [ + { + "attachment" : { + "field" : "data", + "indexed_chars" : 11, + "indexed_chars_field" : "max_size" + } + } + ] +} +PUT my_index/_doc/my_id?pipeline=attachment +{ + "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=" +} +GET my_index/_doc/my_id +-------------------------------------------------- +// CONSOLE + +Returns this: + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_index": "my_index", + "_type": "_doc", + "_id": "my_id", + "_version": 1, + "_source": { + "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + "attachment": { + "content_type": "application/rtf", + "language": "sl", + "content": "Lorem ipsum", + "content_length": 11 + } + } +} +-------------------------------------------------- +// TESTRESPONSE + + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/attachment +{ + "description" : "Extract attachment information", + "processors" : [ + { + "attachment" : { + "field" : "data", + "indexed_chars" : 11, + "indexed_chars_field" : "max_size" + } + } + ] +} +PUT my_index/_doc/my_id_2?pipeline=attachment +{ + "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + "max_size": 5 +} +GET my_index/_doc/my_id_2 +-------------------------------------------------- +// CONSOLE + +Returns this: + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_index": "my_index", + "_type": "_doc", + "_id": "my_id_2", + "_version": 1, + "_source": { + "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + "max_size": 5, + "attachment": { + "content_type": "application/rtf", + "language": "ro", + "content": "Lorem", + "content_length": 5 + } + } +} +-------------------------------------------------- +// TESTRESPONSE + + [[ingest-attachment-with-arrays]] ==== Using the Attachment Processor with arrays @@ -150,7 +260,7 @@ PUT _ingest/pipeline/attachment } ] } -PUT my_index/my_type/my_id?pipeline=attachment +PUT my_index/_doc/my_id?pipeline=attachment { "attachments" : [ { @@ -163,7 +273,7 @@ PUT my_index/my_type/my_id?pipeline=attachment } ] } -GET my_index/my_type/my_id +GET my_index/_doc/my_id -------------------------------------------------- // CONSOLE @@ -172,7 +282,7 @@ Returns this: -------------------------------------------------- { "_index" : "my_index", - "_type" : "my_type", + "_type" : "_doc", "_id" : "my_id", "_version" : 1, "found" : true, diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java index b23c627290eb0..9fb2debcb5481 100644 --- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java +++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java @@ -29,7 +29,6 @@ import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; -import java.io.IOException; import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; @@ -42,6 +41,7 @@ import static org.elasticsearch.ingest.ConfigurationUtils.readBooleanProperty; import static org.elasticsearch.ingest.ConfigurationUtils.readIntProperty; import 
static org.elasticsearch.ingest.ConfigurationUtils.readOptionalList; +import static org.elasticsearch.ingest.ConfigurationUtils.readOptionalStringProperty; import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty; public final class AttachmentProcessor extends AbstractProcessor { @@ -55,15 +55,17 @@ public final class AttachmentProcessor extends AbstractProcessor { private final Set properties; private final int indexedChars; private final boolean ignoreMissing; + private final String indexedCharsField; AttachmentProcessor(String tag, String field, String targetField, Set properties, - int indexedChars, boolean ignoreMissing) throws IOException { + int indexedChars, boolean ignoreMissing, String indexedCharsField) { super(tag); this.field = field; this.targetField = targetField; this.properties = properties; this.indexedChars = indexedChars; this.ignoreMissing = ignoreMissing; + this.indexedCharsField = indexedCharsField; } boolean isIgnoreMissing() { @@ -82,6 +84,17 @@ public void execute(IngestDocument ingestDocument) { throw new IllegalArgumentException("field [" + field + "] is null, cannot parse."); } + Integer indexedChars = this.indexedChars; + + if (indexedCharsField != null) { + // If the user provided the number of characters to be extracted as part of the document, we use it + indexedChars = ingestDocument.getFieldValue(indexedCharsField, Integer.class, true); + if (indexedChars == null) { + // If the field does not exist we fall back to the global limit + indexedChars = this.indexedChars; + } + } + Metadata metadata = new Metadata(); String parsedContent = ""; try { @@ -183,14 +196,15 @@ public AttachmentProcessor create(Map registry, Strin Map config) throws Exception { String field = readStringProperty(TYPE, processorTag, config, "field"); String targetField = readStringProperty(TYPE, processorTag, config, "target_field", "attachment"); - List properyNames = readOptionalList(TYPE, processorTag, config, "properties"); + List propertyNames = readOptionalList(TYPE, processorTag, config, "properties"); int indexedChars = readIntProperty(TYPE, processorTag, config, "indexed_chars", NUMBER_OF_CHARS_INDEXED); boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + String indexedCharsField = readOptionalStringProperty(TYPE, processorTag, config, "indexed_chars_field"); final Set properties; - if (properyNames != null) { + if (propertyNames != null) { properties = EnumSet.noneOf(Property.class); - for (String fieldName : properyNames) { + for (String fieldName : propertyNames) { try { properties.add(Property.parse(fieldName)); } catch (Exception e) { @@ -202,7 +216,7 @@ public AttachmentProcessor create(Map registry, Strin properties = DEFAULT_PROPERTIES; } - return new AttachmentProcessor(processorTag, field, targetField, properties, indexedChars, ignoreMissing); + return new AttachmentProcessor(processorTag, field, targetField, properties, indexedChars, ignoreMissing, indexedCharsField); } } diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java index 07e369985321a..598d3f4e8175c 100644 --- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java +++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java @@ -27,7 +27,6 @@ import 
org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Base64; @@ -54,9 +53,9 @@ public class AttachmentProcessorTests extends ESTestCase { private AttachmentProcessor processor; @Before - public void createStandardProcessor() throws IOException { + public void createStandardProcessor() { processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", - "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 10000, false); + "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 10000, false, null); } public void testEnglishTextDocument() throws Exception { @@ -89,7 +88,7 @@ public void testHtmlDocumentWithRandomFields() throws Exception { selectedProperties.add(AttachmentProcessor.Property.DATE); } processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", - "target_field", selectedProperties, 10000, false); + "target_field", selectedProperties, 10000, false, null); Map attachmentData = parseDocument("htmlWithEmptyDateMeta.html", processor); assertThat(attachmentData.keySet(), hasSize(selectedFieldNames.length)); @@ -242,7 +241,7 @@ public void testNullValueWithIgnoreMissing() throws Exception { IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("source_field", null)); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); - Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", "randomTarget", null, 10, true); + Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", "randomTarget", null, 10, true, null); processor.execute(ingestDocument); assertIngestDocument(originalIngestDocument, ingestDocument); } @@ -250,7 +249,7 @@ public void testNullValueWithIgnoreMissing() throws Exception { public void testNonExistentWithIgnoreMissing() throws Exception { IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); - Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", "randomTarget", null, 10, true); + Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", "randomTarget", null, 10, true, null); processor.execute(ingestDocument); assertIngestDocument(originalIngestDocument, ingestDocument); } @@ -259,7 +258,7 @@ public void testNullWithoutIgnoreMissing() throws Exception { IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("source_field", null)); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); - Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", "randomTarget", null, 10, false); + Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", "randomTarget", null, 10, false, null); Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument)); assertThat(exception.getMessage(), equalTo("field [source_field] is null, cannot parse.")); } @@ -267,14 +266,20 @@ public void testNullWithoutIgnoreMissing() throws Exception { public void testNonExistentWithoutIgnoreMissing() throws Exception { IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); IngestDocument 
ingestDocument = new IngestDocument(originalIngestDocument); - Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", "randomTarget", null, 10, false); + Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", "randomTarget", null, 10, false, null); Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument)); assertThat(exception.getMessage(), equalTo("field [source_field] not present as part of path [source_field]")); } private Map parseDocument(String file, AttachmentProcessor processor) throws Exception { + return parseDocument(file, processor, new HashMap<>()); + } + + private Map parseDocument(String file, AttachmentProcessor processor, Map optionalFields) + throws Exception { Map document = new HashMap<>(); document.put("source_field", getAsBase64(file)); + document.putAll(optionalFields); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); processor.execute(ingestDocument); @@ -284,7 +289,47 @@ private Map parseDocument(String file, AttachmentProcessor proce return attachmentData; } - protected String getAsBase64(String filename) throws Exception { + public void testIndexedChars() throws Exception { + processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", + "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 19, false, null); + + Map attachmentData = parseDocument("text-in-english.txt", processor); + + assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length")); + assertThat(attachmentData.get("language"), is("en")); + assertThat(attachmentData.get("content"), is("\"God Save the Queen")); + assertThat(attachmentData.get("content_type").toString(), containsString("text/plain")); + assertThat(attachmentData.get("content_length"), is(19L)); + + processor = new AttachmentProcessor(randomAlphaOfLength(10), "source_field", + "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 19, false, "max_length"); + + attachmentData = parseDocument("text-in-english.txt", processor); + + assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length")); + assertThat(attachmentData.get("language"), is("en")); + assertThat(attachmentData.get("content"), is("\"God Save the Queen")); + assertThat(attachmentData.get("content_type").toString(), containsString("text/plain")); + assertThat(attachmentData.get("content_length"), is(19L)); + + attachmentData = parseDocument("text-in-english.txt", processor, Collections.singletonMap("max_length", 10)); + + assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length")); + assertThat(attachmentData.get("language"), is("sk")); + assertThat(attachmentData.get("content"), is("\"God Save")); + assertThat(attachmentData.get("content_type").toString(), containsString("text/plain")); + assertThat(attachmentData.get("content_length"), is(10L)); + + attachmentData = parseDocument("text-in-english.txt", processor, Collections.singletonMap("max_length", 100)); + + assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length")); + assertThat(attachmentData.get("language"), is("en")); + assertThat(attachmentData.get("content"), is("\"God Save the Queen\" (alternatively \"God Save the King\"")); + assertThat(attachmentData.get("content_type").toString(), containsString("text/plain")); + 
assertThat(attachmentData.get("content_length"), is(56L)); + } + + private String getAsBase64(String filename) throws Exception { String path = "/org/elasticsearch/ingest/attachment/test/sample-files/" + filename; try (InputStream is = AttachmentProcessorTests.class.getResourceAsStream(path)) { byte bytes[] = IOUtils.toByteArray(is); diff --git a/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/20_attachment_processor.yml b/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/20_attachment_processor.yml index cab1bfb591f7d..6a22071ba3829 100644 --- a/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/20_attachment_processor.yml +++ b/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/20_attachment_processor.yml @@ -112,3 +112,77 @@ - match: { _source.attachment.content: "This is an english text to tes" } - match: { _source.attachment.language: "en" } - match: { _source.attachment.content_length: 30 } + +--- +"Test indexed chars are configurable per document": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "attachment" : { + "field" : "field1", + "indexed_chars": 30, + "indexed_chars_field": "max_size" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: { field1: "VGhpcyBpcyBhbiBlbmdsaXNoIHRleHQgdG8gdGVzdCBpZiB0aGUgcGlwZWxpbmUgd29ya3M=" } + + - do: + get: + index: test + type: test + id: 1 + - length: { _source.attachment: 4 } + - match: { _source.attachment.content: "This is an english text to tes" } + - match: { _source.attachment.language: "en" } + - match: { _source.attachment.content_length: 30 } + + - do: + index: + index: test + type: test + id: 2 + pipeline: "my_pipeline" + body: { field1: "VGhpcyBpcyBhbiBlbmdsaXNoIHRleHQgdG8gdGVzdCBpZiB0aGUgcGlwZWxpbmUgd29ya3M=", "max_size": 18 } + + - do: + get: + index: test + type: test + id: 2 + - length: { _source.attachment: 4 } + - match: { _source.attachment.content: "This is an english" } + - match: { _source.attachment.language: "en" } + - match: { _source.attachment.content_length: 18 } + + - do: + index: + index: test + type: test + id: 3 + pipeline: "my_pipeline" + body: { field1: "VGhpcyBpcyBhbiBlbmdsaXNoIHRleHQgdG8gdGVzdCBpZiB0aGUgcGlwZWxpbmUgd29ya3M=", "max_size": 100000000 } + + - do: + get: + index: test + type: test + id: 3 + - length: { _source.attachment: 4 } + - match: { _source.attachment.content: "This is an english text to test if the pipeline works" } + - match: { _source.attachment.language: "en" } + - match: { _source.attachment.content_length: 54 } From eee728888ca849de26404f49ddf4f6aeae72ef6c Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Sun, 17 Jun 2018 00:14:59 +0300 Subject: [PATCH 17/41] [DOCS] Improve install and setup section for SQL JDBC --- x-pack/docs/en/sql/endpoints/jdbc.asciidoc | 133 ++++++++++++++++++++- 1 file changed, 129 insertions(+), 4 deletions(-) diff --git a/x-pack/docs/en/sql/endpoints/jdbc.asciidoc b/x-pack/docs/en/sql/endpoints/jdbc.asciidoc index 9ac197048ddae..067a4c586fb8e 100644 --- a/x-pack/docs/en/sql/endpoints/jdbc.asciidoc +++ b/x-pack/docs/en/sql/endpoints/jdbc.asciidoc @@ -6,10 +6,133 @@ Elasticsearch's SQL jdbc driver is a rich, fully featured JDBC driver for Elasti It is Type 4 driver, meaning it is a platform independent, stand-alone, Direct to Database, pure Java driver that converts JDBC calls to 
Elasticsearch SQL. -// TODO add example of resolving the artifact in maven and gradle. +[float] +=== Installation -You can connect to it using the two APIs offered -by JDBC, namely `java.sql.Driver` and `DriverManager`: +The JDBC driver can be obtained either by downloading it from the https://www.elastic.co/downloads/jdbc-client[elastic.co] site or by using a http://maven.apache.org/[Maven]-compatible tool with the following dependency: + +["source","xml",subs="attributes"] +---- +<dependency> + <groupId>org.elasticsearch.plugin.jdbc</groupId> + <artifactId>jdbc</artifactId> + <version>{ver}</version> +</dependency> +---- + +from `artifacts.elastic.co/maven` by adding it to the repositories list: + +["source","xml",subs="attributes"] +---- +<repositories> + <repository> + <id>elastic.co</id> + <url>https://artifacts.elastic.co/maven</url> + </repository> +</repositories> +---- + +[[jdbc-setup]] +[float] +=== Setup + +The driver main class is `org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver`. Note the driver +also implements the JDBC 4.0 +Service Provider+ mechanism meaning it is registered automatically +as long as it is available in the classpath. + +Once registered, the driver expects the following syntax as a URL: + +["source","text",subs="attributes"] +---- +jdbc:es://<1>[http|https]?<2>[host[:port]]*<3>/[prefix]*<4>[?[option=value]&<5>]* +---- + +<1> `jdbc:es://` prefix. Mandatory. +<2> type of HTTP connection to make - `http` (default) or `https`. Optional. +<3> host (`localhost` by default) and port (`9200` by default). Optional. +<4> prefix (empty by default). Typically used when hosting {es} under a certain path. Optional. +<5> Parameters for the JDBC driver. Empty by default. Optional. + +The driver recognizes the following parameters: + +[[jdbc-cfg]] +[float] +===== Essential + +`timezone` (default JVM timezone):: +Timezone used by the driver _per connection_, indicated by its `ID`. +*Highly* recommended to set it (to, say, `UTC`) as the JVM timezone can vary, is global for the entire JVM and can't be changed easily when running under a security manager. + +[[jdbc-cfg-network]] +[float] +===== Network + +`connect.timeout` (default 30s):: +Connection timeout (in seconds). That is the maximum amount of time waiting to make a connection to the server. + +`network.timeout` (default 60s):: +Network timeout (in seconds). That is the maximum amount of time waiting for the network. + +`page.timeout` (default 45s):: +Page timeout (in seconds). That is the maximum amount of time waiting for a page. + +`page.size` (default 1000):: +Page size (in entries). The number of results returned per page by the server. + +`query.timeout` (default 90s):: +Query timeout (in seconds). That is the maximum amount of time waiting for a query to return. + +[[jdbc-cfg-auth]] +[float] +==== Basic Authentication + +`user`:: Basic Authentication user name + +`password`:: Basic Authentication password + +[[jdbc-cfg-ssl]] +[float] +==== SSL + +`ssl` (default false):: Enable SSL + +`ssl.keystore.location`:: key store (if used) location + +`ssl.keystore.pass`:: key store password + +`ssl.keystore.type` (default `JKS`):: key store type. `PKCS12` is a common alternative format + +`ssl.truststore.location`:: trust store location + +`ssl.truststore.pass`:: trust store password + +`ssl.cert.allow.self.signed` (default `false`):: Whether or not to allow self-signed certificates + +`ssl.protocol` (default `TLS`):: SSL protocol to be used + +[float] +==== Proxy + +`proxy.http`:: HTTP proxy host name + +`proxy.socks`:: SOCKS proxy host name + + +To put all of it together, the following URL: + +["source","text",subs="attributes"] +---- +jdbc:es://http://server:3456/?timezone=UTC&page.size=250 +---- + +Opens up a {es-jdbc} connection to `server` on port `3456`, setting the JDBC timezone to `UTC` and its page size to `250` entries. + +=== API usage + +One can use JDBC through the official `java.sql` and `javax.sql` packages: + +==== `java.sql` +The former is used through `java.sql.Driver` and `DriverManager`: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- HTTP traffic. The port is by default 9200. <2> Properties for connecting to Elasticsearch. An empty `Properties` instance is fine for unsecured Elasticsearch. -or `javax.sql.DataSource` through +==== `javax.sql` + +Accessible through the `javax.sql.DataSource` API: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{jdbc-tests}/JdbcIntegrationTestCase.java[connect-ds] From a6e529fd406f1533158522df0476500a83b38d85 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Sun, 17 Jun 2018 00:40:01 +0300 Subject: [PATCH 18/41] [DOCS] Fix version in SQL JDBC Maven template --- x-pack/docs/en/sql/endpoints/jdbc.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/docs/en/sql/endpoints/jdbc.asciidoc b/x-pack/docs/en/sql/endpoints/jdbc.asciidoc index 067a4c586fb8e..a980278810e57 100644 --- a/x-pack/docs/en/sql/endpoints/jdbc.asciidoc +++ b/x-pack/docs/en/sql/endpoints/jdbc.asciidoc @@ -16,7 +16,7 @@ The JDBC driver can be obtained either by downloading it from the https://www.el <groupId>org.elasticsearch.plugin.jdbc</groupId> <artifactId>jdbc</artifactId> - <version>{ver}</version> + <version>{version}</version> ---- From 32f70f95f0d748ea57bea872c845ed23eb720881 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Sun, 17 Jun 2018 07:22:37 -0400 Subject: [PATCH 19/41] Test: Skip alias tests that failed all weekend I'll have a look at them on Monday.
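To make the JDBC section above concrete, here is what connecting and running a query through `DriverManager` looks like. This is a sketch that assumes a local, unsecured cluster and a hypothetical `library` index; the URL follows the `jdbc:es://` syntax documented above:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class JdbcConnectSketch {
    public static void main(String[] args) throws Exception {
        // URL follows the jdbc:es:// syntax documented above; parameters are optional.
        String url = "jdbc:es://http://localhost:9200/?timezone=UTC&page.size=250";
        Properties props = new Properties(); // empty is fine for an unsecured cluster
        try (Connection con = DriverManager.getConnection(url, props);
             Statement st = con.createStatement();
             // "library" is a hypothetical index with a "name" keyword field
             ResultSet rs = st.executeQuery("SELECT name FROM library LIMIT 5")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}
```

No explicit `Class.forName` call is needed since the driver registers itself through the JDBC 4.0 service provider mechanism described above.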
--- .../rest-api-spec/test/indices.get_alias/10_basic.yml | 4 +++- .../rest-api-spec/test/indices.get_alias/20_empty.yml | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index c447d91439224..42cadb9a8b08a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -19,7 +19,9 @@ setup: --- "Get all aliases via /_alias": - + - skip: + version: all + reason: Nik will look on Monday - do: indices.create: index: test_index_3 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/20_empty.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/20_empty.yml index 7405d99441b39..5a7c328fd5b3f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/20_empty.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/20_empty.yml @@ -11,7 +11,9 @@ setup: --- "Check empty aliases when getting all aliases via /_alias": - + - skip: + version: all + reason: Nik will look on Monday - do: indices.get_alias: {} From c693f67551de181cd5c578f8ddc1c7d23bd6e5d5 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Sun, 17 Jun 2018 13:32:53 +0200 Subject: [PATCH 20/41] Ensure we don't use a remote profile if cluster name matches (#31331) If we run into a race condition between a node being configured as a remote node for cross-cluster search etc. and that node joining the cluster, we might connect to that node with a remote profile. If that node then joins the cluster it connected to as a CCS remote node, we use the wrong profile and can't use bulk connections etc. anymore. This change uses the remote profile only if we connect to a node that has a different cluster name than the local cluster. This is not a perfect fix for this situation, but it is the safe option: we potentially only lose a small optimization of using fewer connections per node, which is minor anyway since we only connect to a small set of nodes.
Closes #29321 --- .../transport/RemoteClusterConnection.java | 33 ++++++- .../transport/TransportService.java | 20 ++-- .../RemoteClusterConnectionTests.java | 96 +++++++++++++++++++ .../transport/MockTcpTransport.java | 17 +++- 4 files changed, 154 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index b5557c6b534d9..cdc6b7787a198 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -91,6 +91,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo private volatile boolean skipUnavailable; private final ConnectHandler connectHandler; private SetOnce remoteClusterName = new SetOnce<>(); + private final ClusterName localClusterName; /** * Creates a new {@link RemoteClusterConnection} @@ -104,6 +105,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo RemoteClusterConnection(Settings settings, String clusterAlias, List seedNodes, TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate) { super(settings); + this.localClusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); this.transportService = transportService; this.maxNumRemoteConnections = maxNumRemoteConnections; this.nodePredicate = nodePredicate; @@ -314,6 +316,21 @@ public boolean isClosed() { return connectHandler.isClosed(); } + private ConnectionProfile getRemoteProfile(ClusterName name) { + // we can only compare the cluster name to make a decision if we should use a remote profile + // we can't use a cluster UUID here since we could be connecting to that remote cluster before + // the remote node has joined its cluster and have a cluster UUID. The fact that we just lose a + // rather smallish optimization on the connection layer under certain situations where remote clusters + // have the same name as the local one is minor here. + // the alternative here is to complicate the remote infrastructure to also wait until we formed a cluster, + // gained a cluster UUID and then start connecting etc. we rather use this simplification in order to maintain simplicity + if (this.localClusterName.equals(name)) { + return null; + } else { + return remoteProfile; + } + } + /** * The connect handler manages node discovery and the actual connect to the remote cluster. * There is at most one connect job running at any time. If such a connect job is triggered @@ -423,7 +440,6 @@ protected void doRun() { collectRemoteNodes(seedNodes.iterator(), transportService, listener); } }); - } void collectRemoteNodes(Iterator seedNodes, @@ -435,21 +451,27 @@ void collectRemoteNodes(Iterator seedNodes, if (seedNodes.hasNext()) { cancellableThreads.executeIO(() -> { final DiscoveryNode seedNode = seedNodes.next(); - final DiscoveryNode handshakeNode; + final TransportService.HandshakeResponse handshakeResponse; Transport.Connection connection = transportService.openConnection(seedNode, ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null)); boolean success = false; try { try { - handshakeNode = transportService.handshake(connection, remoteProfile.getHandshakeTimeout().millis(), + handshakeResponse = transportService.handshake(connection, remoteProfile.getHandshakeTimeout().millis(), (c) -> remoteClusterName.get() == null ? 
true : c.equals(remoteClusterName.get())); } catch (IllegalStateException ex) { logger.warn(() -> new ParameterizedMessage("seed node {} cluster name mismatch expected " + "cluster name {}", connection.getNode(), remoteClusterName.get()), ex); throw ex; } + + final DiscoveryNode handshakeNode = handshakeResponse.getDiscoveryNode(); if (nodePredicate.test(handshakeNode) && connectedNodes.size() < maxNumRemoteConnections) { - transportService.connectToNode(handshakeNode, remoteProfile); + transportService.connectToNode(handshakeNode, getRemoteProfile(handshakeResponse.getClusterName())); + if (remoteClusterName.get() == null) { + assert handshakeResponse.getClusterName().value() != null; + remoteClusterName.set(handshakeResponse.getClusterName()); + } connectedNodes.add(handshakeNode); } ClusterStateRequest request = new ClusterStateRequest(); @@ -556,7 +578,8 @@ public void handleResponse(ClusterStateResponse response) { for (DiscoveryNode node : nodesIter) { if (nodePredicate.test(node) && connectedNodes.size() < maxNumRemoteConnections) { try { - transportService.connectToNode(node, remoteProfile); // noop if node is connected + transportService.connectToNode(node, getRemoteProfile(remoteClusterName.get())); // noop if node is + // connected connectedNodes.add(node); } catch (ConnectTransportException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 7798129f6a883..656d8c3841769 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -341,8 +341,8 @@ public void connectToNode(final DiscoveryNode node, ConnectionProfile connection return; } transport.connectToNode(node, connectionProfile, (newConnection, actualProfile) -> { - // We don't validate cluster names to allow for tribe node connections. - final DiscoveryNode remote = handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true); + // We don't validate cluster names to allow for CCS connections. + final DiscoveryNode remote = handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true).discoveryNode; if (validateConnections && node.equals(remote) == false) { throw new ConnectTransportException(node, "handshake failed. 
unexpected remote node " + remote); } @@ -378,7 +378,7 @@ public Transport.Connection openConnection(final DiscoveryNode node, ConnectionP public DiscoveryNode handshake( final Transport.Connection connection, final long handshakeTimeout) throws ConnectTransportException { - return handshake(connection, handshakeTimeout, clusterName::equals); + return handshake(connection, handshakeTimeout, clusterName::equals).discoveryNode; } /** @@ -390,11 +390,11 @@ public DiscoveryNode handshake( * @param connection the connection to a specific node * @param handshakeTimeout handshake timeout * @param clusterNamePredicate cluster name validation predicate - * @return the connected node + * @return the handshake response * @throws ConnectTransportException if the connection failed * @throws IllegalStateException if the handshake failed */ - public DiscoveryNode handshake( + public HandshakeResponse handshake( final Transport.Connection connection, final long handshakeTimeout, Predicate clusterNamePredicate) throws ConnectTransportException { final HandshakeResponse response; @@ -420,7 +420,7 @@ public HandshakeResponse newInstance() { throw new IllegalStateException("handshake failed, incompatible version [" + response.version + "] - " + node); } - return response.discoveryNode; + return response; } static class HandshakeRequest extends TransportRequest { @@ -461,6 +461,14 @@ public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); Version.writeVersion(version, out); } + + public DiscoveryNode getDiscoveryNode() { + return discoveryNode; + } + + public ClusterName getClusterName() { + return clusterName; + } } public void disconnectFromNode(DiscoveryNode node) { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index a11e804c947a5..e1fe265a4d7e2 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -149,6 +149,102 @@ public static MockTransportService startTransport( } } + public void testLocalProfileIsUsedForLocalCluster() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); + DiscoveryNode discoverableNode = discoverableTransport.getLocalDiscoNode(); + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(seedNode)); + assertTrue(service.nodeConnected(seedNode)); + assertTrue(service.nodeConnected(discoverableNode)); + assertTrue(connection.assertNoRunningConnections()); + PlainTransportFuture futureHandler = new PlainTransportFuture<>( + new FutureTransportResponseHandler() { + @Override + public ClusterSearchShardsResponse 
read(StreamInput in) throws IOException { + ClusterSearchShardsResponse inst = new ClusterSearchShardsResponse(); + inst.readFrom(in); + return inst; + } + }); + TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.BULK) + .build(); + service.sendRequest(connection.getConnection(), ClusterSearchShardsAction.NAME, new ClusterSearchShardsRequest(), + options, futureHandler); + futureHandler.txGet(); + } + } + } + } + + public void testRemoteProfileIsUsedForRemoteCluster() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, threadPool, + Settings.builder().put("cluster.name", "foobar").build()); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT, + threadPool, Settings.builder().put("cluster.name", "foobar").build())) { + DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); + DiscoveryNode discoverableNode = discoverableTransport.getLocalDiscoNode(); + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(seedNode)); + assertTrue(service.nodeConnected(seedNode)); + assertTrue(service.nodeConnected(discoverableNode)); + assertTrue(connection.assertNoRunningConnections()); + PlainTransportFuture futureHandler = new PlainTransportFuture<>( + new FutureTransportResponseHandler() { + @Override + public ClusterSearchShardsResponse read(StreamInput in) throws IOException { + ClusterSearchShardsResponse inst = new ClusterSearchShardsResponse(); + inst.readFrom(in); + return inst; + } + }); + TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.BULK) + .build(); + IllegalStateException ise = (IllegalStateException) expectThrows(SendRequestTransportException.class, () -> { + service.sendRequest(discoverableNode, + ClusterSearchShardsAction.NAME, new ClusterSearchShardsRequest(), options, futureHandler); + futureHandler.txGet(); + }).getCause(); + assertEquals(ise.getMessage(), "can't select channel size is 0 for types: [RECOVERY, BULK, STATE]"); + + PlainTransportFuture handler = new PlainTransportFuture<>( + new FutureTransportResponseHandler() { + @Override + public ClusterSearchShardsResponse read(StreamInput in) throws IOException { + ClusterSearchShardsResponse inst = new ClusterSearchShardsResponse(); + inst.readFrom(in); + return inst; + } + }); + TransportRequestOptions ops = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.REG) + .build(); + service.sendRequest(connection.getConnection(), ClusterSearchShardsAction.NAME, new ClusterSearchShardsRequest(), + ops, handler); + handler.txGet(); + } + } + } + } + public void testDiscoverSingleNode() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); diff --git 
a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index 2202d7a7647df..0655a6d871197 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -198,7 +198,22 @@ protected MockChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeo @Override protected ConnectionProfile resolveConnectionProfile(ConnectionProfile connectionProfile) { ConnectionProfile connectionProfile1 = resolveConnectionProfile(connectionProfile, defaultConnectionProfile); - ConnectionProfile.Builder builder = new ConnectionProfile.Builder(LIGHT_PROFILE); + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + Set allTypesWithConnection = new HashSet<>(); + Set allTypesWithoutConnection = new HashSet<>(); + for (ConnectionProfile.ConnectionTypeHandle handle : connectionProfile1.getHandles()) { + Set types = handle.getTypes(); + if (handle.length > 0) { + allTypesWithConnection.addAll(types); + } else { + allTypesWithoutConnection.addAll(types); + } + } + // make sure we maintain at least the types that are supported by this profile even if we only use a single channel for them. + builder.addConnections(1, allTypesWithConnection.toArray(new TransportRequestOptions.Type[0])); + if (allTypesWithoutConnection.isEmpty() == false) { + builder.addConnections(0, allTypesWithoutConnection.toArray(new TransportRequestOptions.Type[0])); + } builder.setHandshakeTimeout(connectionProfile1.getHandshakeTimeout()); builder.setConnectTimeout(connectionProfile1.getConnectTimeout()); return builder.build(); From 2e2473fb7751e458fb72aa3cb497616dd70cc065 Mon Sep 17 00:00:00 2001 From: Yuri Tceretian Date: Tue, 12 Jun 2018 14:45:27 +0300 Subject: [PATCH 21/41] Delete typos in SAML docs (#31199) Fixes a couple of small typos in SAML documentation --- x-pack/docs/en/security/authentication/saml-guide.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index a57cfaec84c43..7139f4f81987d 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -192,7 +192,7 @@ attribute.groups:: See <>. When a user connects to {kib} through your Identity Provider, the Identity Provider will supply a SAML Assertion about the user. The assertion will contain an _Authentication Statement_ indicating that the user has successfully -authenticated to the IdP and one ore more _Attribute Statements_ that will +authenticated to the IdP and one or more _Attribute Statements_ that will include _Attributes_ for the user. These attributes may include such things as: @@ -213,7 +213,7 @@ customise the URIs and their associated value. logged in, and they can be used for role mapping (below). In order for these attributes to be useful, {es} and the IdP need to have a -common via for the names of the attributes. This is done manually, by +common value for the names of the attributes. This is done manually, by configuring the IdP and the {security} SAML realm to use the same URI name for each logical user attribute. 
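To spell out the new call pattern from the handshake change in patch 20: callers now receive both the remote node and its cluster name, and choose the connection profile based on the latter. A compilable sketch using only the signatures visible in the diffs (passing a `null` profile falls back to the default one):

```java
package org.elasticsearch.transport; // HandshakeResponse is package-private

import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;

class HandshakeSketch {
    static void connectWithRightProfile(TransportService transportService, DiscoveryNode seedNode,
                                        ClusterName localClusterName, ConnectionProfile remoteProfile,
                                        long handshakeTimeoutMillis) {
        // Open a lightweight single-channel connection just for the handshake.
        Transport.Connection connection = transportService.openConnection(seedNode,
                ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null));
        TransportService.HandshakeResponse handshake =
                transportService.handshake(connection, handshakeTimeoutMillis, cn -> true);
        // Same cluster name as ours: pass null to fall back to the default (local) profile.
        // Different cluster name: use the dedicated remote profile.
        ConnectionProfile profile =
                localClusterName.equals(handshake.getClusterName()) ? null : remoteProfile;
        transportService.connectToNode(handshake.getDiscoveryNode(), profile);
    }
}
```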
From 2d184e3fe5e5f6a2b22585d9cdb83c1f3bf3bb89 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 18 Jun 2018 10:49:22 +0200 Subject: [PATCH 22/41] [Test] Fix :example-plugins:rest-handler on Windows --- plugins/examples/rest-handler/build.gradle | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index 2c55c3c79fce7..cfe84e6a45a93 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -30,10 +30,9 @@ test.enabled = false task exampleFixture(type: org.elasticsearch.gradle.test.AntFixture) { dependsOn testClasses + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" executable = new File(project.runtimeJavaHome, 'bin/java') - args '-cp', "${ -> project.sourceSets.test.runtimeClasspath.asPath }", - 'org.elasticsearch.example.resthandler.ExampleFixture', - baseDir, 'TEST' + args 'org.elasticsearch.example.resthandler.ExampleFixture', baseDir, 'TEST' } integTestCluster { From a26b9b76a7ebc017e1a9421b1e2200d89dc04a9d Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Mon, 18 Jun 2018 09:46:12 +0100 Subject: [PATCH 23/41] Expose lucene's RemoveDuplicatesTokenFilter (#31275) --- docs/reference/analysis/tokenfilters.asciidoc | 4 +- .../remove-duplicates-tokenfilter.asciidoc | 5 ++ .../analysis/common/CommonAnalysisPlugin.java | 1 + .../RemoveDuplicatesTokenFilterFactory.java | 42 +++++++++++++ .../RemoveDuplicatesFilterFactoryTests.java | 61 +++++++++++++++++++ 5 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 docs/reference/analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc create mode 100644 modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RemoveDuplicatesTokenFilterFactory.java create mode 100644 modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RemoveDuplicatesFilterFactoryTests.java diff --git a/docs/reference/analysis/tokenfilters.asciidoc b/docs/reference/analysis/tokenfilters.asciidoc index 6e77b4498650d..dd5cb2e702cff 100644 --- a/docs/reference/analysis/tokenfilters.asciidoc +++ b/docs/reference/analysis/tokenfilters.asciidoc @@ -95,4 +95,6 @@ include::tokenfilters/decimal-digit-tokenfilter.asciidoc[] include::tokenfilters/fingerprint-tokenfilter.asciidoc[] -include::tokenfilters/minhash-tokenfilter.asciidoc[] \ No newline at end of file +include::tokenfilters/minhash-tokenfilter.asciidoc[] + +include::tokenfilters/remove-duplicates-tokenfilter.asciidoc[] \ No newline at end of file diff --git a/docs/reference/analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc new file mode 100644 index 0000000000000..594e18eaf7f7e --- /dev/null +++ b/docs/reference/analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc @@ -0,0 +1,5 @@ +[[analysis-remove-duplicates-tokenfilter]] +=== Remove Duplicates Token Filter + +A token filter of type `remove_duplicates` that drops identical tokens at the +same position. 
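In practice this filter is most useful after `keyword_repeat` plus a stemmer: that pair emits the original and the stemmed token at the same position, and whenever the stemmer leaves a word unchanged the two copies are identical, so `remove_duplicates` drops the redundant one. A hypothetical `_analyze` request illustrating this (not part of the patch):

    POST _analyze
    {
      "tokenizer": "whitespace",
      "filter": ["keyword_repeat", "porter_stem", "remove_duplicates"],
      "text": "jumping jump"
    }

For "jumping" the stemmed form differs, so both tokens survive at the first position; for "jump" the stem is identical, so only a single token remains at the second position.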
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 722d75a9293f7..04df77245438c 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -194,6 +194,7 @@ public Map> getTokenFilters() { filters.put("pattern_replace", requriesAnalysisSettings(PatternReplaceTokenFilterFactory::new)); filters.put("persian_normalization", PersianNormalizationFilterFactory::new); filters.put("porter_stem", PorterStemTokenFilterFactory::new); + filters.put("remove_duplicates", RemoveDuplicatesTokenFilterFactory::new); filters.put("reverse", ReverseTokenFilterFactory::new); filters.put("russian_stem", RussianStemTokenFilterFactory::new); filters.put("scandinavian_folding", ScandinavianFoldingFilterFactory::new); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RemoveDuplicatesTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RemoveDuplicatesTokenFilterFactory.java new file mode 100644 index 0000000000000..a136c5573121e --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RemoveDuplicatesTokenFilterFactory.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.miscellaneous.RemoveDuplicatesTokenFilter; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; + +/** + * Filter factory for the lucene RemoveDuplicatesTokenFilter + */ +class RemoveDuplicatesTokenFilterFactory extends AbstractTokenFilterFactory { + + RemoveDuplicatesTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(indexSettings, name, settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new RemoveDuplicatesTokenFilter(tokenStream); + } +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RemoveDuplicatesFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RemoveDuplicatesFilterFactoryTests.java new file mode 100644 index 0000000000000..8180985416f52 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RemoveDuplicatesFilterFactoryTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.CannedTokenStream; +import org.apache.lucene.analysis.Token; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.analysis.AnalysisTestsHelper; +import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.ESTokenStreamTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.instanceOf; + +public class RemoveDuplicatesFilterFactoryTests extends ESTokenStreamTestCase { + + public void testRemoveDuplicatesFilter() throws IOException { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("index.analysis.filter.removedups.type", "remove_duplicates") + .build(); + ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("removedups"); + assertThat(tokenFilter, instanceOf(RemoveDuplicatesTokenFilterFactory.class)); + + CannedTokenStream cts = new CannedTokenStream( + new Token("a", 1, 0, 1), + new Token("b", 1, 2, 3), + new Token("c", 0, 2, 3), + new Token("b", 0, 2, 3), + new Token("d", 1, 4, 5) + ); + + assertTokenStreamContents(tokenFilter.create(cts), new String[]{ + "a", "b", "c", "d" + }, new int[]{ + 1, 1, 0, 1 + }); + } + +} From e81ee30865cf0fb54564f0e6b375cfa1c5c695a2 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 18 Jun 2018 11:24:43 +0200 Subject: [PATCH 24/41] Move language analyzers from server to analysis-common module. (#31300) The following analyzers were moved from server module to analysis-common module: `greek`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`, `lithuanian`, `norwegian`, `persian`, `portuguese`, `romanian`, `russian`, `sorani`, `spanish`, `swedish`, `turkish` and `thai`. 
Relates to #23658 --- .../analysis/common/CommonAnalysisPlugin.java | 62 ++- .../common}/GreekAnalyzerProvider.java | 6 +- .../common}/HindiAnalyzerProvider.java | 6 +- .../common}/HungarianAnalyzerProvider.java | 6 +- .../common}/IndonesianAnalyzerProvider.java | 6 +- .../common}/IrishAnalyzerProvider.java | 6 +- .../common}/ItalianAnalyzerProvider.java | 6 +- .../common}/LatvianAnalyzerProvider.java | 6 +- .../common}/LithuanianAnalyzerProvider.java | 6 +- .../common}/NorwegianAnalyzerProvider.java | 6 +- .../common}/PersianAnalyzerProvider.java | 6 +- .../common}/PortugueseAnalyzerProvider.java | 6 +- .../common}/RomanianAnalyzerProvider.java | 6 +- .../common}/RussianAnalyzerProvider.java | 6 +- .../common}/SoraniAnalyzerProvider.java | 6 +- .../common}/SpanishAnalyzerProvider.java | 6 +- .../common}/SwedishAnalyzerProvider.java | 6 +- .../common}/ThaiAnalyzerProvider.java | 6 +- .../common}/TurkishAnalyzerProvider.java | 6 +- .../test/analysis-common/20_analyzers.yml | 522 ++++++++++++++++++ .../indices/analysis/AnalysisModule.java | 36 -- .../indices/analysis/PreBuiltAnalyzers.java | 180 ------ .../indices/analysis/AnalysisModuleTests.java | 2 +- 23 files changed, 655 insertions(+), 255 deletions(-) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/GreekAnalyzerProvider.java (84%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/HindiAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/HungarianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/IndonesianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/IrishAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/ItalianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/LatvianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/LithuanianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/NorwegianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/PersianAnalyzerProvider.java (84%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/PortugueseAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/RomanianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/RussianAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => 
modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/SoraniAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/SpanishAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/SwedishAnalyzerProvider.java (85%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/ThaiAnalyzerProvider.java (84%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/TurkishAnalyzerProvider.java (85%) diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 04df77245438c..cdd8101a73c70 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -37,6 +37,7 @@ import org.apache.lucene.analysis.cjk.CJKAnalyzer; import org.apache.lucene.analysis.cjk.CJKBigramFilter; import org.apache.lucene.analysis.cjk.CJKWidthFilter; +import org.apache.lucene.analysis.ckb.SoraniAnalyzer; import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter; import org.apache.lucene.analysis.commongrams.CommonGramsFilter; import org.apache.lucene.analysis.core.DecimalDigitFilter; @@ -52,17 +53,27 @@ import org.apache.lucene.analysis.de.GermanAnalyzer; import org.apache.lucene.analysis.de.GermanNormalizationFilter; import org.apache.lucene.analysis.de.GermanStemFilter; +import org.apache.lucene.analysis.el.GreekAnalyzer; import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.en.KStemFilter; import org.apache.lucene.analysis.en.PorterStemFilter; +import org.apache.lucene.analysis.es.SpanishAnalyzer; import org.apache.lucene.analysis.eu.BasqueAnalyzer; +import org.apache.lucene.analysis.fa.PersianAnalyzer; import org.apache.lucene.analysis.fa.PersianNormalizationFilter; import org.apache.lucene.analysis.fi.FinnishAnalyzer; import org.apache.lucene.analysis.fr.FrenchAnalyzer; +import org.apache.lucene.analysis.ga.IrishAnalyzer; import org.apache.lucene.analysis.gl.GalicianAnalyzer; +import org.apache.lucene.analysis.hi.HindiAnalyzer; import org.apache.lucene.analysis.hi.HindiNormalizationFilter; +import org.apache.lucene.analysis.hu.HungarianAnalyzer; import org.apache.lucene.analysis.hy.ArmenianAnalyzer; +import org.apache.lucene.analysis.id.IndonesianAnalyzer; import org.apache.lucene.analysis.in.IndicNormalizationFilter; +import org.apache.lucene.analysis.it.ItalianAnalyzer; +import org.apache.lucene.analysis.lt.LithuanianAnalyzer; +import org.apache.lucene.analysis.lv.LatvianAnalyzer; import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter; import org.apache.lucene.analysis.miscellaneous.DisableGraphAttribute; import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter; @@ -79,19 +90,26 @@ import org.apache.lucene.analysis.ngram.NGramTokenFilter; import org.apache.lucene.analysis.ngram.NGramTokenizer; import org.apache.lucene.analysis.nl.DutchAnalyzer; +import org.apache.lucene.analysis.no.NorwegianAnalyzer; import 
org.apache.lucene.analysis.path.PathHierarchyTokenizer; import org.apache.lucene.analysis.pattern.PatternTokenizer; import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter; import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter; +import org.apache.lucene.analysis.pt.PortugueseAnalyzer; import org.apache.lucene.analysis.reverse.ReverseStringFilter; +import org.apache.lucene.analysis.ro.RomanianAnalyzer; +import org.apache.lucene.analysis.ru.RussianAnalyzer; import org.apache.lucene.analysis.shingle.ShingleFilter; import org.apache.lucene.analysis.snowball.SnowballFilter; import org.apache.lucene.analysis.standard.ClassicFilter; import org.apache.lucene.analysis.standard.ClassicTokenizer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer; +import org.apache.lucene.analysis.sv.SwedishAnalyzer; +import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.apache.lucene.analysis.th.ThaiTokenizer; import org.apache.lucene.analysis.tr.ApostropheFilter; +import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.apache.lucene.analysis.util.ElisionFilter; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; @@ -130,6 +148,8 @@ public Map>> getAn analyzers.put("standard_html_strip", StandardHtmlStripAnalyzerProvider::new); analyzers.put("pattern", PatternAnalyzerProvider::new); analyzers.put("snowball", SnowballAnalyzerProvider::new); + + // Language analyzers: analyzers.put("arabic", ArabicAnalyzerProvider::new); analyzers.put("armenian", ArmenianAnalyzerProvider::new); analyzers.put("basque", BasqueAnalyzerProvider::new); @@ -147,6 +167,24 @@ public Map>> getAn analyzers.put("french", FrenchAnalyzerProvider::new); analyzers.put("galician", GalicianAnalyzerProvider::new); analyzers.put("german", GermanAnalyzerProvider::new); + analyzers.put("greek", GreekAnalyzerProvider::new); + analyzers.put("hindi", HindiAnalyzerProvider::new); + analyzers.put("hungarian", HungarianAnalyzerProvider::new); + analyzers.put("indonesian", IndonesianAnalyzerProvider::new); + analyzers.put("irish", IrishAnalyzerProvider::new); + analyzers.put("italian", ItalianAnalyzerProvider::new); + analyzers.put("latvian", LatvianAnalyzerProvider::new); + analyzers.put("lithuanian", LithuanianAnalyzerProvider::new); + analyzers.put("norwegian", NorwegianAnalyzerProvider::new); + analyzers.put("persian", PersianAnalyzerProvider::new); + analyzers.put("portuguese", PortugueseAnalyzerProvider::new); + analyzers.put("romanian", RomanianAnalyzerProvider::new); + analyzers.put("russian", RussianAnalyzerProvider::new); + analyzers.put("sorani", SoraniAnalyzerProvider::new); + analyzers.put("spanish", SpanishAnalyzerProvider::new); + analyzers.put("swedish", SwedishAnalyzerProvider::new); + analyzers.put("turkish", TurkishAnalyzerProvider::new); + analyzers.put("thai", ThaiAnalyzerProvider::new); return analyzers; } @@ -248,13 +286,15 @@ public Map> getTokenizers() { @Override public List getPreBuiltAnalyzerProviderFactories() { List analyzers = new ArrayList<>(); - analyzers.add(new PreBuiltAnalyzerProviderFactory("standard_html_strip", CachingStrategy.LUCENE, + analyzers.add(new PreBuiltAnalyzerProviderFactory("standard_html_strip", CachingStrategy.ELASTICSEARCH, () -> new StandardHtmlStripAnalyzer(CharArraySet.EMPTY_SET))); analyzers.add(new PreBuiltAnalyzerProviderFactory("pattern", CachingStrategy.ELASTICSEARCH, () -> new PatternAnalyzer(Regex.compile("\\W+" 
/*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET))); analyzers.add(new PreBuiltAnalyzerProviderFactory("snowball", CachingStrategy.LUCENE, () -> new SnowballAnalyzer("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET))); + + // Language analyzers: analyzers.add(new PreBuiltAnalyzerProviderFactory("arabic", CachingStrategy.LUCENE, ArabicAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("armenian", CachingStrategy.LUCENE, ArmenianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("basque", CachingStrategy.LUCENE, BasqueAnalyzer::new)); @@ -263,7 +303,7 @@ public List getPreBuiltAnalyzerProviderFactorie analyzers.add(new PreBuiltAnalyzerProviderFactory("bulgarian", CachingStrategy.LUCENE, BulgarianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("catalan", CachingStrategy.LUCENE, CatalanAnalyzer::new)); // chinese analyzer: only for old indices, best effort - analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.LUCENE, StandardAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.ONE, StandardAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("cjk", CachingStrategy.LUCENE, CJKAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("czech", CachingStrategy.LUCENE, CzechAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("danish", CachingStrategy.LUCENE, DanishAnalyzer::new)); @@ -273,6 +313,24 @@ public List getPreBuiltAnalyzerProviderFactorie analyzers.add(new PreBuiltAnalyzerProviderFactory("french", CachingStrategy.LUCENE, FrenchAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("galician", CachingStrategy.LUCENE, GalicianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("german", CachingStrategy.LUCENE, GermanAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("greek", CachingStrategy.LUCENE, GreekAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("hindi", CachingStrategy.LUCENE, HindiAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("hungarian", CachingStrategy.LUCENE, HungarianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("indonesian", CachingStrategy.LUCENE, IndonesianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("irish", CachingStrategy.LUCENE, IrishAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("italian", CachingStrategy.LUCENE, ItalianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("latvian", CachingStrategy.LUCENE, LatvianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("lithuanian", CachingStrategy.LUCENE, LithuanianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("norwegian", CachingStrategy.LUCENE, NorwegianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("persian", CachingStrategy.LUCENE, PersianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("portuguese", CachingStrategy.LUCENE, PortugueseAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("romanian", CachingStrategy.LUCENE, RomanianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("russian", CachingStrategy.LUCENE, RussianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("sorani", CachingStrategy.LUCENE, SoraniAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("spanish", CachingStrategy.LUCENE, SpanishAnalyzer::new)); + 
analyzers.add(new PreBuiltAnalyzerProviderFactory("swedish", CachingStrategy.LUCENE, SwedishAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("turkish", CachingStrategy.LUCENE, TurkishAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("thai", CachingStrategy.LUCENE, ThaiAnalyzer::new)); return analyzers; } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/GreekAnalyzerProvider.java similarity index 84% rename from server/src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/GreekAnalyzerProvider.java index 012fc64c97166..3cf1f911e48cc 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/GreekAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/GreekAnalyzerProvider.java @@ -17,18 +17,20 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.el.GreekAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class GreekAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final GreekAnalyzer analyzer; - public GreekAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + GreekAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new GreekAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, GreekAnalyzer.getDefaultStopSet())); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HindiAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HindiAnalyzerProvider.java index 43ebe2677aec7..7b73c119a86f6 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/HindiAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HindiAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.hi.HindiAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class HindiAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final HindiAnalyzer analyzer; - public HindiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + HindiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new HindiAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, HindiAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HungarianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HungarianAnalyzerProvider.java index 81745ba0f2716..5ce948819c6ae 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/HungarianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HungarianAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.hu.HungarianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class HungarianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final HungarianAnalyzer analyzer; - public HungarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + HungarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new HungarianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, HungarianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/IndonesianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/IndonesianAnalyzerProvider.java index d5633025804e9..d20185deb4509 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/IndonesianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/IndonesianAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.id.IndonesianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class IndonesianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final IndonesianAnalyzer analyzer; - public IndonesianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + IndonesianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new IndonesianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, IndonesianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/IrishAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/IrishAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/IrishAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/IrishAnalyzerProvider.java index 69250df736052..dae7862b76bb6 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/IrishAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/IrishAnalyzerProvider.java @@ -17,13 +17,15 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.ga.IrishAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; /** * Provider for {@link IrishAnalyzer} @@ -32,7 +34,7 @@ public class IrishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final ItalianAnalyzer analyzer; - public ItalianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + ItalianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new ItalianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, ItalianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LatvianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LatvianAnalyzerProvider.java index a7731f352b997..668f91f6ea478 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/LatvianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LatvianAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.lv.LatvianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class LatvianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final LatvianAnalyzer analyzer; - public LatvianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + LatvianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new LatvianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, LatvianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LithuanianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LithuanianAnalyzerProvider.java index 307904b89427f..8a355c9c3f8c8 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LithuanianAnalyzerProvider.java @@ -17,13 +17,15 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.lt.LithuanianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; /** * Provider for {@link LithuanianAnalyzer} @@ -32,7 +34,7 @@ public class LithuanianAnalyzerProvider extends AbstractIndexAnalyzerProvider

<LithuanianAnalyzer> { private final LithuanianAnalyzer analyzer; - public LithuanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + LithuanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new LithuanianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, LithuanianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NorwegianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NorwegianAnalyzerProvider.java --- a/server/src/main/java/org/elasticsearch/index/analysis/NorwegianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NorwegianAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.no.NorwegianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class NorwegianAnalyzerProvider extends AbstractIndexAnalyzerProvider<NorwegianAnalyzer> { private final NorwegianAnalyzer analyzer; - public NorwegianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + NorwegianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new NorwegianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, NorwegianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianAnalyzerProvider.java similarity index 84% rename from server/src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianAnalyzerProvider.java index ed92e3e0c02d6..74c3a95a57766 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PersianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianAnalyzerProvider.java @@ -17,18 +17,20 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.fa.PersianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class PersianAnalyzerProvider extends AbstractIndexAnalyzerProvider<PersianAnalyzer> { private final PersianAnalyzer analyzer; - public PersianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + PersianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new PersianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, PersianAnalyzer.getDefaultStopSet())); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PortugueseAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PortugueseAnalyzerProvider.java index 73dc016fe6965..aba9c9980c9e1 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PortugueseAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PortugueseAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License.
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.pt.PortugueseAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class PortugueseAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final PortugueseAnalyzer analyzer; - public PortugueseAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + PortugueseAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new PortugueseAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, PortugueseAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RomanianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RomanianAnalyzerProvider.java index 5eeb22f5118d2..f1ff399272c38 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/RomanianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RomanianAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.ro.RomanianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class RomanianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final RomanianAnalyzer analyzer; - public RomanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + RomanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new RomanianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, RomanianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RussianAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RussianAnalyzerProvider.java index 6e57603794cd2..2d20398a7fff8 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/RussianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/RussianAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.ru.RussianAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class RussianAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final RussianAnalyzer analyzer; - public RussianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + RussianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new RussianAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, RussianAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/SoraniAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SoraniAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/SoraniAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SoraniAnalyzerProvider.java index d3b9fcd3f5c47..e2f565a7ec560 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/SoraniAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SoraniAnalyzerProvider.java @@ -17,13 +17,15 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.ckb.SoraniAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; /** * Provider for {@link SoraniAnalyzer} @@ -32,7 +34,7 @@ public class SoraniAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final SpanishAnalyzer analyzer; - public SpanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + SpanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new SpanishAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, SpanishAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SwedishAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SwedishAnalyzerProvider.java index 066d2eef78bd1..3fb3279e79a3e 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/SwedishAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SwedishAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class SwedishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final SwedishAnalyzer analyzer; - public SwedishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + SwedishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new SwedishAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, SwedishAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ThaiAnalyzerProvider.java similarity index 84% rename from server/src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ThaiAnalyzerProvider.java index 119eb81d7482d..9d53659d662bc 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/ThaiAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ThaiAnalyzerProvider.java @@ -17,18 +17,20 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class ThaiAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final ThaiAnalyzer analyzer; - public ThaiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + ThaiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new ThaiAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, ThaiAnalyzer.getDefaultStopSet())); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TurkishAnalyzerProvider.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TurkishAnalyzerProvider.java index 0eaf49afdc82f..10df3b2baab49 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/TurkishAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TurkishAnalyzerProvider.java @@ -17,19 +17,21 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; public class TurkishAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final TurkishAnalyzer analyzer; - public TurkishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + TurkishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); analyzer = new TurkishAnalyzer( Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, TurkishAnalyzer.getDefaultStopSet()), diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml index fa0476c0f11aa..fa8f6eef8b924 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml @@ -592,3 +592,525 @@ analyzer: my_analyzer - length: { tokens: 1 } - match: { tokens.0.token: tisch } + +--- +"greek": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: greek + + - do: + indices.analyze: + body: + text: Μία + analyzer: greek + - length: { tokens: 1 } + - match: { tokens.0.token: μια } + + - do: + indices.analyze: + index: test + body: + text: Μία + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: μια } + +--- +"hindi": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: hindi + + - do: + indices.analyze: + body: + text: हिन्दी + analyzer: hindi + - length: { tokens: 1 } + - match: { tokens.0.token: हिंद } + + - do: + indices.analyze: + index: test + body: + text: हिन्दी + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: हिंद } + +--- +"hungarian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: hungarian + + - do: + indices.analyze: + body: + text: babakocsi + analyzer: hungarian + - length: { tokens: 1 } + - match: { tokens.0.token: babakocs } + + - do: + indices.analyze: + index: test + body: + text: babakocsi + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: babakocs } + +--- +"indonesian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: indonesian + + - do: + indices.analyze: + body: + text: peledakan + analyzer: indonesian + - length: { tokens: 1 } + - match: { tokens.0.token: ledak } + + - do: + indices.analyze: + index: test + body: + text: peledakan + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: ledak } + +--- +"irish": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: irish + + - do: + indices.analyze: + body: + text: siopadóireacht + analyzer: irish + - length: { tokens: 1 } + - match: { tokens.0.token: siopadóir } + + - do: + indices.analyze: + index: test + body: + 
text: siopadóireacht + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: siopadóir } + +--- +"italian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: italian + + - do: + indices.analyze: + body: + text: abbandonata + analyzer: italian + - length: { tokens: 1 } + - match: { tokens.0.token: abbandonat } + + - do: + indices.analyze: + index: test + body: + text: abbandonata + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: abbandonat } + +--- +"latvian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: latvian + + - do: + indices.analyze: + body: + text: tirgiem + analyzer: latvian + - length: { tokens: 1 } + - match: { tokens.0.token: tirg } + + - do: + indices.analyze: + index: test + body: + text: tirgiem + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: tirg } + +--- +"lithuanian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: lithuanian + + - do: + indices.analyze: + body: + text: vaikų + analyzer: lithuanian + - length: { tokens: 1 } + - match: { tokens.0.token: vaik } + + - do: + indices.analyze: + index: test + body: + text: vaikų + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: vaik } + +--- +"norwegian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: norwegian + + - do: + indices.analyze: + body: + text: havnedistriktene + analyzer: norwegian + - length: { tokens: 1 } + - match: { tokens.0.token: havnedistrikt } + + - do: + indices.analyze: + index: test + body: + text: havnedistriktene + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: havnedistrikt } + +--- +"persian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: persian + + - do: + indices.analyze: + body: + text: می‌خورد + analyzer: persian + - length: { tokens: 1 } + - match: { tokens.0.token: خورد } + + - do: + indices.analyze: + index: test + body: + text: می‌خورد + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: خورد } + +--- +"portuguese": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: portuguese + + - do: + indices.analyze: + body: + text: quilométricas + analyzer: portuguese + - length: { tokens: 1 } + - match: { tokens.0.token: quilometric } + + - do: + indices.analyze: + index: test + body: + text: quilométricas + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: quilometric } + +--- +"romanian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: romanian + + - do: + indices.analyze: + body: + text: absenţa + analyzer: romanian + - length: { tokens: 1 } + - match: { tokens.0.token: absenţ } + + - do: + indices.analyze: + index: test + body: + text: absenţa + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: absenţ } + +--- +"russian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: russian + + - do: + indices.analyze: + body: + text: Вместе с тем о + analyzer: russian + - length: { tokens: 1 } + - match: { tokens.0.token: вмест } + + - do: + indices.analyze: + index: test + body: + text: Вместе с тем о + analyzer: my_analyzer + - 
length: { tokens: 1 } + - match: { tokens.0.token: вмест } + +--- +"sorani": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: sorani + + - do: + indices.analyze: + body: + text: پیاوە + analyzer: sorani + - length: { tokens: 1 } + - match: { tokens.0.token: پیاو } + + - do: + indices.analyze: + index: test + body: + text: پیاوە + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: پیاو } + +--- +"spanish": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: spanish + + - do: + indices.analyze: + body: + text: chicana + analyzer: spanish + - length: { tokens: 1 } + - match: { tokens.0.token: chican } + + - do: + indices.analyze: + index: test + body: + text: chicana + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: chican } + +--- +"swedish": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: swedish + + - do: + indices.analyze: + body: + text: jaktkarlarne + analyzer: swedish + - length: { tokens: 1 } + - match: { tokens.0.token: jaktkarl } + + - do: + indices.analyze: + index: test + body: + text: jaktkarlarne + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: jaktkarl } + +--- +"turkish": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: turkish + + - do: + indices.analyze: + body: + text: ağacı + analyzer: turkish + - length: { tokens: 1 } + - match: { tokens.0.token: ağaç } + + - do: + indices.analyze: + index: test + body: + text: ağacı + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: ağaç } + +--- +"thai": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: thai + + - do: + indices.analyze: + body: + text: ๑๒๓๔ + analyzer: thai + - length: { tokens: 1 } + - match: { tokens.0.token: "1234" } + + - do: + indices.analyze: + index: test + body: + text: ๑๒๓๔ + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: "1234" } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index 6b7860c0cf949..364732dc1833d 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -30,39 +30,21 @@ import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.CharFilterFactory; -import org.elasticsearch.index.analysis.GreekAnalyzerProvider; -import org.elasticsearch.index.analysis.HindiAnalyzerProvider; -import org.elasticsearch.index.analysis.HungarianAnalyzerProvider; import org.elasticsearch.index.analysis.HunspellTokenFilterFactory; -import org.elasticsearch.index.analysis.IndonesianAnalyzerProvider; -import org.elasticsearch.index.analysis.IrishAnalyzerProvider; -import org.elasticsearch.index.analysis.ItalianAnalyzerProvider; import org.elasticsearch.index.analysis.KeywordAnalyzerProvider; -import org.elasticsearch.index.analysis.LatvianAnalyzerProvider; -import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider; -import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider; -import org.elasticsearch.index.analysis.PersianAnalyzerProvider; -import 
org.elasticsearch.index.analysis.PortugueseAnalyzerProvider; import org.elasticsearch.index.analysis.PreBuiltAnalyzerProviderFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; -import org.elasticsearch.index.analysis.RomanianAnalyzerProvider; -import org.elasticsearch.index.analysis.RussianAnalyzerProvider; import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; import org.elasticsearch.index.analysis.SimpleAnalyzerProvider; -import org.elasticsearch.index.analysis.SoraniAnalyzerProvider; -import org.elasticsearch.index.analysis.SpanishAnalyzerProvider; import org.elasticsearch.index.analysis.StandardAnalyzerProvider; import org.elasticsearch.index.analysis.StandardTokenFilterFactory; import org.elasticsearch.index.analysis.StandardTokenizerFactory; import org.elasticsearch.index.analysis.StopAnalyzerProvider; import org.elasticsearch.index.analysis.StopTokenFilterFactory; -import org.elasticsearch.index.analysis.SwedishAnalyzerProvider; -import org.elasticsearch.index.analysis.ThaiAnalyzerProvider; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; -import org.elasticsearch.index.analysis.TurkishAnalyzerProvider; import org.elasticsearch.index.analysis.WhitespaceAnalyzerProvider; import org.elasticsearch.plugins.AnalysisPlugin; @@ -227,24 +209,6 @@ private NamedRegistry>> setupAnalyzers(List analyzers.register("stop", StopAnalyzerProvider::new); analyzers.register("whitespace", WhitespaceAnalyzerProvider::new); analyzers.register("keyword", KeywordAnalyzerProvider::new); - analyzers.register("greek", GreekAnalyzerProvider::new); - analyzers.register("hindi", HindiAnalyzerProvider::new); - analyzers.register("hungarian", HungarianAnalyzerProvider::new); - analyzers.register("indonesian", IndonesianAnalyzerProvider::new); - analyzers.register("irish", IrishAnalyzerProvider::new); - analyzers.register("italian", ItalianAnalyzerProvider::new); - analyzers.register("latvian", LatvianAnalyzerProvider::new); - analyzers.register("lithuanian", LithuanianAnalyzerProvider::new); - analyzers.register("norwegian", NorwegianAnalyzerProvider::new); - analyzers.register("persian", PersianAnalyzerProvider::new); - analyzers.register("portuguese", PortugueseAnalyzerProvider::new); - analyzers.register("romanian", RomanianAnalyzerProvider::new); - analyzers.register("russian", RussianAnalyzerProvider::new); - analyzers.register("sorani", SoraniAnalyzerProvider::new); - analyzers.register("spanish", SpanishAnalyzerProvider::new); - analyzers.register("swedish", SwedishAnalyzerProvider::new); - analyzers.register("turkish", TurkishAnalyzerProvider::new); - analyzers.register("thai", ThaiAnalyzerProvider::new); analyzers.extractAndRegister(plugins, AnalysisPlugin::getAnalyzers); return analyzers; } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java index 0e9aed3c142d9..0f31a8a46f1db 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java @@ -20,30 +20,12 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; -import org.apache.lucene.analysis.ckb.SoraniAnalyzer; import 
org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.core.SimpleAnalyzer; import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; -import org.apache.lucene.analysis.el.GreekAnalyzer; -import org.apache.lucene.analysis.es.SpanishAnalyzer; -import org.apache.lucene.analysis.fa.PersianAnalyzer; -import org.apache.lucene.analysis.ga.IrishAnalyzer; -import org.apache.lucene.analysis.hi.HindiAnalyzer; -import org.apache.lucene.analysis.hu.HungarianAnalyzer; -import org.apache.lucene.analysis.id.IndonesianAnalyzer; -import org.apache.lucene.analysis.it.ItalianAnalyzer; -import org.apache.lucene.analysis.lt.LithuanianAnalyzer; -import org.apache.lucene.analysis.lv.LatvianAnalyzer; -import org.apache.lucene.analysis.no.NorwegianAnalyzer; -import org.apache.lucene.analysis.pt.PortugueseAnalyzer; -import org.apache.lucene.analysis.ro.RomanianAnalyzer; -import org.apache.lucene.analysis.ru.RussianAnalyzer; import org.apache.lucene.analysis.standard.ClassicAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.analysis.sv.SwedishAnalyzer; -import org.apache.lucene.analysis.th.ThaiAnalyzer; -import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.elasticsearch.Version; import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; @@ -110,168 +92,6 @@ protected Analyzer create(Version version) { a.setVersion(version.luceneVersion); return a; } - }, - - GREEK { - @Override - protected Analyzer create(Version version) { - Analyzer a = new GreekAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - HINDI { - @Override - protected Analyzer create(Version version) { - Analyzer a = new HindiAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - HUNGARIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new HungarianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - INDONESIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new IndonesianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - IRISH { - @Override - protected Analyzer create(Version version) { - Analyzer a = new IrishAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - ITALIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new ItalianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - LATVIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new LatvianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - LITHUANIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new LithuanianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - NORWEGIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new NorwegianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - PERSIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new PersianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - PORTUGUESE { - @Override - protected Analyzer create(Version version) { - Analyzer a = new PortugueseAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - ROMANIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new RomanianAnalyzer(); - 
a.setVersion(version.luceneVersion); - return a; - } - }, - - RUSSIAN { - @Override - protected Analyzer create(Version version) { - Analyzer a = new RussianAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - SORANI { - @Override - protected Analyzer create(Version version) { - Analyzer a = new SoraniAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - SPANISH { - @Override - protected Analyzer create(Version version) { - Analyzer a = new SpanishAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - SWEDISH { - @Override - protected Analyzer create(Version version) { - Analyzer a = new SwedishAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - TURKISH { - @Override - protected Analyzer create(Version version) { - Analyzer a = new TurkishAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } - }, - - THAI { - @Override - protected Analyzer create(Version version) { - Analyzer a = new ThaiAnalyzer(); - a.setVersion(version.luceneVersion); - return a; - } }; protected abstract Analyzer create(Version version); diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index b4a07e10675ca..3e6b11f56a1b2 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -165,7 +165,7 @@ public void testVersionedAnalyzers() throws Exception { assertEquals(Version.V_5_0_0.luceneVersion, indexAnalyzers.get("standard").analyzer().getVersion()); assertEquals(Version.V_5_0_0.luceneVersion, - indexAnalyzers.get("thai").analyzer().getVersion()); + indexAnalyzers.get("stop").analyzer().getVersion()); assertThat(indexAnalyzers.get("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class))); assertEquals(org.apache.lucene.util.Version.fromBits(3,6,0), From 43b46d7f68103768c3cf4f3ce9100bc92496bba2 Mon Sep 17 00:00:00 2001 From: Sohaib Iftikhar Date: Mon, 18 Jun 2018 15:59:29 +0200 Subject: [PATCH 25/41] REST high-level client: add validate query API (#31077) Adds the validate query API to the high level rest client. 
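In sketch form, client-side usage looks as follows (a minimal, illustrative example, not code from this change; the index name, the query, and the `client` variable are assumptions):

    ValidateQueryRequest request = new ValidateQueryRequest("posts");
    request.query(QueryBuilders.termQuery("user", "kimchy"));
    request.explain(true);
    ValidateQueryResponse response =
            client.indices().validateQuery(request, RequestOptions.DEFAULT);
    boolean valid = response.isValid();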
--- .../elasticsearch/client/IndicesClient.java | 32 +++++ .../client/RequestConverters.java | 15 +++ .../elasticsearch/client/IndicesClientIT.java | 39 ++++++ .../client/RequestConvertersTests.java | 35 ++++++ .../IndicesClientDocumentationIT.java | 83 +++++++++++++ .../indices/validate_query.asciidoc | 113 ++++++++++++++++++ .../high-level/supported-apis.asciidoc | 2 + .../validate/query/QueryExplanation.java | 82 ++++++++++++- .../validate/query/ValidateQueryRequest.java | 12 +- .../validate/query/ValidateQueryResponse.java | 53 +++++--- .../indices/RestValidateQueryAction.java | 3 +- .../validate/query/QueryExplanationTests.java | 59 +++++++++ .../query/ValidateQueryResponseTests.java | 110 +++++++++++++++++ 13 files changed, 618 insertions(+), 20 deletions(-) create mode 100644 docs/java-rest/high-level/indices/validate_query.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanationTests.java create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 5d0376efce5f6..922da32d1fa9d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -58,6 +58,8 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -1091,6 +1093,36 @@ public void putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, Re PutIndexTemplateResponse::fromXContent, listener, emptySet()); } + /** + * Validate a potentially expensive query without executing it. + *
    + * See Validate Query API + * on elastic.co + * @param validateQueryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ValidateQueryResponse validateQuery(ValidateQueryRequest validateQueryRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(validateQueryRequest, RequestConverters::validateQuery, options, + ValidateQueryResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously validate a potentially expensive query without executing it. + *
    + * See Validate Query API + * on elastic.co + * @param validateQueryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void validateQueryAsync(ValidateQueryRequest validateQueryRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(validateQueryRequest, RequestConverters::validateQuery, options, + ValidateQueryResponse::fromXContent, listener, emptySet()); + } + /** * Gets index templates using the Index Templates API * See Index Templates API diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index caeffdbe80c63..e65d9480ce6bb 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -58,6 +58,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; @@ -877,6 +878,20 @@ static Request getTemplates(GetIndexTemplatesRequest getIndexTemplatesRequest) t return request; } + static Request validateQuery(ValidateQueryRequest validateQueryRequest) throws IOException { + String[] indices = validateQueryRequest.indices() == null ? Strings.EMPTY_ARRAY : validateQueryRequest.indices(); + String[] types = validateQueryRequest.types() == null || indices.length <= 0 ? Strings.EMPTY_ARRAY : validateQueryRequest.types(); + String endpoint = endpoint(indices, types, "_validate/query"); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + Params params = new Params(request); + params.withIndicesOptions(validateQueryRequest.indicesOptions()); + params.putParam("explain", Boolean.toString(validateQueryRequest.explain())); + params.putParam("all_shards", Boolean.toString(validateQueryRequest.allShards())); + params.putParam("rewrite", Boolean.toString(validateQueryRequest.rewrite())); + request.setEntity(createEntity(validateQueryRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request getAlias(GetAliasesRequest getAliasesRequest) { String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices(); String[] aliases = getAliasesRequest.aliases() == null ? 
Strings.EMPTY_ARRAY : getAliasesRequest.aliases(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 4a782d76ab620..1f116a4e33270 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; @@ -64,6 +65,8 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; @@ -81,6 +84,8 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -1197,6 +1202,40 @@ public void testPutTemplateBadRequests() throws Exception { assertThat(unknownSettingError.getDetailedMessage(), containsString("unknown setting [index.this-setting-does-not-exist]")); } + public void testValidateQuery() throws IOException{ + String index = "some_index"; + createIndex(index, Settings.EMPTY); + QueryBuilder builder = QueryBuilders + .boolQuery() + .must(QueryBuilders.queryStringQuery("*:*")) + .filter(QueryBuilders.termQuery("user", "kimchy")); + ValidateQueryRequest request = new ValidateQueryRequest(index).query(builder); + request.explain(randomBoolean()); + ValidateQueryResponse response = execute(request, highLevelClient().indices()::validateQuery, + highLevelClient().indices()::validateQueryAsync); + assertTrue(response.isValid()); + } + + public void testInvalidValidateQuery() throws IOException{ + String index = "shakespeare"; + + createIndex(index, Settings.EMPTY); + Request postDoc = new Request(HttpPost.METHOD_NAME, "/" + index + "/1"); + postDoc.setJsonEntity( + "{\"type\":\"act\",\"line_id\":1,\"play_name\":\"Henry IV\", \"speech_number\":\"\"," + + "\"line_number\":\"\",\"speaker\":\"\",\"text_entry\":\"ACT I\"}"); + assertOK(client().performRequest(postDoc)); + + QueryBuilder builder = QueryBuilders + .queryStringQuery("line_id:foo") + .lenient(false); + ValidateQueryRequest request = new ValidateQueryRequest(index).query(builder); + request.explain(true); + ValidateQueryResponse response = execute(request, highLevelClient().indices()::validateQuery, + highLevelClient().indices()::validateQueryAsync); + assertFalse(response.isValid()); + } + public void testGetIndexTemplate() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index c40ecc4287c50..aa58f4d775604 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -60,6 +60,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -1928,6 +1929,40 @@ public void testPutTemplateRequest() throws Exception { assertToXContentBody(putTemplateRequest, request.getEntity()); } + public void testValidateQuery() throws Exception { + String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); + String[] types = randomBoolean() ? generateRandomStringArray(5, 5, false, false) : null; + ValidateQueryRequest validateQueryRequest; + if (randomBoolean()) { + validateQueryRequest = new ValidateQueryRequest(indices); + } else { + validateQueryRequest = new ValidateQueryRequest(); + validateQueryRequest.indices(indices); + } + validateQueryRequest.types(types); + Map expectedParams = new HashMap<>(); + setRandomIndicesOptions(validateQueryRequest::indicesOptions, validateQueryRequest::indicesOptions, expectedParams); + validateQueryRequest.explain(randomBoolean()); + validateQueryRequest.rewrite(randomBoolean()); + validateQueryRequest.allShards(randomBoolean()); + expectedParams.put("explain", Boolean.toString(validateQueryRequest.explain())); + expectedParams.put("rewrite", Boolean.toString(validateQueryRequest.rewrite())); + expectedParams.put("all_shards", Boolean.toString(validateQueryRequest.allShards())); + Request request = RequestConverters.validateQuery(validateQueryRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + endpoint.add(String.join(",", indices)); + if (types != null && types.length > 0) { + endpoint.add(String.join(",", types)); + } + } + endpoint.add("_validate/query"); + assertThat(request.getEndpoint(), equalTo(endpoint.toString())); + assertThat(request.getParameters(), equalTo(expectedParams)); + assertToXContentBody(validateQueryRequest, request.getEntity()); + assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + } + public void testGetTemplateRequest() throws Exception { Map encodes = new HashMap<>(); encodes.put("log", "log"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 725a59bb24d8b..800200c64b942 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -62,6 +62,9 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import 
org.elasticsearch.action.admin.indices.validate.query.QueryExplanation; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.IndicesOptions; @@ -81,6 +84,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; @@ -2126,4 +2130,83 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + + public void testValidateQuery() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String index = "some_index"; + createIndex(index, Settings.EMPTY); + + // tag::validate-query-request + ValidateQueryRequest request = new ValidateQueryRequest(index); // <1> + // end::validate-query-request + + // tag::validate-query-request-query + QueryBuilder builder = QueryBuilders + .boolQuery() // <1> + .must(QueryBuilders.queryStringQuery("*:*")) + .filter(QueryBuilders.termQuery("user", "kimchy")); + request.query(builder); // <2> + // end::validate-query-request-query + + // tag::validate-query-request-explain + request.explain(true); // <1> + // end::validate-query-request-explain + + // tag::validate-query-request-allShards + request.allShards(true); // <1> + // end::validate-query-request-allShards + + // tag::validate-query-request-rewrite + request.rewrite(true); // <1> + // end::validate-query-request-rewrite + + // tag::validate-query-execute + ValidateQueryResponse response = client.indices().validateQuery(request, RequestOptions.DEFAULT); // <1> + // end::validate-query-execute + + // tag::validate-query-response + boolean isValid = response.isValid(); // <1> + int totalShards = response.getTotalShards(); // <2> + int successfulShards = response.getSuccessfulShards(); // <3> + int failedShards = response.getFailedShards(); // <4> + if (failedShards > 0) { + for(DefaultShardOperationFailedException failure: response.getShardFailures()) { // <5> + String failedIndex = failure.index(); // <6> + int shardId = failure.shardId(); // <7> + String reason = failure.reason(); // <8> + } + } + for(QueryExplanation explanation: response.getQueryExplanation()) { // <9> + String explanationIndex = explanation.getIndex(); // <10> + int shardId = explanation.getShard(); // <11> + String explanationString = explanation.getExplanation(); // <12> + } + // end::validate-query-response + + // tag::validate-query-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(ValidateQueryResponse validateQueryResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::validate-query-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::validate-query-execute-async + client.indices().validateQueryAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::validate-query-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } } diff --git 
a/docs/java-rest/high-level/indices/validate_query.asciidoc b/docs/java-rest/high-level/indices/validate_query.asciidoc new file mode 100644 index 0000000000000..3b3b184b02875 --- /dev/null +++ b/docs/java-rest/high-level/indices/validate_query.asciidoc @@ -0,0 +1,113 @@ +[[java-rest-high-indices-validate-query]] +=== Validate Query API + +[[java-rest-high-indices-validate-query-request]] +==== Validate Query Request + +A `ValidateQueryRequest` requires one or more `indices` on which the query is validated. If no index +is provided, the request is executed on all indices. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[validate-query-request] +-------------------------------------------------- +<1> The index on which to run the request. + +In addition, the request needs the query that is to be validated. The query can be built using the `QueryBuilders` utility class. +The following code snippet builds a sample boolean query. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[validate-query-request-query] +-------------------------------------------------- +<1> Build the desired query. +<2> Set it on the request. + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[validate-query-request-explain] +-------------------------------------------------- +<1> The `explain` parameter can be set to `true` to get more detailed information about why a query failed. + +By default, the request is executed on a single shard only, which is randomly selected. The detailed explanation of +the query may depend on which shard is being hit, and therefore may vary from one request to another. So, in case of +query rewrite, the `allShards` parameter should be used to get a response from all available shards. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[validate-query-request-allShards] +-------------------------------------------------- +<1> Set the `allShards` parameter. + +When the query is valid, the explanation defaults to the string representation of that query. With `rewrite` set to `true`, +the explanation is more detailed, showing the actual Lucene query that will be executed. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[validate-query-request-rewrite] +-------------------------------------------------- +<1> Set the `rewrite` parameter. + +[[java-rest-high-indices-validate-query-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[validate-query-execute] +-------------------------------------------------- +<1> Execute the request and get back the response in a `ValidateQueryResponse` object.
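+
+As a self-contained sketch (this snippet is illustrative and not generated
+from the documentation tests; it assumes a `RestHighLevelClient` named
+`client` and a hypothetical index named `posts`):
+
+["source","java"]
+--------------------------------------------------
+ValidateQueryRequest request = new ValidateQueryRequest("posts");
+request.query(QueryBuilders.boolQuery()
+        .must(QueryBuilders.queryStringQuery("*:*"))
+        .filter(QueryBuilders.termQuery("user", "kimchy")));
+request.explain(true);
+ValidateQueryResponse response =
+        client.indices().validateQuery(request, RequestOptions.DEFAULT);
+if (response.isValid() == false) {
+    // each QueryExplanation carries the per-shard error details
+    response.getQueryExplanation().forEach(e -> System.out.println(e.getError()));
+}
+--------------------------------------------------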
+ +[[java-rest-high-indices-validate-query-async]] +==== Asynchronous Execution + +The asynchronous execution of a validate query request requires both the `ValidateQueryRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[validate-query-execute-async] +-------------------------------------------------- +<1> The `ValidateQueryRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed, the `ActionListener` is called back using the `onResponse` method +if the execution completed successfully or using the `onFailure` method if +it failed. + +A typical listener for `ValidateQueryResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[validate-query-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-indices-validate-query-response]] +==== Validate Query Response + +The returned `ValidateQueryResponse` allows you to retrieve information about the executed +operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[validate-query-response] +-------------------------------------------------- +<1> Check if the query is valid or not. +<2> Get the total number of shards. +<3> Get the number of shards that were successful. +<4> Get the number of shards that failed. +<5> Get the shard failures as `DefaultShardOperationFailedException`. +<6> Get the index of a failed shard. +<7> Get the shard id of a failed shard. +<8> Get the reason for shard failure. +<9> Get the detailed explanation for the shards (if `explain` was set to `true`). +<10> Get the index to which a particular explanation belongs. +<11> Get the shard id to which a particular explanation belongs. +<12> Get the actual explanation string.
\ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index b33c2421b06d3..4cd87a521d104 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -73,6 +73,7 @@ Index Management:: * <> * <> * <> +* <> Mapping Management:: * <> @@ -103,6 +104,7 @@ include::indices/get_alias.asciidoc[] include::indices/put_settings.asciidoc[] include::indices/get_settings.asciidoc[] include::indices/put_template.asciidoc[] +include::indices/validate_query.asciidoc[] include::indices/get_templates.asciidoc[] == Cluster APIs diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java index 780bf037f0e28..e330a0b8565fc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java @@ -20,16 +20,57 @@ package org.elasticsearch.action.admin.indices.validate.query; import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Objects; -public class QueryExplanation implements Streamable { +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class QueryExplanation implements Streamable, ToXContentFragment { + + public static final String INDEX_FIELD = "index"; + public static final String SHARD_FIELD = "shard"; + public static final String VALID_FIELD = "valid"; + public static final String ERROR_FIELD = "error"; + public static final String EXPLANATION_FIELD = "explanation"; public static final int RANDOM_SHARD = -1; + @SuppressWarnings("unchecked") + static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "query_explanation", + true, + a -> { + int shard = RANDOM_SHARD; + if (a[1] != null) { + shard = (int)a[1]; + } + return new QueryExplanation( + (String)a[0], + shard, + (boolean)a[2], + (String)a[3], + (String)a[4] + ); + } + ); + static { + PARSER.declareString(optionalConstructorArg(), new ParseField(INDEX_FIELD)); + PARSER.declareInt(optionalConstructorArg(), new ParseField(SHARD_FIELD)); + PARSER.declareBoolean(constructorArg(), new ParseField(VALID_FIELD)); + PARSER.declareString(optionalConstructorArg(), new ParseField(EXPLANATION_FIELD)); + PARSER.declareString(optionalConstructorArg(), new ParseField(ERROR_FIELD)); + } + private String index; private int shard = RANDOM_SHARD; @@ -110,4 +151,43 @@ public static QueryExplanation readQueryExplanation(StreamInput in) throws IOEx exp.readFrom(in); return exp; } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (getIndex() != null) { + builder.field(INDEX_FIELD, getIndex()); + } + if(getShard() >= 0) { + 
builder.field(SHARD_FIELD, getShard()); + } + builder.field(VALID_FIELD, isValid()); + if (getError() != null) { + builder.field(ERROR_FIELD, getError()); + } + if (getExplanation() != null) { + builder.field(EXPLANATION_FIELD, getExplanation()); + } + return builder; + } + + public static QueryExplanation fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + QueryExplanation other = (QueryExplanation) o; + return Objects.equals(getIndex(), other.getIndex()) && + Objects.equals(getShard(), other.getShard()) && + Objects.equals(isValid(), other.isValid()) && + Objects.equals(getError(), other.getError()) && + Objects.equals(getExplanation(), other.getExplanation()); + } + + @Override + public int hashCode() { + return Objects.hash(getIndex(), getShard(), isValid(), getError(), getExplanation()); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java index 5953a5548c465..7694e7583c898 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -27,6 +27,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -38,7 +40,7 @@ *
    * The request requires the query to be set using {@link #query(QueryBuilder)} */ -public class ValidateQueryRequest extends BroadcastRequest { +public class ValidateQueryRequest extends BroadcastRequest implements ToXContentObject { private QueryBuilder query = new MatchAllQueryBuilder(); @@ -179,4 +181,12 @@ public String toString() { return "[" + Arrays.toString(indices) + "]" + Arrays.toString(types) + ", query[" + query + "], explain:" + explain + ", rewrite:" + rewrite + ", all_shards:" + allShards; } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("query"); + query.toXContent(builder, params); + return builder.endObject(); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java index 5bb11dd56e00b..f766e1d9c6aa4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -21,16 +21,22 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import static org.elasticsearch.action.admin.indices.validate.query.QueryExplanation.readQueryExplanation; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * The response of the validate action. 
@@ -39,12 +45,33 @@ */ public class ValidateQueryResponse extends BroadcastResponse { - public static final String INDEX_FIELD = "index"; - public static final String SHARD_FIELD = "shard"; public static final String VALID_FIELD = "valid"; public static final String EXPLANATIONS_FIELD = "explanations"; - public static final String ERROR_FIELD = "error"; - public static final String EXPLANATION_FIELD = "explanation"; + + @SuppressWarnings("unchecked") + static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "validate_query", + true, + arg -> { + BroadcastResponse response = (BroadcastResponse) arg[0]; + return + new ValidateQueryResponse( + (boolean)arg[1], + (List)arg[2], + response.getTotalShards(), + response.getSuccessfulShards(), + response.getFailedShards(), + Arrays.asList(response.getShardFailures()) + ); + } + ); + static { + declareBroadcastFields(PARSER); + PARSER.declareBoolean(constructorArg(), new ParseField(VALID_FIELD)); + PARSER.declareObjectArray( + optionalConstructorArg(), QueryExplanation.PARSER, new ParseField(EXPLANATIONS_FIELD) + ); + } private boolean valid; @@ -112,22 +139,14 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t builder.startArray(EXPLANATIONS_FIELD); for (QueryExplanation explanation : getQueryExplanation()) { builder.startObject(); - if (explanation.getIndex() != null) { - builder.field(INDEX_FIELD, explanation.getIndex()); - } - if(explanation.getShard() >= 0) { - builder.field(SHARD_FIELD, explanation.getShard()); - } - builder.field(VALID_FIELD, explanation.isValid()); - if (explanation.getError() != null) { - builder.field(ERROR_FIELD, explanation.getError()); - } - if (explanation.getExplanation() != null) { - builder.field(EXPLANATION_FIELD, explanation.getExplanation()); - } + explanation.toXContent(builder, params); builder.endObject(); } builder.endArray(); } } + + public static ValidateQueryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java index 57486396f911b..d1a97d74d047f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest.action.admin.indices; +import org.elasticsearch.action.admin.indices.validate.query.QueryExplanation; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -101,7 +102,7 @@ private static BytesRestResponse buildErrorResponse(XContentBuilder builder, Str builder.startObject(); builder.field(ValidateQueryResponse.VALID_FIELD, false); if (explain) { - builder.field(ValidateQueryResponse.ERROR_FIELD, error); + builder.field(QueryExplanation.ERROR_FIELD, error); } builder.endObject(); return new BytesRestResponse(OK, builder); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanationTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanationTests.java new file mode 100644 index 0000000000000..db167e0c7669e --- /dev/null +++ 
b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanationTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.validate.query; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; + +public class QueryExplanationTests extends AbstractStreamableXContentTestCase { + + static QueryExplanation createRandomQueryExplanation(boolean isValid) { + String index = "index_" + randomInt(1000); + int shard = randomInt(100); + Boolean valid = isValid; + String errorField = null; + if (!valid) { + errorField = randomAlphaOfLength(randomIntBetween(10, 100)); + } + String explanation = randomAlphaOfLength(randomIntBetween(10, 100)); + return new QueryExplanation(index, shard, valid, explanation, errorField); + } + + static QueryExplanation createRandomQueryExplanation() { + return createRandomQueryExplanation(randomBoolean()); + } + + @Override + protected QueryExplanation doParseInstance(XContentParser parser) throws IOException { + return QueryExplanation.fromXContent(parser); + } + + @Override + protected QueryExplanation createBlankInstance() { + return new QueryExplanation(); + } + + @Override + protected QueryExplanation createTestInstance() { + return createRandomQueryExplanation(); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java new file mode 100644 index 0000000000000..d72aae8fa2bd1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.validate.query; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.AbstractBroadcastResponseTestCase; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public class ValidateQueryResponseTests extends AbstractBroadcastResponseTestCase { + + private static ValidateQueryResponse createRandomValidateQueryResponse( + int totalShards, int successfulShards, int failedShards, List failures) { + boolean valid = failedShards == 0; + List queryExplanations = new ArrayList<>(totalShards); + for(DefaultShardOperationFailedException failure: failures) { + queryExplanations.add( + new QueryExplanation( + failure.index(), failure.shardId(), false, failure.reason(), null + ) + ); + } + return new ValidateQueryResponse( + valid, queryExplanations, totalShards, successfulShards, failedShards, failures + ); + } + + private static ValidateQueryResponse createRandomValidateQueryResponse() { + int totalShards = randomIntBetween(1, 10); + int successfulShards = randomIntBetween(0, totalShards); + int failedShards = totalShards - successfulShards; + boolean valid = failedShards == 0; + List queryExplanations = new ArrayList<>(totalShards); + List shardFailures = new ArrayList<>(failedShards); + for (int i=0; i queryExplSet = new HashSet<>(response.getQueryExplanation()); + assertEquals(response.isValid(), parsedResponse.isValid()); + assertEquals(response.getQueryExplanation().size(), parsedResponse.getQueryExplanation().size()); + assertTrue(queryExplSet.containsAll(parsedResponse.getQueryExplanation())); + } + + @Override + protected ValidateQueryResponse createTestInstance(int totalShards, int successfulShards, int failedShards, + List failures) { + return createRandomValidateQueryResponse(totalShards, successfulShards, failedShards, failures); + } + + @Override + public void testToXContent() { + ValidateQueryResponse response = createTestInstance(10, 10, 0, new ArrayList<>()); + String output = Strings.toString(response); + assertEquals("{\"_shards\":{\"total\":10,\"successful\":10,\"failed\":0},\"valid\":true}", output); + } +} From 2f9880d00432e926621d7a09255baa152e508897 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 18 Jun 2018 08:52:57 -0700 Subject: [PATCH 26/41] [DOCS] Removes breaking change (#31376) --- docs/reference/release-notes/6.3.asciidoc | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/reference/release-notes/6.3.asciidoc b/docs/reference/release-notes/6.3.asciidoc index 8b9658b932024..467f81f93384d 100644 --- a/docs/reference/release-notes/6.3.asciidoc +++ b/docs/reference/release-notes/6.3.asciidoc @@ -1,5 +1,5 @@ [[release-notes-6.3.0]] -== 6.3.0 Release Notes +== {es} version 6.3.0 Also see <>. @@ -31,10 +31,6 @@ Security:: with an SPI based extension mechanism that is installed and built as an elasticsearch plugin. 
-Task Management:: -* Remove metadata customs that can break serialization {pull}30945[#30945] (issues: {issue}30731[#30731], {issue}30857[#30857]) - - [[breaking-java-6.3.0]] [float] === Breaking Java changes From 74727a198543ba3f062704d54f8ba08a236ca28e Mon Sep 17 00:00:00 2001 From: lcawl Date: Mon, 18 Jun 2018 08:58:12 -0700 Subject: [PATCH 27/41] [DOCS] Removes ML item from release highlights --- docs/reference/release-notes/highlights-6.3.0.asciidoc | 7 ------- 1 file changed, 7 deletions(-) diff --git a/docs/reference/release-notes/highlights-6.3.0.asciidoc b/docs/reference/release-notes/highlights-6.3.0.asciidoc index 94c89b5755713..fb396d2832bcc 100644 --- a/docs/reference/release-notes/highlights-6.3.0.asciidoc +++ b/docs/reference/release-notes/highlights-6.3.0.asciidoc @@ -50,10 +50,3 @@ versions (and EOL dates), you can continue to use Java 8. See the https://www.elastic.co/support/matrix#matrix_jvm[support matrix] for all of the JVM options for {es}. -[float] -=== Improved trend modeling and periodicity testing for forecasting - -{stack-ov}/ml-overview.html#ml-forecasting[Forecasting] is now more reliable and -has greatly improved confidence intervals--particularly for longer time ranges. -These improvements also affect trend and seasonality modeling during anomaly -detection. From fe8c5693bbf402c78058d4282ee354343bd9df86 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 18 Jun 2018 08:48:23 -0700 Subject: [PATCH 28/41] [DOCS] Adds testing for security APIs (#31345) --- x-pack/docs/build.gradle | 4 ---- .../en/rest-api/security/authenticate.asciidoc | 14 ++++++++------ x-pack/docs/en/rest-api/security/ssl.asciidoc | 1 + x-pack/docs/en/rest-api/security/tokens.asciidoc | 1 + x-pack/docs/en/rest-api/security/users.asciidoc | 1 + 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 7662cdd2e4bbd..371a8ce4acacf 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -16,9 +16,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/ml/functions/rare.asciidoc', 'en/ml/functions/sum.asciidoc', 'en/ml/functions/time.asciidoc', - 'en/rest-api/security/ssl.asciidoc', - 'en/rest-api/security/users.asciidoc', - 'en/rest-api/security/tokens.asciidoc', 'en/rest-api/watcher/put-watch.asciidoc', 'en/security/authentication/user-cache.asciidoc', 'en/security/authorization/field-and-document-access-control.asciidoc', @@ -76,7 +73,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/rest-api/ml/update-snapshot.asciidoc', 'en/rest-api/ml/validate-detector.asciidoc', 'en/rest-api/ml/validate-job.asciidoc', - 'en/rest-api/security/authenticate.asciidoc', 'en/rest-api/watcher/stats.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc', ] diff --git a/x-pack/docs/en/rest-api/security/authenticate.asciidoc b/x-pack/docs/en/rest-api/security/authenticate.asciidoc index ba837ddfd2c20..ab259762332f9 100644 --- a/x-pack/docs/en/rest-api/security/authenticate.asciidoc +++ b/x-pack/docs/en/rest-api/security/authenticate.asciidoc @@ -35,12 +35,14 @@ The following example output provides information about the "rdeniro" user: -------------------------------------------------- { "username": "rdeniro", - "roles": [ - "admin", - "kibana4" + "roles": [ + "admin" ], - "metadata" : { - "employee_id": "8675309" - } + "full_name": null, + "email": null, + "metadata": { }, + "enabled": true } -------------------------------------------------- +// TESTRESPONSE[s/"rdeniro"/"$body.username"/] +// 
TESTRESPONSE[s/"admin"/"superuser"/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/security/ssl.asciidoc b/x-pack/docs/en/rest-api/security/ssl.asciidoc index f7a40c6d87607..6462699570fb0 100644 --- a/x-pack/docs/en/rest-api/security/ssl.asciidoc +++ b/x-pack/docs/en/rest-api/security/ssl.asciidoc @@ -109,3 +109,4 @@ The API returns the following results: } ] ---- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/security/tokens.asciidoc b/x-pack/docs/en/rest-api/security/tokens.asciidoc index 70f255ead37c0..f991a5c0cb836 100644 --- a/x-pack/docs/en/rest-api/security/tokens.asciidoc +++ b/x-pack/docs/en/rest-api/security/tokens.asciidoc @@ -98,6 +98,7 @@ by the value of the `access_token`. -------------------------------------------------- curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" http://localhost:9200/_cluster/health -------------------------------------------------- +// NOTCONSOLE [[security-api-refresh-token]] To extend the life of an existing token, the token api may be called again with the refresh diff --git a/x-pack/docs/en/rest-api/security/users.asciidoc b/x-pack/docs/en/rest-api/security/users.asciidoc index 926193481afbc..c84da5c7d75ff 100644 --- a/x-pack/docs/en/rest-api/security/users.asciidoc +++ b/x-pack/docs/en/rest-api/security/users.asciidoc @@ -115,6 +115,7 @@ authenticated. For example: -------------------------------------------------- curl -u jacknich:j@rV1s http://localhost:9200/_cluster/health -------------------------------------------------- +// NOTCONSOLE [[security-api-get-user]] To retrieve a native user, submit a GET request to the `/_xpack/security/user/` From 78869eb71e700dbde9238d62e78ccc6a78bf9470 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Mon, 18 Jun 2018 16:46:04 +0200 Subject: [PATCH 29/41] Use system context for cluster state update tasks (#31241) This commit makes it so that cluster state update tasks always run under the system context, only restoring the original context when the listener that was provided with the task is called. A notable exception is the clusterStatePublished(...) callback which will still run under system context, because it's defined on the executor-level, and not the task level, and only called once for the combined batch of tasks and can therefore not be uniquely identified with a task / thread context. 
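In sketch form (condensed and simplified from the MasterService change below; `submitToBatcher` and the surrounding variables are illustrative stand-ins, not the real method names):

    final ThreadContext threadContext = threadPool.getThreadContext();
    // capture the submitting thread's context so listener callbacks can restore it
    final Supplier<ThreadContext.StoredContext> restorer = threadContext.newRestorableContext(false);
    try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
        threadContext.markAsSystemContext();  // the update task itself now runs as system
        submitToBatcher(task, restorer);
    }

    // later, when the task's listener is notified:
    try (ThreadContext.StoredContext ignore = restorer.get()) {
        listener.clusterStateProcessed(source, oldState, newState);
    }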
Relates #30603 --- .../cluster/ClusterStateTaskExecutor.java | 3 + .../cluster/ClusterStateUpdateTask.java | 6 ++ .../cluster/service/MasterService.java | 35 ++++--- .../transport/RemoteClusterConnection.java | 2 - .../cluster/service/MasterServiceTests.java | 82 +++++++++++++++++ .../xpack/core/ml/MlMetadata.java | 13 ++- .../core/ml/datafeed/DatafeedUpdate.java | 9 +- .../core/ml/datafeed/DatafeedUpdateTests.java | 8 +- .../ml/action/TransportDeleteJobAction.java | 3 +- .../ml/action/TransportPutDatafeedAction.java | 14 +-- .../action/TransportUpdateDatafeedAction.java | 6 +- .../xpack/ml/job/JobManager.java | 5 +- .../xpack/ml/MlMetadataTests.java | 92 ++++++++++++++----- .../action/TransportCloseJobActionTests.java | 2 +- .../TransportStartDatafeedActionTests.java | 6 +- .../TransportStopDatafeedActionTests.java | 12 +-- .../ml/datafeed/DatafeedManagerTests.java | 2 +- .../datafeed/DatafeedNodeSelectorTests.java | 23 +++-- .../xpack/ml/integration/DeleteJobIT.java | 5 +- 19 files changed, 236 insertions(+), 92 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index 024389dd22c7f..6c536a7019bb0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -41,6 +41,9 @@ default boolean runOnlyOnMaster() { /** * Callback invoked after new cluster state is published. Note that * this method is not invoked if the cluster state was not updated. + * + * Note that this method will be executed using system context. + * * @param clusterChangedEvent the change event for this cluster state change, containing * both old and new states */ diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java index b298e7e915dea..9dc9c7f6f52d0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java @@ -62,6 +62,12 @@ public String describeTasks(List tasks) { */ public abstract void onFailure(String source, Exception e); + @Override + public final void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { + // final, empty implementation here as this method should only be defined in combination + // with a batching executor as it will always be executed within the system context. + } + /** * If the cluster state update task wasn't processed by the provided timeout, call * {@link ClusterStateTaskListener#onFailure(String, Exception)}. May return null to indicate no timeout is needed (default). 
diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 4432d864fd36a..2543be4811c1e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.threadpool.ThreadPool; @@ -59,6 +60,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; +import java.util.function.Supplier; import java.util.stream.Collectors; import static org.elasticsearch.cluster.service.ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING; @@ -426,26 +428,28 @@ public TimeValue getMaxTaskWaitTime() { return threadPoolExecutor.getMaxTaskWaitTime(); } - private SafeClusterStateTaskListener safe(ClusterStateTaskListener listener) { + private SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, Supplier contextSupplier) { if (listener instanceof AckedClusterStateTaskListener) { - return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger); + return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, contextSupplier, logger); } else { - return new SafeClusterStateTaskListener(listener, logger); + return new SafeClusterStateTaskListener(listener, contextSupplier, logger); } } private static class SafeClusterStateTaskListener implements ClusterStateTaskListener { private final ClusterStateTaskListener listener; + protected final Supplier context; private final Logger logger; - SafeClusterStateTaskListener(ClusterStateTaskListener listener, Logger logger) { + SafeClusterStateTaskListener(ClusterStateTaskListener listener, Supplier context, Logger logger) { this.listener = listener; + this.context = context; this.logger = logger; } @Override public void onFailure(String source, Exception e) { - try { + try (ThreadContext.StoredContext ignore = context.get()) { listener.onFailure(source, e); } catch (Exception inner) { inner.addSuppressed(e); @@ -456,7 +460,7 @@ public void onFailure(String source, Exception e) { @Override public void onNoLongerMaster(String source) { - try { + try (ThreadContext.StoredContext ignore = context.get()) { listener.onNoLongerMaster(source); } catch (Exception e) { logger.error(() -> new ParameterizedMessage( @@ -466,7 +470,7 @@ public void onNoLongerMaster(String source) { @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - try { + try (ThreadContext.StoredContext ignore = context.get()) { listener.clusterStateProcessed(source, oldState, newState); } catch (Exception e) { logger.error(() -> new ParameterizedMessage( @@ -480,8 +484,9 @@ private static class SafeAckedClusterStateTaskListener extends SafeClusterStateT private final AckedClusterStateTaskListener listener; private final Logger logger; - SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, Logger logger) { - super(listener, logger); + SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, Supplier context, + 
Logger logger) { + super(listener, context, logger); this.listener = listener; this.logger = logger; } @@ -493,7 +498,7 @@ public boolean mustAck(DiscoveryNode discoveryNode) { @Override public void onAllNodesAcked(@Nullable Exception e) { - try { + try (ThreadContext.StoredContext ignore = context.get()) { listener.onAllNodesAcked(e); } catch (Exception inner) { inner.addSuppressed(e); @@ -503,7 +508,7 @@ public void onAllNodesAcked(@Nullable Exception e) { @Override public void onAckTimeout() { - try { + try (ThreadContext.StoredContext ignore = context.get()) { listener.onAckTimeout(); } catch (Exception e) { logger.error("exception thrown by listener while notifying on ack timeout", e); @@ -724,9 +729,13 @@ public void submitStateUpdateTasks(final String source, if (!lifecycle.started()) { return; } - try { + final ThreadContext threadContext = threadPool.getThreadContext(); + final Supplier supplier = threadContext.newRestorableContext(false); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + threadContext.markAsSystemContext(); + List safeTasks = tasks.entrySet().stream() - .map(e -> taskBatcher.new UpdateTask(config.priority(), source, e.getKey(), safe(e.getValue()), executor)) + .map(e -> taskBatcher.new UpdateTask(config.priority(), source, e.getKey(), safe(e.getValue(), supplier), executor)) .collect(Collectors.toList()); taskBatcher.submitTasks(safeTasks, config.timeout()); } catch (EsRejectedExecutionException e) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index cdc6b7787a198..7fc13e252768b 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -560,7 +560,6 @@ public ClusterStateResponse newInstance() { @Override public void handleResponse(ClusterStateResponse response) { - assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; try { if (remoteClusterName.get() == null) { assert response.getClusterName().value() != null; @@ -601,7 +600,6 @@ public void handleResponse(ClusterStateResponse response) { @Override public void handleException(TransportException exp) { - assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), exp); try { IOUtils.closeWhileHandlingException(connection); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index f75363c7ab5c7..20587d31f5359 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -34,12 +34,14 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.BaseFuture; 
+import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -52,6 +54,7 @@ import org.junit.BeforeClass; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -168,6 +171,85 @@ public void onFailure(String source, Exception e) { nonMaster.close(); } + public void testThreadContext() throws InterruptedException { + final TimedMasterService master = createTimedMasterService(true); + final CountDownLatch latch = new CountDownLatch(1); + + try (ThreadContext.StoredContext ignored = threadPool.getThreadContext().stashContext()) { + final Map expectedHeaders = Collections.singletonMap("test", "test"); + threadPool.getThreadContext().putHeader(expectedHeaders); + + final TimeValue ackTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); + final TimeValue masterTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); + + master.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { + @Override + public ClusterState execute(ClusterState currentState) { + assertTrue(threadPool.getThreadContext().isSystemContext()); + assertEquals(Collections.emptyMap(), threadPool.getThreadContext().getHeaders()); + + if (randomBoolean()) { + return ClusterState.builder(currentState).build(); + } else if (randomBoolean()) { + return currentState; + } else { + throw new IllegalArgumentException("mock failure"); + } + } + + @Override + public void onFailure(String source, Exception e) { + assertFalse(threadPool.getThreadContext().isSystemContext()); + assertEquals(expectedHeaders, threadPool.getThreadContext().getHeaders()); + latch.countDown(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + assertFalse(threadPool.getThreadContext().isSystemContext()); + assertEquals(expectedHeaders, threadPool.getThreadContext().getHeaders()); + latch.countDown(); + } + + @Override + protected Void newResponse(boolean acknowledged) { + return null; + } + + public TimeValue ackTimeout() { + return ackTimeout; + } + + @Override + public TimeValue timeout() { + return masterTimeout; + } + + @Override + public void onAllNodesAcked(@Nullable Exception e) { + assertFalse(threadPool.getThreadContext().isSystemContext()); + assertEquals(expectedHeaders, threadPool.getThreadContext().getHeaders()); + latch.countDown(); + } + + @Override + public void onAckTimeout() { + assertFalse(threadPool.getThreadContext().isSystemContext()); + assertEquals(expectedHeaders, threadPool.getThreadContext().getHeaders()); + latch.countDown(); + } + + }); + + assertFalse(threadPool.getThreadContext().isSystemContext()); + assertEquals(expectedHeaders, threadPool.getThreadContext().getHeaders()); + } + + latch.await(); + + master.close(); + } + /* * test that a listener throwing an exception while handling a * notification does not prevent publication notification to the diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 5e145306f8c1f..85e5c99fe3581 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -20,7 +20,6 @@ import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -293,7 +292,7 @@ public Builder deleteJob(String jobId, PersistentTasksCustomMetaData tasks) { return this; } - public Builder putDatafeed(DatafeedConfig datafeedConfig, ThreadContext threadContext) { + public Builder putDatafeed(DatafeedConfig datafeedConfig, Map headers) { if (datafeeds.containsKey(datafeedConfig.getId())) { throw new ResourceAlreadyExistsException("A datafeed with id [" + datafeedConfig.getId() + "] already exists"); } @@ -302,13 +301,13 @@ public Builder putDatafeed(DatafeedConfig datafeedConfig, ThreadContext threadCo Job job = jobs.get(jobId); DatafeedJobValidator.validate(datafeedConfig, job); - if (threadContext != null) { + if (headers.isEmpty() == false) { // Adjust the request, adding security headers from the current thread context DatafeedConfig.Builder builder = new DatafeedConfig.Builder(datafeedConfig); - Map headers = threadContext.getHeaders().entrySet().stream() + Map securityHeaders = headers.entrySet().stream() .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - builder.setHeaders(headers); + builder.setHeaders(securityHeaders); datafeedConfig = builder.build(); } @@ -328,7 +327,7 @@ private void checkJobIsAvailableForDatafeed(String jobId) { } } - public Builder updateDatafeed(DatafeedUpdate update, PersistentTasksCustomMetaData persistentTasks, ThreadContext threadContext) { + public Builder updateDatafeed(DatafeedUpdate update, PersistentTasksCustomMetaData persistentTasks, Map headers) { String datafeedId = update.getId(); DatafeedConfig oldDatafeedConfig = datafeeds.get(datafeedId); if (oldDatafeedConfig == null) { @@ -336,7 +335,7 @@ public Builder updateDatafeed(DatafeedUpdate update, PersistentTasksCustomMetaDa } checkDatafeedIsStopped(() -> Messages.getMessage(Messages.DATAFEED_CANNOT_UPDATE_IN_CURRENT_STATE, datafeedId, DatafeedState.STARTED), datafeedId, persistentTasks); - DatafeedConfig newDatafeedConfig = update.apply(oldDatafeedConfig, threadContext); + DatafeedConfig newDatafeedConfig = update.apply(oldDatafeedConfig, headers); if (newDatafeedConfig.getJobId().equals(oldDatafeedConfig.getJobId()) == false) { checkJobIsAvailableForDatafeed(newDatafeedConfig.getJobId()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index 444532a7e3f15..27498bd1549ee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -264,7 +263,7 @@ ChunkingConfig getChunkingConfig() { 
* Applies the update to the given {@link DatafeedConfig} * @return a new {@link DatafeedConfig} that contains the update */ - public DatafeedConfig apply(DatafeedConfig datafeedConfig, ThreadContext threadContext) { + public DatafeedConfig apply(DatafeedConfig datafeedConfig, Map headers) { if (id.equals(datafeedConfig.getId()) == false) { throw new IllegalArgumentException("Cannot apply update to datafeedConfig with different id"); } @@ -301,12 +300,12 @@ public DatafeedConfig apply(DatafeedConfig datafeedConfig, ThreadContext threadC builder.setChunkingConfig(chunkingConfig); } - if (threadContext != null) { + if (headers.isEmpty() == false) { // Adjust the request, adding security headers from the current thread context - Map headers = threadContext.getHeaders().entrySet().stream() + Map securityHeaders = headers.entrySet().stream() .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - builder.setHeaders(headers); + builder.setHeaders(securityHeaders); } return builder.build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java index d059e567d1588..358f9d1c97bd7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java @@ -114,7 +114,7 @@ public void testApply_failBecauseTargetDatafeedHasDifferentId() { public void testApply_givenEmptyUpdate() { DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo"); - DatafeedConfig updatedDatafeed = new DatafeedUpdate.Builder(datafeed.getId()).build().apply(datafeed, null); + DatafeedConfig updatedDatafeed = new DatafeedUpdate.Builder(datafeed.getId()).build().apply(datafeed, Collections.emptyMap()); assertThat(datafeed, equalTo(updatedDatafeed)); } @@ -125,7 +125,7 @@ public void testApply_givenPartialUpdate() { DatafeedUpdate.Builder updated = new DatafeedUpdate.Builder(datafeed.getId()); updated.setScrollSize(datafeed.getScrollSize() + 1); - DatafeedConfig updatedDatafeed = update.build().apply(datafeed, null); + DatafeedConfig updatedDatafeed = update.build().apply(datafeed, Collections.emptyMap()); DatafeedConfig.Builder expectedDatafeed = new DatafeedConfig.Builder(datafeed); expectedDatafeed.setScrollSize(datafeed.getScrollSize() + 1); @@ -149,7 +149,7 @@ public void testApply_givenFullUpdateNoAggregations() { update.setScrollSize(8000); update.setChunkingConfig(ChunkingConfig.newManual(TimeValue.timeValueHours(1))); - DatafeedConfig updatedDatafeed = update.build().apply(datafeed, null); + DatafeedConfig updatedDatafeed = update.build().apply(datafeed, Collections.emptyMap()); assertThat(updatedDatafeed.getJobId(), equalTo("bar")); assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_2"))); @@ -175,7 +175,7 @@ public void testApply_givenAggregations() { update.setAggregations(new AggregatorFactories.Builder().addAggregator( AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime))); - DatafeedConfig updatedDatafeed = update.build().apply(datafeed, null); + DatafeedConfig updatedDatafeed = update.build().apply(datafeed, Collections.emptyMap()); assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_1"))); 
assertThat(updatedDatafeed.getTypes(), equalTo(Collections.singletonList("t_1"))); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index 81f4a90f575af..ede92fbbab950 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -213,7 +212,7 @@ public void onFailure(String source, Exception e) { } @Override - public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { logger.debug("Job [" + jobId + "] is successfully marked as deleted"); listener.onResponse(true); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java index 08a9dfb09c1d9..88c72578023f9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java @@ -41,6 +41,7 @@ import org.elasticsearch.xpack.core.security.support.Exceptions; import java.io.IOException; +import java.util.Map; public class TransportPutDatafeedAction extends TransportMasterNodeAction { @@ -95,7 +96,7 @@ protected void masterOperation(PutDatafeedAction.Request request, ClusterState s client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); } else { - putDatafeed(request, listener); + putDatafeed(request, threadPool.getThreadContext().getHeaders(), listener); } } @@ -103,7 +104,7 @@ private void handlePrivsResponse(String username, PutDatafeedAction.Request requ HasPrivilegesResponse response, ActionListener listener) throws IOException { if (response.isCompleteMatch()) { - putDatafeed(request, listener); + putDatafeed(request, threadPool.getThreadContext().getHeaders(), listener); } else { XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -120,7 +121,8 @@ private void handlePrivsResponse(String username, PutDatafeedAction.Request requ } } - private void putDatafeed(PutDatafeedAction.Request request, ActionListener listener) { + private void putDatafeed(PutDatafeedAction.Request request, Map headers, + ActionListener listener) { clusterService.submitStateUpdateTask( "put-datafeed-" + request.getDatafeed().getId(), @@ -136,16 +138,16 @@ protected PutDatafeedAction.Response newResponse(boolean acknowledged) { @Override public ClusterState execute(ClusterState currentState) { - return putDatafeed(request, currentState); + return putDatafeed(request, headers, currentState); } }); } - private ClusterState putDatafeed(PutDatafeedAction.Request request, ClusterState clusterState) { + private ClusterState putDatafeed(PutDatafeedAction.Request request, Map headers, ClusterState clusterState) { 
XPackPlugin.checkReadyForXPackCustomMetadata(clusterState); MlMetadata currentMetadata = MlMetadata.getMlMetadata(clusterState); MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata) - .putDatafeed(request.getDatafeed(), threadPool.getThreadContext()).build(); + .putDatafeed(request.getDatafeed(), headers).build(); return ClusterState.builder(clusterState).metaData( MetaData.builder(clusterState.getMetaData()).putCustom(MLMetadataField.TYPE, newMetadata).build()) .build(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java index 4d752fe294081..4e43cbb185330 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java @@ -27,6 +27,8 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import java.util.Map; + public class TransportUpdateDatafeedAction extends TransportMasterNodeAction { @Inject @@ -50,6 +52,8 @@ protected PutDatafeedAction.Response newResponse() { @Override protected void masterOperation(UpdateDatafeedAction.Request request, ClusterState state, ActionListener listener) { + final Map headers = threadPool.getThreadContext().getHeaders(); + clusterService.submitStateUpdateTask("update-datafeed-" + request.getUpdate().getId(), new AckedClusterStateUpdateTask(request, listener) { private volatile DatafeedConfig updatedDatafeed; @@ -69,7 +73,7 @@ public ClusterState execute(ClusterState currentState) { PersistentTasksCustomMetaData persistentTasks = currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata) - .updateDatafeed(update, persistentTasks, threadPool.getThreadContext()).build(); + .updateDatafeed(update, persistentTasks, headers).build(); updatedDatafeed = newMetadata.getDatafeed(update.getId()); return ClusterState.builder(currentState).metaData( MetaData.builder(currentState.getMetaData()).putCustom(MLMetadataField.TYPE, newMetadata).build()).build(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 85e8eb5759878..391357076cbbe 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; @@ -347,8 +346,8 @@ public void onFailure(String source, Exception e) { } @Override - public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { - afterClusterStateUpdate(clusterChangedEvent.state(), request); + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + afterClusterStateUpdate(newState, request); actionListener.onResponse(new PutJobAction.Response(updatedJob.get())); } }); diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java index f6fb2db3c9bb9..ecfe712858331 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java @@ -30,9 +30,11 @@ import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.JobTests; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import java.util.Collections; import java.util.Date; +import java.util.HashMap; import java.util.Map; import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; @@ -42,6 +44,7 @@ import static org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests.createDatafeedJob; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; @@ -63,7 +66,7 @@ protected MlMetadata createTestInstance() { } job = new Job.Builder(job).setAnalysisConfig(analysisConfig).build(); builder.putJob(job, false); - builder.putDatafeed(datafeedConfig, null); + builder.putDatafeed(datafeedConfig, Collections.emptyMap()); } else { builder.putJob(job, false); } @@ -164,7 +167,7 @@ public void testRemoveJob_failDatafeedRefersToJob() { DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build(); MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1, false); - builder.putDatafeed(datafeedConfig1, null); + builder.putDatafeed(datafeedConfig1, Collections.emptyMap()); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> builder.deleteJob(job1.getId(), new PersistentTasksCustomMetaData(0L, Collections.emptyMap()))); @@ -184,7 +187,7 @@ public void testCrudDatafeed() { DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build(); MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1, false); - builder.putDatafeed(datafeedConfig1, null); + builder.putDatafeed(datafeedConfig1, Collections.emptyMap()); MlMetadata result = builder.build(); assertThat(result.getJobs().get("job_id"), sameInstance(job1)); @@ -201,7 +204,7 @@ public void testPutDatafeed_failBecauseJobDoesNotExist() { DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", "missing-job").build(); MlMetadata.Builder builder = new MlMetadata.Builder(); - expectThrows(ResourceNotFoundException.class, () -> builder.putDatafeed(datafeedConfig1, null)); + expectThrows(ResourceNotFoundException.class, () -> builder.putDatafeed(datafeedConfig1, Collections.emptyMap())); } public void testPutDatafeed_failBecauseJobIsBeingDeleted() { @@ -210,7 +213,7 @@ public void testPutDatafeed_failBecauseJobIsBeingDeleted() { MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1, false); - expectThrows(ResourceNotFoundException.class, () -> builder.putDatafeed(datafeedConfig1, null)); + expectThrows(ResourceNotFoundException.class, () -> builder.putDatafeed(datafeedConfig1, Collections.emptyMap())); } public void testPutDatafeed_failBecauseDatafeedIdIsAlreadyTaken() { @@ -218,9 +221,9 @@ public void testPutDatafeed_failBecauseDatafeedIdIsAlreadyTaken() { DatafeedConfig 
datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build(); MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1, false); - builder.putDatafeed(datafeedConfig1, null); + builder.putDatafeed(datafeedConfig1, Collections.emptyMap()); - expectThrows(ResourceAlreadyExistsException.class, () -> builder.putDatafeed(datafeedConfig1, null)); + expectThrows(ResourceAlreadyExistsException.class, () -> builder.putDatafeed(datafeedConfig1, Collections.emptyMap())); } public void testPutDatafeed_failBecauseJobAlreadyHasDatafeed() { @@ -229,10 +232,10 @@ public void testPutDatafeed_failBecauseJobAlreadyHasDatafeed() { DatafeedConfig datafeedConfig2 = createDatafeedConfig("datafeed2", job1.getId()).build(); MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1, false); - builder.putDatafeed(datafeedConfig1, null); + builder.putDatafeed(datafeedConfig1, Collections.emptyMap()); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> builder.putDatafeed(datafeedConfig2, null)); + () -> builder.putDatafeed(datafeedConfig2, Collections.emptyMap())); assertThat(e.status(), equalTo(RestStatus.CONFLICT)); } @@ -246,7 +249,23 @@ public void testPutDatafeed_failBecauseJobIsNotCompatibleForDatafeed() { MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1.build(now), false); - expectThrows(ElasticsearchStatusException.class, () -> builder.putDatafeed(datafeedConfig1, null)); + expectThrows(ElasticsearchStatusException.class, () -> builder.putDatafeed(datafeedConfig1, Collections.emptyMap())); + } + + public void testPutDatafeed_setsSecurityHeaders() { + Job datafeedJob = createDatafeedJob().build(new Date()); + DatafeedConfig datafeedConfig = createDatafeedConfig("datafeed1", datafeedJob.getId()).build(); + MlMetadata.Builder builder = new MlMetadata.Builder(); + builder.putJob(datafeedJob, false); + + Map headers = new HashMap<>(); + headers.put("unrelated_header", "unrelated_header_value"); + headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, "permitted_run_as_user"); + builder.putDatafeed(datafeedConfig, headers); + MlMetadata metadata = builder.build(); + assertThat(metadata.getDatafeed("datafeed1").getHeaders().size(), equalTo(1)); + assertThat(metadata.getDatafeed("datafeed1").getHeaders(), + hasEntry(AuthenticationServiceField.RUN_AS_USER_HEADER, "permitted_run_as_user")); } public void testUpdateDatafeed() { @@ -254,12 +273,13 @@ public void testUpdateDatafeed() { DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build(); MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1, false); - builder.putDatafeed(datafeedConfig1, null); + builder.putDatafeed(datafeedConfig1, Collections.emptyMap()); MlMetadata beforeMetadata = builder.build(); DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedConfig1.getId()); update.setScrollSize(5000); - MlMetadata updatedMetadata = new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null, null).build(); + MlMetadata updatedMetadata = + new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null, Collections.emptyMap()).build(); DatafeedConfig updatedDatafeed = updatedMetadata.getDatafeed(datafeedConfig1.getId()); assertThat(updatedDatafeed.getJobId(), equalTo(datafeedConfig1.getJobId())); @@ -271,7 +291,8 @@ public void testUpdateDatafeed() { public void testUpdateDatafeed_failBecauseDatafeedDoesNotExist() { DatafeedUpdate.Builder update = new 
DatafeedUpdate.Builder("job_id"); update.setScrollSize(5000); - expectThrows(ResourceNotFoundException.class, () -> new MlMetadata.Builder().updateDatafeed(update.build(), null, null).build()); + expectThrows(ResourceNotFoundException.class, + () -> new MlMetadata.Builder().updateDatafeed(update.build(), null, Collections.emptyMap()).build()); } public void testUpdateDatafeed_failBecauseDatafeedIsNotStopped() { @@ -279,7 +300,7 @@ public void testUpdateDatafeed_failBecauseDatafeedIsNotStopped() { DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build(); MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1, false); - builder.putDatafeed(datafeedConfig1, null); + builder.putDatafeed(datafeedConfig1, Collections.emptyMap()); MlMetadata beforeMetadata = builder.build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -300,14 +321,14 @@ public void testUpdateDatafeed_failBecauseNewJobIdDoesNotExist() { DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build(); MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1, false); - builder.putDatafeed(datafeedConfig1, null); + builder.putDatafeed(datafeedConfig1, Collections.emptyMap()); MlMetadata beforeMetadata = builder.build(); DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedConfig1.getId()); update.setJobId(job1.getId() + "_2"); expectThrows(ResourceNotFoundException.class, - () -> new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null, null)); + () -> new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null, Collections.emptyMap())); } public void testUpdateDatafeed_failBecauseNewJobHasAnotherDatafeedAttached() { @@ -319,25 +340,46 @@ public void testUpdateDatafeed_failBecauseNewJobHasAnotherDatafeedAttached() { MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1, false); builder.putJob(job2.build(), false); - builder.putDatafeed(datafeedConfig1, null); - builder.putDatafeed(datafeedConfig2, null); + builder.putDatafeed(datafeedConfig1, Collections.emptyMap()); + builder.putDatafeed(datafeedConfig2, Collections.emptyMap()); MlMetadata beforeMetadata = builder.build(); DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedConfig1.getId()); update.setJobId(job2.getId()); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null, null)); + () -> new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null, Collections.emptyMap())); assertThat(e.status(), equalTo(RestStatus.CONFLICT)); assertThat(e.getMessage(), equalTo("A datafeed [datafeed2] already exists for job [job_id_2]")); } + public void testUpdateDatafeed_setsSecurityHeaders() { + Job datafeedJob = createDatafeedJob().build(new Date()); + DatafeedConfig datafeedConfig = createDatafeedConfig("datafeed1", datafeedJob.getId()).build(); + MlMetadata.Builder builder = new MlMetadata.Builder(); + builder.putJob(datafeedJob, false); + builder.putDatafeed(datafeedConfig, Collections.emptyMap()); + MlMetadata beforeMetadata = builder.build(); + assertTrue(beforeMetadata.getDatafeed("datafeed1").getHeaders().isEmpty()); + + DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedConfig.getId()); + update.setQueryDelay(TimeValue.timeValueMinutes(5)); + + Map headers = new HashMap<>(); + headers.put("unrelated_header", 
"unrelated_header_value"); + headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, "permitted_run_as_user"); + MlMetadata afterMetadata = new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null, headers).build(); + Map updatedHeaders = afterMetadata.getDatafeed("datafeed1").getHeaders(); + assertThat(updatedHeaders.size(), equalTo(1)); + assertThat(updatedHeaders, hasEntry(AuthenticationServiceField.RUN_AS_USER_HEADER, "permitted_run_as_user")); + } + public void testRemoveDatafeed_failBecauseDatafeedStarted() { Job job1 = createDatafeedJob().build(new Date()); DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build(); MlMetadata.Builder builder = new MlMetadata.Builder(); builder.putJob(job1, false); - builder.putDatafeed(datafeedConfig1, null); + builder.putDatafeed(datafeedConfig1, Collections.emptyMap()); MlMetadata result = builder.build(); assertThat(result.getJobs().get("job_id"), sameInstance(job1)); @@ -378,9 +420,9 @@ public void testExpandJobIds() { public void testExpandDatafeedIds() { MlMetadata.Builder mlMetadataBuilder = newMlMetadataWithJobs("bar-1", "foo-1", "foo-2"); - mlMetadataBuilder.putDatafeed(createDatafeedConfig("bar-1-feed", "bar-1").build(), null); - mlMetadataBuilder.putDatafeed(createDatafeedConfig("foo-1-feed", "foo-1").build(), null); - mlMetadataBuilder.putDatafeed(createDatafeedConfig("foo-2-feed", "foo-2").build(), null); + mlMetadataBuilder.putDatafeed(createDatafeedConfig("bar-1-feed", "bar-1").build(), Collections.emptyMap()); + mlMetadataBuilder.putDatafeed(createDatafeedConfig("foo-1-feed", "foo-1").build(), Collections.emptyMap()); + mlMetadataBuilder.putDatafeed(createDatafeedConfig("foo-2-feed", "foo-2").build(), Collections.emptyMap()); MlMetadata mlMetadata = mlMetadataBuilder.build(); @@ -409,7 +451,7 @@ protected MlMetadata mutateInstance(MlMetadata instance) { metadataBuilder.putJob(entry.getValue(), true); } for (Map.Entry entry : datafeeds.entrySet()) { - metadataBuilder.putDatafeed(entry.getValue(), null); + metadataBuilder.putDatafeed(entry.getValue(), Collections.emptyMap()); } switch (between(0, 1)) { @@ -430,7 +472,7 @@ protected MlMetadata mutateInstance(MlMetadata instance) { } randomJob = new Job.Builder(randomJob).setAnalysisConfig(analysisConfig).build(); metadataBuilder.putJob(randomJob, false); - metadataBuilder.putDatafeed(datafeedConfig, null); + metadataBuilder.putDatafeed(datafeedConfig, Collections.emptyMap()); break; default: throw new AssertionError("Illegal randomisation branch"); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java index d65fc1476e75e..0e7ad29c54da9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java @@ -51,7 +51,7 @@ public void testValidate_datafeedIsStarted() { MlMetadata.Builder mlBuilder = new MlMetadata.Builder(); mlBuilder.putJob(BaseMlIntegTestCase.createScheduledJob("job_id").build(new Date()), false); mlBuilder.putDatafeed(BaseMlIntegTestCase.createDatafeed("datafeed_id", "job_id", - Collections.singletonList("*")), null); + Collections.singletonList("*")), Collections.emptyMap()); final PersistentTasksCustomMetaData.Builder startDataFeedTaskBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", null, 
JobState.OPENED, startDataFeedTaskBuilder); addTask("datafeed_id", 0L, null, DatafeedState.STARTED, startDataFeedTaskBuilder); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java index af9446ed972cb..72c8d361dd882 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java @@ -45,7 +45,7 @@ public void testValidate_jobClosed() { PersistentTasksCustomMetaData tasks = PersistentTasksCustomMetaData.builder().build(); DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); MlMetadata mlMetadata2 = new MlMetadata.Builder(mlMetadata1) - .putDatafeed(datafeedConfig1, null) + .putDatafeed(datafeedConfig1, Collections.emptyMap()) .build(); Exception e = expectThrows(ElasticsearchStatusException.class, () -> TransportStartDatafeedAction.validate("foo-datafeed", mlMetadata2, tasks)); @@ -62,7 +62,7 @@ public void testValidate_jobOpening() { PersistentTasksCustomMetaData tasks = tasksBuilder.build(); DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); MlMetadata mlMetadata2 = new MlMetadata.Builder(mlMetadata1) - .putDatafeed(datafeedConfig1, null) + .putDatafeed(datafeedConfig1, Collections.emptyMap()) .build(); TransportStartDatafeedAction.validate("foo-datafeed", mlMetadata2, tasks); @@ -78,7 +78,7 @@ public void testValidate_jobOpened() { PersistentTasksCustomMetaData tasks = tasksBuilder.build(); DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); MlMetadata mlMetadata2 = new MlMetadata.Builder(mlMetadata1) - .putDatafeed(datafeedConfig1, null) + .putDatafeed(datafeedConfig1, Collections.emptyMap()) .build(); TransportStartDatafeedAction.validate("foo-datafeed", mlMetadata2, tasks); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java index 55a0f4006bcdd..934642986de96 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java @@ -42,7 +42,7 @@ public void testValidate() { DatafeedConfig datafeedConfig = createDatafeedConfig("foo", "job_id").build(); MlMetadata mlMetadata2 = new MlMetadata.Builder().putJob(job, false) - .putDatafeed(datafeedConfig, null) + .putDatafeed(datafeedConfig, Collections.emptyMap()) .build(); TransportStopDatafeedAction.validateDatafeedTask("foo", mlMetadata2); } @@ -54,12 +54,12 @@ public void testResolveDataFeedIds_GivenDatafeedId() { addTask("datafeed_1", 0L, "node-1", DatafeedState.STARTED, tasksBuilder); Job job = BaseMlIntegTestCase.createScheduledJob("job_id_1").build(new Date()); DatafeedConfig datafeedConfig = createDatafeedConfig("datafeed_1", "job_id_1").build(); - mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, null); + mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, Collections.emptyMap()); addTask("datafeed_2", 0L, "node-1", DatafeedState.STOPPED, tasksBuilder); job = 
BaseMlIntegTestCase.createScheduledJob("job_id_2").build(new Date()); datafeedConfig = createDatafeedConfig("datafeed_2", "job_id_2").build(); - mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, null); + mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, Collections.emptyMap()); PersistentTasksCustomMetaData tasks = tasksBuilder.build(); MlMetadata mlMetadata = mlMetadataBuilder.build(); @@ -86,17 +86,17 @@ public void testResolveDataFeedIds_GivenAll() { addTask("datafeed_1", 0L, "node-1", DatafeedState.STARTED, tasksBuilder); Job job = BaseMlIntegTestCase.createScheduledJob("job_id_1").build(new Date()); DatafeedConfig datafeedConfig = createDatafeedConfig("datafeed_1", "job_id_1").build(); - mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, null); + mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, Collections.emptyMap()); addTask("datafeed_2", 0L, "node-1", DatafeedState.STOPPED, tasksBuilder); job = BaseMlIntegTestCase.createScheduledJob("job_id_2").build(new Date()); datafeedConfig = createDatafeedConfig("datafeed_2", "job_id_2").build(); - mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, null); + mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, Collections.emptyMap()); addTask("datafeed_3", 0L, "node-1", DatafeedState.STOPPING, tasksBuilder); job = BaseMlIntegTestCase.createScheduledJob("job_id_3").build(new Date()); datafeedConfig = createDatafeedConfig("datafeed_3", "job_id_3").build(); - mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, null); + mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, Collections.emptyMap()); PersistentTasksCustomMetaData tasks = tasksBuilder.build(); MlMetadata mlMetadata = mlMetadataBuilder.build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index f609f0c8c5ed9..6ce03d22b64f0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -84,7 +84,7 @@ public void setUpTests() { Job job = createDatafeedJob().build(new Date()); mlMetadata.putJob(job, false); DatafeedConfig datafeed = createDatafeedConfig("datafeed_id", job.getId()).build(); - mlMetadata.putDatafeed(datafeed, null); + mlMetadata.putDatafeed(datafeed, Collections.emptyMap()); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); PersistentTasksCustomMetaData tasks = tasksBuilder.build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 96ae3b5ef38b6..f3fa804bb27b9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -68,7 +68,7 @@ public void testSelectNode_GivenJobIsOpened() { MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), 
Collections.singletonList("foo")), null); + mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), Collections.emptyMap()); mlMetadata = mlMetadataBuilder.build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -86,7 +86,7 @@ public void testSelectNode_GivenJobIsOpening() { MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), null); + mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), Collections.emptyMap()); mlMetadata = mlMetadataBuilder.build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -106,7 +106,7 @@ public void testNoJobTask() { mlMetadataBuilder.putJob(job, false); // Using wildcard index name to test for index resolving as well - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), null); + mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), Collections.emptyMap()); mlMetadata = mlMetadataBuilder.build(); tasks = PersistentTasksCustomMetaData.builder().build(); @@ -128,7 +128,7 @@ public void testSelectNode_GivenJobFailedOrClosed() { MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), null); + mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), Collections.emptyMap()); mlMetadata = mlMetadataBuilder.build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -156,7 +156,7 @@ public void testShardUnassigned() { mlMetadataBuilder.putJob(job, false); // Using wildcard index name to test for index resolving as well - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), null); + mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), Collections.emptyMap()); mlMetadata = mlMetadataBuilder.build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -182,7 +182,7 @@ public void testShardNotAllActive() { mlMetadataBuilder.putJob(job, false); // Using wildcard index name to test for index resolving as well - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), null); + mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), Collections.emptyMap()); mlMetadata = mlMetadataBuilder.build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -207,7 +207,8 @@ public void testIndexDoesntExist() { MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")), null); + mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), 
Collections.singletonList("not_foo")), + Collections.emptyMap()); mlMetadata = mlMetadataBuilder.build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -231,7 +232,8 @@ public void testRemoteIndex() { MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("remote:foo")), null); + mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("remote:foo")), + Collections.emptyMap()); mlMetadata = mlMetadataBuilder.build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -248,7 +250,7 @@ public void testSelectNode_jobTaskStale() { MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), null); + mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), Collections.emptyMap()); mlMetadata = mlMetadataBuilder.build(); String nodeId = randomBoolean() ? "node_id2" : null; @@ -286,7 +288,8 @@ public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() { MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(); Job job = createScheduledJob("job_id").build(new Date()); mlMetadataBuilder.putJob(job, false); - mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")), null); + mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")), + Collections.emptyMap()); mlMetadata = mlMetadataBuilder.build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java index 357c2bc232552..14ec4813a749e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; @@ -47,7 +46,7 @@ public void onFailure(String source, Exception e) { } @Override - public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { markAsDeletedLatch.countDown(); } }); @@ -90,7 +89,7 @@ public void onFailure(String source, Exception e) { } @Override - public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { removeJobLatch.countDown(); } }); From 4860c920a69e3dc9b872105c30a9a58eee09750e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 18 Jun 2018 12:06:42 -0400 Subject: [PATCH 30/41] Docs: Use the default distribution to test 
docs (#31251) This switches the docs tests from the `oss-zip` distribution to the `zip` distribution so they have xpack installed and configured with the default basic license. The goal is to be able to merge the `x-pack/docs` directory into the `docs` directory, marking the x-pack docs with some kind of marker. This is the first step in that process. This also enables `-Dtests.distribution` support for the `docs` directory so you can run the tests against the `oss-zip` distribution with something like ``` ./gradlew -p docs check -Dtests.distribution=oss-zip ``` We can set up Jenkins to run both. Relates to #30665 --- .../gradle/doc/DocsTestPlugin.groovy | 4 +++ docs/Versions.asciidoc | 1 + docs/build.gradle | 1 - docs/plugins/discovery-azure-classic.asciidoc | 2 +- docs/reference/cat/nodeattrs.asciidoc | 25 +++++++++++++++--- docs/reference/cat/templates.asciidoc | 7 +++++ docs/reference/cat/thread_pool.asciidoc | 26 ++++++++++++------- docs/reference/cluster/nodes-info.asciidoc | 4 +-- docs/reference/cluster/stats.asciidoc | 26 +++++++++---------- .../setup/install/check-running.asciidoc | 2 +- 10 files changed, 66 insertions(+), 32 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index f674dbd33cdfd..27f122b8610ee 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -32,6 +32,8 @@ public class DocsTestPlugin extends RestTestPlugin { public void apply(Project project) { project.pluginManager.apply('elasticsearch.standalone-rest-test') super.apply(project) + // The distribution can be configured with -Dtests.distribution on the command line + project.integTestCluster.distribution = System.getProperty('tests.distribution', 'zip') // Docs are published separately so no need to assemble project.tasks.remove(project.assemble) project.build.dependsOn.remove('assemble') @@ -43,6 +45,8 @@ public class DocsTestPlugin extends RestTestPlugin { '\\{version\\}': VersionProperties.elasticsearch.toString().replace('-SNAPSHOT', ''), '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), + '\\{build_flavor\\}' : + project.integTestCluster.distribution.startsWith('oss-') ? 'oss' : 'default', ] Task listSnippets = project.tasks.create('listSnippets', SnippetsTask) listSnippets.group 'Docs' diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index e62bd0df9ff63..427825a77ddd7 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -5,6 +5,7 @@ :branch: 6.x :jdk: 1.8.0_131 :jdk_major: 8 +:build_flavor: default ////////// release-state can be: released | prerelease | unreleased diff --git a/docs/build.gradle b/docs/build.gradle index c6ded0292bc92..f28de4fa6fde9 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -20,7 +20,6 @@ apply plugin: 'elasticsearch.docs-test' integTestCluster { - distribution = 'oss-zip' /* Enable regexes in painless so our tests don't complain about example * snippets that use them. 
*/ setting 'script.painless.regex.enabled', 'true' diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc index c56991b8f507f..1c1925de878aa 100644 --- a/docs/plugins/discovery-azure-classic.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -372,7 +372,7 @@ This command should give you a JSON result: "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", "version" : { "number" : "{version}", - "build_flavor" : "oss", + "build_flavor" : "{build_flavor}", "build_type" : "zip", "build_hash" : "f27399d", "build_date" : "2016-03-30T09:51:41.449Z", diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc index 196f142cc35e1..6c474c2117943 100644 --- a/docs/reference/cat/nodeattrs.asciidoc +++ b/docs/reference/cat/nodeattrs.asciidoc @@ -9,15 +9,23 @@ For example: GET /_cat/nodeattrs?v -------------------------------------------------- // CONSOLE +// TEST[s/\?v/\?v&s=node,attr/] +// Sort the resulting attributes so we can assert on them more easily Could look like: [source,txt] -------------------------------------------------- node host ip attr value -EK_AsJb 127.0.0.1 127.0.0.1 testattr test +... +node-0 127.0.0.1 127.0.0.1 testattr test +... -------------------------------------------------- -// TESTRESPONSE[s/EK_AsJb/.+/ _cat] +// TESTRESPONSE[s/\.\.\.\n$/\n(.+ xpack\\.installed true\n)?\n/] +// TESTRESPONSE[s/\.\.\.\n/(.+ ml\\..+\n)*/ _cat] +// If xpack is not installed then neither ... will match anything +// If xpack is installed then the first ... contains ml attributes +// and the second contains xpack.installed=true The first few columns (`node`, `host`, `ip`) give you basic info per node and the `attr` and `value` columns give you the custom node attributes, @@ -46,15 +54,24 @@ mode (`v`). The header name will match the supplied value (e.g., GET /_cat/nodeattrs?v&h=name,pid,attr,value -------------------------------------------------- // CONSOLE +// TEST[s/,value/,value&s=node,attr/] +// Sort the resulting attributes so we can assert on them more easily Might look like: [source,txt] -------------------------------------------------- name pid attr value -EK_AsJb 19566 testattr test +... +node-0 19566 testattr test +... -------------------------------------------------- -// TESTRESPONSE[s/EK_AsJb/.+/ s/19566/\\d*/ _cat] +// TESTRESPONSE[s/19566/\\d*/] +// TESTRESPONSE[s/\.\.\.\n$/\n(.+ xpack\\.installed true\n)?\n/] +// TESTRESPONSE[s/\.\.\.\n/(.+ ml\\..+\n)*/ _cat] +// If xpack is not installed then neither ... will match anything +// If xpack is installed then the first ... contains ml attributes +// and the second contains xpack.installed=true [cols="<,<,<,<,<",options="header",subs="normal"] |======================================================================= diff --git a/docs/reference/cat/templates.asciidoc b/docs/reference/cat/templates.asciidoc index bc221d13552c0..076e84b72b5d3 100644 --- a/docs/reference/cat/templates.asciidoc +++ b/docs/reference/cat/templates.asciidoc @@ -8,9 +8,16 @@ The `templates` command provides information about existing templates. GET /_cat/templates?v&s=name -------------------------------------------------- // CONSOLE +// TEST[s/templates/templates\/template*/] // TEST[s/^/PUT _template\/template0\n{"index_patterns": "te*", "order": 0}\n/] // TEST[s/^/PUT _template\/template1\n{"index_patterns": "tea*", "order": 1}\n/] // TEST[s/^/PUT _template\/template2\n{"index_patterns": "teak*", "order": 2, "version": 7}\n/] +// The substitutions do two things: +// 1.
Filter the response to just templates matching the te* pattern +// so that we only get the templates we expect regardless of which +// templates exist. If xpack is installed there will be unexpected +// templates. +// 2. Create some templates to expect in the response. which looks like diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index f1c7664ae33f6..0f8e74c670335 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -18,19 +18,19 @@ node-0 analyze 0 0 0 node-0 fetch_shard_started 0 0 0 node-0 fetch_shard_store 0 0 0 node-0 flush 0 0 0 -node-0 force_merge 0 0 0 -node-0 generic 0 0 0 -node-0 get 0 0 0 -node-0 index 0 0 0 -node-0 listener 0 0 0 -node-0 management 1 0 0 -node-0 refresh 0 0 0 -node-0 search 0 0 0 -node-0 snapshot 0 0 0 -node-0 warmer 0 0 0 +... node-0 write 0 0 0 -------------------------------------------------- +// TESTRESPONSE[s/\.\.\./(node-0 .+ 0 0 0\n)+/] // TESTRESPONSE[s/\d+/\\d+/ _cat] +// The substitutions do two things: +// 1. Expect any number of extra thread pools. This allows us to only list a +// few thread pools. The list would be super long otherwise. In addition, +// if xpack is installed then the list will contain more thread pools and +// this way we don't have to assert about them. +// 2. Expect any number of active, queued, or rejected items. We really don't +// know how many there will be and we just want to assert that there are +// numbers in the response, not *which* numbers are there. The first column is the node name @@ -54,10 +54,16 @@ get index listener management +ml_autodetect (default distro only) +ml_datafeed (default distro only) +ml_utility (default distro only) refresh +rollup_indexing (default distro only) search +security-token-key (default distro only) snapshot warmer +watcher (default distro only) write -------------------------------------------------- diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc index 6522d0f5ad68a..2cd61dd905ff6 100644 --- a/docs/reference/cluster/nodes-info.asciidoc +++ b/docs/reference/cluster/nodes-info.asciidoc @@ -142,7 +142,7 @@ The result will look similar to: "host": "node-0.elastic.co", "ip": "192.168.17", "version": "{version}", - "build_flavor": "oss", + "build_flavor": "{build_flavor}", "build_type": "zip", "build_hash": "587409e", "roles": [ @@ -237,7 +237,7 @@ The result will look similar to: "host": "node-0.elastic.co", "ip": "192.168.17", "version": "{version}", - "build_flavor": "oss", + "build_flavor": "{build_flavor}", "build_type": "zip", "build_hash": "587409e", "roles": [], diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 6efb4dced8bb8..191da2660d668 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -192,23 +192,23 @@ Will return, for example: "description": "Ingest processor that extracts information from a user agent", "classname": "org.elasticsearch.ingest.useragent.IngestUserAgentPlugin", "has_native_controller": false - } + }, + ... ], - "network_types" : { - "transport_types" : { - "netty4" : 1 - }, - "http_types" : { - "netty4" : 1 - } - } + ...
} } -------------------------------------------------- // TESTRESPONSE[s/"plugins": \[[^\]]*\]/"plugins": $body.$_path/] +// TESTRESPONSE[s/\.\.\./"network_types": "replace_me"/] // TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/] // TESTRESPONSE[s/: "[^"]*"/: $body.$_path/] -//// -The TESTRESPONSE above replace all the fields values by the expected ones in the test, -because we don't really care about the field values but we want to check the fields names. -//// \ No newline at end of file +// These replacements do a few things: +// 1. Ignore the contents of the `plugins` object because we don't know all of +// the plugins that will be in it. And because we figure folks don't need to +// see an exhaustive list anyway. +// 2. The last ... contains more things that we don't think are important to +// include in the output. +// 3. All of the numbers and strings on the right hand side of *every* field in +// the response are ignored. So we're really only asserting things about +// the shape of this response, not the values in it. diff --git a/docs/reference/setup/install/check-running.asciidoc b/docs/reference/setup/install/check-running.asciidoc index 0cfc4b329ecfa..7b95a10158d2f 100644 --- a/docs/reference/setup/install/check-running.asciidoc +++ b/docs/reference/setup/install/check-running.asciidoc @@ -19,7 +19,7 @@ which should give you a response something like this: "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", "version" : { "number" : "{version}", - "build_flavor" : "oss", + "build_flavor" : "{build_flavor}", "build_type" : "zip", "build_hash" : "f27399d", "build_date" : "2016-03-30T09:51:41.449Z", From 8393554dc060f2e7d6c01ae0d4f2c7b84ab6b0f4 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 18 Jun 2018 19:03:46 +0200 Subject: [PATCH 31/41] RestAPI: Reject forcemerge requests with a body (#30792) This commit adds validation to forcemerge REST requests that contain a body. All parameters to force merge must be provided as HTTP query parameters.
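For example (the index name and snippet are purely illustrative), a request that puts the parameter in a body, such as ``` POST /my-index/_forcemerge {"max_num_segments": 1} ``` is now rejected with an `IllegalArgumentException` (an HTTP 400 response) carrying the message "forcemerge takes arguments in query parameters, not in the request body". The parameter must be supplied as a query parameter instead: ``` POST /my-index/_forcemerge?max_num_segments=1 ```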
closes #29584 --- .../admin/indices/RestForceMergeAction.java | 3 ++ .../forcemerge/RestForceMergeActionTests.java | 47 +++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java index dcc397be14263..6ec4cec77193e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java @@ -47,6 +47,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + if (request.hasContent()) { + throw new IllegalArgumentException("forcemerge takes arguments in query parameters, not in the request body"); + } ForceMergeRequest mergeRequest = new ForceMergeRequest(Strings.splitStringByCommaToArray(request.param("index"))); mergeRequest.indicesOptions(IndicesOptions.fromRequest(request, mergeRequest.indicesOptions())); mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments())); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java new file mode 100644 index 0000000000000..aeb5beb09e2fc --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.forcemerge; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; + +public class RestForceMergeActionTests extends ESTestCase { + + public void testBodyRejection() throws Exception { + final RestForceMergeAction handler = new RestForceMergeAction(Settings.EMPTY, mock(RestController.class)); + String json = JsonXContent.contentBuilder().startObject().field("max_num_segments", 1).endObject().toString(); + final FakeRestRequest request = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) + .withContent(new BytesArray(json), XContentType.JSON).build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> handler.prepareRequest(request, mock(NodeClient.class))); + assertThat(e.getMessage(), equalTo("forcemerge takes arguments in query parameters, not in the request body")); + } +} From 562a43c341481cc1a5e2d42cb176efd58455171d Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 18 Jun 2018 10:43:00 -0700 Subject: [PATCH 32/41] [DOCS] Backports breaking change (#31373) --- docs/reference/release-notes/6.3.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/release-notes/6.3.asciidoc b/docs/reference/release-notes/6.3.asciidoc index 467f81f93384d..e47f59118a2bc 100644 --- a/docs/reference/release-notes/6.3.asciidoc +++ b/docs/reference/release-notes/6.3.asciidoc @@ -23,14 +23,13 @@ Packaging:: Plugins:: * Remove silent batch mode from install plugin {pull}29359[#29359] -Search:: -* Fail _search request with trailing tokens {pull}29428[#29428] (issue: {issue}28995[#28995]) - Security:: * The legacy `XPackExtension` extension mechanism has been removed and replaced with an SPI based extension mechanism that is installed and built as an elasticsearch plugin. + + [[breaking-java-6.3.0]] [float] === Breaking Java changes @@ -63,6 +62,7 @@ REST API:: Search:: * Deprecate slicing on `_uid`. {pull}29353[#29353] +* Generate deprecation warning for _search request with trailing tokens {pull}29428[#29428] (issue: {issue}28995[#28995]) Stats:: * Deprecate the suggest metrics {pull}29627[#29627] (issue: {issue}29589[#29589]) From cbfac4fefe4d7bce91870e95116aa1735e8e6b97 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 18 Jun 2018 11:04:27 -0700 Subject: [PATCH 33/41] [DOCS] Adds security breaking change (#31375) --- docs/reference/migration/migrate_6_3.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/reference/migration/migrate_6_3.asciidoc b/docs/reference/migration/migrate_6_3.asciidoc index ddb5eb5f695ae..64ba49a500d96 100644 --- a/docs/reference/migration/migrate_6_3.asciidoc +++ b/docs/reference/migration/migrate_6_3.asciidoc @@ -55,6 +55,12 @@ accept these permissions either by keeping standard input open and attaching a TTY (i.e., using interactive mode to accept the permissions), or by passing the `--batch` flag. 
+==== Implementing custom realms with SPI instead of XPackExtension + +The legacy `XPackExtension` extension mechanism has been removed and replaced +with an SPI-based extension mechanism that is installed and built as an +Elasticsearch plugin. For more information about using SPI-loaded security extensions in custom realms, see {stack-ov}/custom-realms.html[Integrating with other authentication systems]. + [[breaking_63_settings_changes]] === Settings changes From d9d8c8ab42d815ddd4ddbf28232a787220780b41 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 18 Jun 2018 13:29:05 -0700 Subject: [PATCH 34/41] [DOCS] Adds monitoring breaking change (#31369) --- docs/reference/migration/migrate_6_3.asciidoc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/reference/migration/migrate_6_3.asciidoc b/docs/reference/migration/migrate_6_3.asciidoc index 64ba49a500d96..e18021d46222d 100644 --- a/docs/reference/migration/migrate_6_3.asciidoc +++ b/docs/reference/migration/migrate_6_3.asciidoc @@ -94,3 +94,11 @@ place) you can start Elasticsearch with the JVM option `-Des.thread_pool.write.use_bulk_as_display_name=true` to have Elasticsearch continue to display the name of this thread pool as `bulk`. Elasticsearch will stop observing this system property in 7.0.0. + +==== Enabling monitoring + +By default when you install {xpack}, monitoring is enabled but data collection +is disabled. To enable data collection, use the new +`xpack.monitoring.collection.enabled` setting. You can update this setting by +using the <>. For more +information, see <>. From cb3dc1a40564c6b987c589cbedb536dd9cf51e3a Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Mon, 18 Jun 2018 15:04:26 -0600 Subject: [PATCH 35/41] Fix reference to XContentBuilder.string() (#31337) In 6.3 this was moved to `Strings.toString(XContentBuilder)` as part of the XContent extraction. This commit fixes the docs to reference the new method. Resolves #31326 --- docs/java-api/docs/index_.asciidoc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/java-api/docs/index_.asciidoc b/docs/java-api/docs/index_.asciidoc index b455a7ab01ff3..2ce19cfffa098 100644 --- a/docs/java-api/docs/index_.asciidoc +++ b/docs/java-api/docs/index_.asciidoc @@ -99,11 +99,13 @@ Note that you can also add arrays with `startArray(String)` and other XContentBuilder objects. If you need to see the generated JSON content, you can use the -`string()` method. +`Strings.toString()` method.
[source,java] -------------------------------------------------- -String json = builder.string(); +import org.elasticsearch.common.Strings; + +String json = Strings.toString(builder); -------------------------------------------------- From 2dd7e3febb1e06db1bf96d62315c7edb7b948d1e Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Mon, 18 Jun 2018 15:59:20 -0400 Subject: [PATCH 36/41] Mute DefaultShardsIT#testDefaultShards test Tracked by #31408 --- .../test/java/org/elasticsearch/test/rest/DefaultShardsIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java index de736c84e45b6..5ef82d6a9ec00 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java @@ -30,6 +30,7 @@ public class DefaultShardsIT extends ESRestTestCase { + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31408") public void testDefaultShards() throws IOException { final Response response = client().performRequest(new Request("PUT", "/index")); final String warning = response.getHeader("Warning"); From e75c51e224b13b388fba34b854787ffd0186766d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 18 Jun 2018 17:24:51 -0400 Subject: [PATCH 37/41] Test: better error message on failure The `DefaultShardsIT` test wasn't giving as much information as I'd like on failure, so I added a bit more. --- .../org/elasticsearch/test/rest/DefaultShardsIT.java | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java index 5ef82d6a9ec00..74edfbd189a4c 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.test.rest; +import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -34,8 +35,16 @@ public class DefaultShardsIT extends ESRestTestCase { public void testDefaultShards() throws IOException { final Response response = client().performRequest(new Request("PUT", "/index")); final String warning = response.getHeader("Warning"); + if (warning == null) { + StringBuilder explanation = new StringBuilder("expected response to contain a warning but did not "); + explanation.append(response); + if (response.getEntity() != null) { + explanation.append(" entity:\n").append(EntityUtils.toString(response.getEntity())); + } + fail(explanation.toString()); + } final Matcher matcher = WARNING_HEADER_PATTERN.matcher(warning); - assertTrue(matcher.matches()); + assertTrue("warning didn't match warning header pattern but was [" + warning + "]", matcher.matches()); final String message = matcher.group(1); assertThat(message, equalTo("the default number of shards will change from [5] to [1] in 7.0.0; " + "if you wish to continue using the default of [5] shards, " From 4be2f3e2700b667fea8d54e459d42b141712bf59 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Mon, 18 Jun 2018 13:50:52 -0400 Subject: [PATCH
38/41] Fix defaults in GeoShapeFieldMapper output (#31302) GeoShapeFieldMapper should show actual defaults instead of placeholder values when the mapping is requested with include_defaults=true. Closes #23206 --- .../index/mapper/GeoShapeFieldMapper.java | 17 +++- .../mapper/GeoShapeFieldMapperTests.java | 77 +++++++++++++++++++ 2 files changed, 92 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index d5089c7be0e6f..b130db7544ae5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -555,11 +555,24 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, if (includeDefaults || fieldType().tree().equals(Defaults.TREE) == false) { builder.field(Names.TREE, fieldType().tree()); } - if (includeDefaults || fieldType().treeLevels() != 0) { + + if (fieldType().treeLevels() != 0) { builder.field(Names.TREE_LEVELS, fieldType().treeLevels()); + } else if(includeDefaults && fieldType().precisionInMeters() == -1) { // defaults only make sense if precision is not specified + if ("geohash".equals(fieldType().tree())) { + builder.field(Names.TREE_LEVELS, Defaults.GEOHASH_LEVELS); + } else if ("legacyquadtree".equals(fieldType().tree())) { + builder.field(Names.TREE_LEVELS, Defaults.QUADTREE_LEVELS); + } else if ("quadtree".equals(fieldType().tree())) { + builder.field(Names.TREE_LEVELS, Defaults.QUADTREE_LEVELS); + } else { + throw new IllegalArgumentException("Unknown prefix tree type [" + fieldType().tree() + "]"); + } } - if (includeDefaults || fieldType().precisionInMeters() != -1) { + if (fieldType().precisionInMeters() != -1) { builder.field(Names.TREE_PRESISION, DistanceUnit.METERS.toString(fieldType().precisionInMeters())); + } else if (includeDefaults && fieldType().treeLevels() == 0) { // defaults only make sense if tree levels are not specified + builder.field(Names.TREE_PRESISION, DistanceUnit.METERS.toString(50)); } if (includeDefaults || fieldType().strategyName() != Defaults.STRATEGY) { builder.field(Names.STRATEGY, fieldType().strategyName()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java index 865d218670832..30f994745b67f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java @@ -27,6 +27,8 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.geo.builders.ShapeBuilder; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -34,6 +36,7 @@ import java.io.IOException; import java.util.Collection; +import java.util.Collections; import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE; import static org.hamcrest.Matchers.containsString; @@ -517,4 +520,78 @@ public void testEmptyName() throws Exception { assertThat(e.getMessage(), containsString("name cannot be empty string")); } + public void testSerializeDefaults() 
throws Exception { + DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field("tree", "quadtree") + .endObject().endObject() + .endObject().endObject()); + DocumentMapper defaultMapper = parser.parse("type1", new CompressedXContent(mapping)); + String serialized = toXContentString((GeoShapeFieldMapper) defaultMapper.mappers().getMapper("location")); + assertTrue(serialized, serialized.contains("\"precision\":\"50.0m\"")); + assertTrue(serialized, serialized.contains("\"tree_levels\":21")); + } + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field("tree", "geohash") + .endObject().endObject() + .endObject().endObject()); + DocumentMapper defaultMapper = parser.parse("type1", new CompressedXContent(mapping)); + String serialized = toXContentString((GeoShapeFieldMapper) defaultMapper.mappers().getMapper("location")); + assertTrue(serialized, serialized.contains("\"precision\":\"50.0m\"")); + assertTrue(serialized, serialized.contains("\"tree_levels\":9")); + } + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field("tree", "quadtree") + .field("tree_levels", "6") + .endObject().endObject() + .endObject().endObject()); + DocumentMapper defaultMapper = parser.parse("type1", new CompressedXContent(mapping)); + String serialized = toXContentString((GeoShapeFieldMapper) defaultMapper.mappers().getMapper("location")); + assertFalse(serialized, serialized.contains("\"precision\":")); + assertTrue(serialized, serialized.contains("\"tree_levels\":6")); + } + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field("tree", "quadtree") + .field("precision", "6") + .endObject().endObject() + .endObject().endObject()); + DocumentMapper defaultMapper = parser.parse("type1", new CompressedXContent(mapping)); + String serialized = toXContentString((GeoShapeFieldMapper) defaultMapper.mappers().getMapper("location")); + assertTrue(serialized, serialized.contains("\"precision\":\"6.0m\"")); + assertFalse(serialized, serialized.contains("\"tree_levels\":")); + } + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field("tree", "quadtree") + .field("precision", "6m") + .field("tree_levels", "5") + .endObject().endObject() + .endObject().endObject()); + DocumentMapper defaultMapper = parser.parse("type1", new CompressedXContent(mapping)); + String serialized = toXContentString((GeoShapeFieldMapper) defaultMapper.mappers().getMapper("location")); + assertTrue(serialized, serialized.contains("\"precision\":\"6.0m\"")); + assertTrue(serialized, serialized.contains("\"tree_levels\":5")); + } + } + + public String toXContentString(GeoShapeFieldMapper mapper) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); + mapper.doXContentBody(builder, true, new 
ToXContent.MapParams(Collections.singletonMap("include_defaults", "true"))); + return Strings.toString(builder.endObject()); + } + } From a4a156331c68fccb0bd30a9af7233746d89f9a2d Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Tue, 19 Jun 2018 09:55:33 +0200 Subject: [PATCH 39/41] Skip get_alias tests for 5.x (#31397) Some recent failures on the mixed cluster tests were caused by #31308. Instead of executing the get index API when calling GET /_alias we now go through the get alias API. The behaviour of that API is slightly different on 5.6 compared to 6.x and master as to whether indices that have no aliases are returned or not. In fact #25114 was not backported to 5.6. When the 5.6 node is the elected master, if the get alias API goes through such a node or another 5.x node, the get index API will be used internally and all tests are fine. If some 6.x node is hit by the client request, though, we will go through the get alias API, but we will do it through the elected master which will not return indices without aliases (at transport, see MetaData#findAliases on 5.6). That means that in a mixed cluster this API will return a different result depending on which node is the elected master and which one is hit by the request. --- .../rest-api-spec/test/indices.get_alias/10_basic.yml | 4 ++-- .../rest-api-spec/test/indices.get_alias/20_empty.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 42cadb9a8b08a..5c1ab8d2fa56e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -20,8 +20,8 @@ setup: --- "Get all aliases via /_alias": - skip: - version: all - reason: Nik will look on Monday + version: " - 5.99.99" + reason: 5.x doesn't return indices without aliases (at transport) - do: indices.create: index: test_index_3 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/20_empty.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/20_empty.yml index 5a7c328fd5b3f..98d2ac41c767b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/20_empty.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/20_empty.yml @@ -12,8 +12,8 @@ setup: --- "Check empty aliases when getting all aliases via /_alias": - skip: - version: all - reason: Nik will look on Monday + version: " - 5.99.99" + reason: 5.x doesn't return indices without aliases (at transport) - do: indices.get_alias: {} From e2e59a8b9a38e11fb2e664b4b89332ab50656bf5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 19 Jun 2018 12:22:53 +0200 Subject: [PATCH 40/41] Increasing skip version for failing test on 6.x Relates to #31422 --- .../rest-api-spec/test/analysis-common/40_token_filters.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index bfb6c97c24f6d..f0fe03b8f4f75 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ 
b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -1028,8 +1028,8 @@ --- "delimited_payload_filter": - skip: - version: " - 6.1.99" - reason: delimited_payload_filter deprecated in 6.2, replaced by delimited_payload + version: " - 6.99.99" + reason: AwaitsFix, https://github.com/elastic/elasticsearch/issues/31422. delimited_payload_filter deprecated in 6.2, replaced by delimited_payload features: "warnings" - do: From 70c14a553f6a04b10db690f6f7f74305349ac616 Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Tue, 19 Jun 2018 14:21:11 +0200 Subject: [PATCH 41/41] Add get stored script and delete stored script to high level REST API Relates to #27205 (cherry picked from commit 04e4e44) --- .../client/RequestConverters.java | 19 ++ .../client/RestHighLevelClient.java | 60 ++++++ .../client/RequestConvertersTests.java | 28 +++ .../elasticsearch/client/StoredScriptsIT.java | 105 +++++++++ .../StoredScriptsDocumentationIT.java | 204 ++++++++++++++++++ .../high-level/script/delete_script.asciidoc | 81 +++++++ .../high-level/script/get_script.asciidoc | 77 +++++++ .../high-level/supported-apis.asciidoc | 11 + .../rest-api-spec/api/get_script.json | 4 + .../DeleteStoredScriptResponse.java | 5 + .../GetStoredScriptResponse.java | 84 +++++++- .../TransportGetStoredScriptAction.java | 2 +- .../cluster/RestGetStoredScriptAction.java | 42 +--- .../script/StoredScriptSource.java | 6 +- .../DeleteStoredScriptResponseTests.java | 46 ++++ .../GetStoredScriptResponseTests.java | 61 ++++++ 16 files changed, 789 insertions(+), 46 deletions(-) create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java create mode 100644 docs/java-rest/high-level/script/delete_script.asciidoc create mode 100644 docs/java-rest/high-level/script/get_script.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptResponseTests.java create mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index e65d9480ce6bb..9d2216b0b5b86 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -37,6 +37,8 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; @@ -903,6 +905,23 @@ static Request getAlias(GetAliasesRequest getAliasesRequest) { return request; } + static Request getScript(GetStoredScriptRequest getStoredScriptRequest) { + String endpoint = new 
EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(getStoredScriptRequest.id()).build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + Params params = new Params(request); + params.withMasterTimeout(getStoredScriptRequest.masterNodeTimeout()); + return request; + } + + static Request deleteScript(DeleteStoredScriptRequest deleteStoredScriptRequest) { + String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(deleteStoredScriptRequest.id()).build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + Params params = new Params(request); + params.withTimeout(deleteStoredScriptRequest.timeout()); + params.withMasterTimeout(deleteStoredScriptRequest.masterNodeTimeout()); + return request; + } + private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef(); return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 0084ce0f90d74..4f2f671418e59 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -26,6 +26,10 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -963,6 +967,62 @@ public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldC FieldCapabilitiesResponse::fromXContent, emptySet()); } + /** + * Get stored script by id. + * See + * How to use scripts on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetStoredScriptResponse getScript(GetStoredScriptRequest request, RequestOptions options) throws IOException { + return performRequestAndParseEntity(request, RequestConverters::getScript, options, + GetStoredScriptResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously get stored script by id. + * See + * How to use scripts on elastic.co + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getScriptAsync(GetStoredScriptRequest request, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(request, RequestConverters::getScript, options, + GetStoredScriptResponse::fromXContent, listener, emptySet()); + } + + /** + * Delete stored script by id. + * See + * How to use scripts on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public DeleteStoredScriptResponse deleteScript(DeleteStoredScriptRequest request, RequestOptions options) throws IOException { + return performRequestAndParseEntity(request, RequestConverters::deleteScript, options, + DeleteStoredScriptResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously delete stored script by id. + * See + * How to use scripts on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void deleteScriptAsync(DeleteStoredScriptRequest request, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(request, RequestConverters::deleteScript, options, + DeleteStoredScriptResponse::fromXContent, listener, emptySet()); + } + /** * Asynchronously executes a request using the Field Capabilities API. * See Field Capabilities API diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index aa58f4d775604..6c0ae7c20b856 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -37,6 +37,8 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; @@ -1981,6 +1983,32 @@ public void testGetTemplateRequest() throws Exception { assertThat(request.getEntity(), nullValue()); } + public void testGetScriptRequest() { + GetStoredScriptRequest getStoredScriptRequest = new GetStoredScriptRequest("x-script"); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(getStoredScriptRequest, expectedParams); + + Request request = RequestConverters.getScript(getStoredScriptRequest); + assertThat(request.getEndpoint(), equalTo("/_scripts/" + getStoredScriptRequest.id())); + assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(request.getParameters(), equalTo(expectedParams)); + 
assertThat(request.getEntity(), nullValue()); + } + + public void testDeleteScriptRequest() { + DeleteStoredScriptRequest deleteStoredScriptRequest = new DeleteStoredScriptRequest("x-script"); + + Map expectedParams = new HashMap<>(); + setRandomTimeout(deleteStoredScriptRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + setRandomMasterTimeout(deleteStoredScriptRequest, expectedParams); + + Request request = RequestConverters.deleteScript(deleteStoredScriptRequest); + assertThat(request.getEndpoint(), equalTo("/_scripts/" + deleteStoredScriptRequest.id())); + assertThat(request.getMethod(), equalTo(HttpDelete.METHOD_NAME)); + assertThat(request.getParameters(), equalTo(expectedParams)); + assertThat(request.getEntity(), nullValue()); + } + private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false); assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java new file mode 100644 index 0000000000000..e6d380a4cc0e1 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/StoredScriptsIT.java @@ -0,0 +1,105 @@ +package org.elasticsearch.client;/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.StoredScriptSource; + +import java.util.Collections; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +public class StoredScriptsIT extends ESRestHighLevelClientTestCase { + + final String id = "calculate-score"; + + public void testGetStoredScript() throws Exception { + final StoredScriptSource scriptSource = + new StoredScriptSource("painless", + "Math.log(_score * 2) + params.my_modifier", + Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())); + + final String script = Strings.toString(scriptSource.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); + // TODO: change to HighLevel PutStoredScriptRequest when it will be ready + // so far - using low-level REST API + Response putResponse = + adminClient() + .performRequest("PUT", "/_scripts/calculate-score", emptyMap(), + new StringEntity("{\"script\":" + script + "}", + ContentType.APPLICATION_JSON)); + assertEquals(putResponse.getStatusLine().getReasonPhrase(), 200, putResponse.getStatusLine().getStatusCode()); + assertEquals("{\"acknowledged\":true}", EntityUtils.toString(putResponse.getEntity())); + + GetStoredScriptRequest getRequest = new GetStoredScriptRequest("calculate-score"); + getRequest.masterNodeTimeout("50s"); + + GetStoredScriptResponse getResponse = execute(getRequest, highLevelClient()::getScript, + highLevelClient()::getScriptAsync); + + assertThat(getResponse.getSource(), equalTo(scriptSource)); + } + + public void testDeleteStoredScript() throws Exception { + final StoredScriptSource scriptSource = + new StoredScriptSource("painless", + "Math.log(_score * 2) + params.my_modifier", + Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())); + + final String script = Strings.toString(scriptSource.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); + // TODO: change to HighLevel PutStoredScriptRequest when it will be ready + // so far - using low-level REST API + Response putResponse = + adminClient() + .performRequest("PUT", "/_scripts/" + id, emptyMap(), + new StringEntity("{\"script\":" + script + "}", + ContentType.APPLICATION_JSON)); + assertEquals(putResponse.getStatusLine().getReasonPhrase(), 200, putResponse.getStatusLine().getStatusCode()); + assertEquals("{\"acknowledged\":true}", EntityUtils.toString(putResponse.getEntity())); + + DeleteStoredScriptRequest deleteRequest = new DeleteStoredScriptRequest(id); + deleteRequest.masterNodeTimeout("50s"); + deleteRequest.timeout("50s"); + + DeleteStoredScriptResponse deleteResponse = execute(deleteRequest, highLevelClient()::deleteScript, + highLevelClient()::deleteScriptAsync); + + 
assertThat(deleteResponse.isAcknowledged(), equalTo(true)); + + GetStoredScriptRequest getRequest = new GetStoredScriptRequest(id); + + final ElasticsearchStatusException statusException = expectThrows(ElasticsearchStatusException.class, + () -> execute(getRequest, highLevelClient()::getScript, + highLevelClient()::getScriptAsync)); + assertThat(statusException.status(), equalTo(RestStatus.NOT_FOUND)); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java new file mode 100644 index 0000000000000..0aadae73ce66d --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java @@ -0,0 +1,204 @@ +package org.elasticsearch.client.documentation;/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; +import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.StoredScriptSource; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +/** + * This class is used to generate the Java Stored Scripts API documentation. + * You need to wrap your code between two tags like: + * // tag::example + * // end::example + * + * Where example is your tag name. 
+ * + * Then in the documentation, you can extract what is between tag and end tags with + * ["source","java",subs="attributes,callouts,macros"] + * -------------------------------------------------- + * include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[example] + * -------------------------------------------------- + * + * The column width of the code block is 84. If the code contains a line longer + * than 84, the line will be cut and a horizontal scroll bar will be displayed. + * (the code indentation of the tag is not included in the width) + */ +public class StoredScriptsDocumentationIT extends ESRestHighLevelClientTestCase { + + public void testGetStoredScript() throws Exception { + RestHighLevelClient client = highLevelClient(); + + final StoredScriptSource scriptSource = + new StoredScriptSource("painless", + "Math.log(_score * 2) + params.my_modifier", + Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())); + + putStoredScript("calculate-score", scriptSource); + + { + // tag::get-stored-script-request + GetStoredScriptRequest request = new GetStoredScriptRequest("calculate-score"); // <1> + // end::get-stored-script-request + + // tag::get-stored-script-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueSeconds(50)); // <1> + request.masterNodeTimeout("50s"); // <2> + // end::get-stored-script-request-masterTimeout + + // tag::get-stored-script-execute + GetStoredScriptResponse getResponse = client.getScript(request, RequestOptions.DEFAULT); + // end::get-stored-script-execute + + // tag::get-stored-script-response + StoredScriptSource storedScriptSource = getResponse.getSource(); // <1> + + String lang = storedScriptSource.getLang(); // <2> + String source = storedScriptSource.getSource(); // <3> + Map options = storedScriptSource.getOptions(); // <4> + // end::get-stored-script-response + + assertThat(storedScriptSource, equalTo(scriptSource)); + + // tag::get-stored-script-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetStoredScriptResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::get-stored-script-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::get-stored-script-execute-async + client.getScriptAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::get-stored-script-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + } + + public void testDeleteStoredScript() throws Exception { + RestHighLevelClient client = highLevelClient(); + + final StoredScriptSource scriptSource = + new StoredScriptSource("painless", + "Math.log(_score * 2) + params.my_modifier", + Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())); + + putStoredScript("calculate-score", scriptSource); + + // tag::delete-stored-script-request + DeleteStoredScriptRequest deleteRequest = new DeleteStoredScriptRequest("calculate-score"); // <1> + // end::delete-stored-script-request + + // tag::delete-stored-script-request-masterTimeout + deleteRequest.masterNodeTimeout(TimeValue.timeValueSeconds(50)); // <1> + deleteRequest.masterNodeTimeout("50s"); // <2> + // end::delete-stored-script-request-masterTimeout + + // tag::delete-stored-script-request-timeout + deleteRequest.timeout(TimeValue.timeValueSeconds(60)); // 
<1> + deleteRequest.timeout("60s"); // <2> + // end::delete-stored-script-request-timeout + + // tag::delete-stored-script-execute + DeleteStoredScriptResponse deleteResponse = client.deleteScript(deleteRequest, RequestOptions.DEFAULT); + // end::delete-stored-script-execute + + // tag::delete-stored-script-response + boolean acknowledged = deleteResponse.isAcknowledged();// <1> + // end::delete-stored-script-response + + putStoredScript("calculate-score", scriptSource); + + // tag::delete-stored-script-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(DeleteStoredScriptResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::delete-stored-script-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::delete-stored-script-execute-async + client.deleteScriptAsync(deleteRequest, RequestOptions.DEFAULT, listener); // <1> + // end::delete-stored-script-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + private void putStoredScript(String id, StoredScriptSource scriptSource) throws IOException { + final String script = Strings.toString(scriptSource.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); + // TODO: change to HighLevel PutStoredScriptRequest when it will be ready + // so far - using low-level REST API + Response putResponse = + adminClient() + .performRequest("PUT", "/_scripts/" + id, emptyMap(), + new StringEntity("{\"script\":" + script + "}", + ContentType.APPLICATION_JSON)); + assertEquals(putResponse.getStatusLine().getReasonPhrase(), 200, putResponse.getStatusLine().getStatusCode()); + assertEquals("{\"acknowledged\":true}", EntityUtils.toString(putResponse.getEntity())); + } +} diff --git a/docs/java-rest/high-level/script/delete_script.asciidoc b/docs/java-rest/high-level/script/delete_script.asciidoc new file mode 100644 index 0000000000000..79b3b0b324715 --- /dev/null +++ b/docs/java-rest/high-level/script/delete_script.asciidoc @@ -0,0 +1,81 @@ +[[java-rest-high-delete-stored-script]] + +=== Delete Stored Script API + +[[java-rest-high-delete-stored-script-request]] +==== Delete Stored Script Request + +A `DeleteStoredScriptRequest` requires an `id`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[delete-stored-script-request] +-------------------------------------------------- +<1> The id of the script + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[delete-stored-script-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge the stored script is deleted as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the stored script is deleted as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[delete-stored-script-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2>
Timeout to connect to the master node as a `String` + +[[java-rest-high-delete-stored-script-sync]] +==== Synchronous Execution +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[delete-stored-script-execute] +-------------------------------------------------- + +[[java-rest-high-delete-stored-script-async]] +==== Asynchronous Execution + +The asynchronous execution of a delete stored script request requires both the `DeleteStoredScriptRequest` +instance and an `ActionListener` instance to be passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[delete-stored-script-execute-async] +-------------------------------------------------- +<1> The `DeleteStoredScriptRequest` to execute and the `ActionListener` to use when +the execution completes + +[[java-rest-high-delete-stored-script-listener]] +===== Action Listener + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `DeleteStoredScriptResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[delete-stored-script-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. 
The raised exception is provided as an argument + +[[java-rest-high-delete-stored-script-response]] +==== Delete Stored Script Response + +The returned `DeleteStoredScriptResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[delete-stored-script-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request \ No newline at end of file diff --git a/docs/java-rest/high-level/script/get_script.asciidoc b/docs/java-rest/high-level/script/get_script.asciidoc new file mode 100644 index 0000000000000..a38bdad2bd6af --- /dev/null +++ b/docs/java-rest/high-level/script/get_script.asciidoc @@ -0,0 +1,77 @@ +[[java-rest-high-get-stored-script]] + +=== Get Stored Script API + +[[java-rest-high-get-stored-script-request]] +==== Get Stored Script Request + +A `GetStoredScriptRequest` requires an `id`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[get-stored-script-request] +-------------------------------------------------- +<1> The id of the script + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[get-stored-script-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-get-stored-script-sync]] +==== Synchronous Execution +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[get-stored-script-execute] +-------------------------------------------------- + +[[java-rest-high-get-stored-script-async]] +==== Asynchronous Execution + +The asynchronous execution of a get stored script request requires both the `GetStoredScriptRequest` +instance and an `ActionListener` instance to be passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[get-stored-script-execute-async] +-------------------------------------------------- +<1> The `GetStoredScriptRequest` to execute and the `ActionListener` to use when +the execution completes + +[[java-rest-high-get-stored-script-listener]] +===== Action Listener + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `GetStoredScriptResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[get-stored-script-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure.
+
+[[java-rest-high-get-stored-script-response]]
+==== Get Stored Script Response
+
+The returned `GetStoredScriptResponse` allows you to retrieve information about the
+executed operation as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[get-stored-script-response]
+--------------------------------------------------
+<1> The script object, which bundles the script's content and its metadata
+<2> The language the script is written in, which defaults to `painless`
+<3> The content of the script
+<4> Any named options that should be passed into the script
\ No newline at end of file
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index 4cd87a521d104..17acc8f13c04d 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -151,3 +151,14 @@ The Java High Level REST Client supports the following Tasks APIs:
 
 include::tasks/list_tasks.asciidoc[]
 include::tasks/cancel_tasks.asciidoc[]
+
+== Script APIs
+
+The Java High Level REST Client supports the following Script APIs:
+
+* <<java-rest-high-get-stored-script>>
+* <<java-rest-high-delete-stored-script>>
+
+include::script/get_script.asciidoc[]
+include::script/delete_script.asciidoc[]
+
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json
index 2240f0e1a0b75..0b2d6c5a5b9c2 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json
@@ -13,6 +13,10 @@
       }
     },
     "params" : {
+      "master_timeout": {
+        "type" : "time",
+        "description" : "Specify timeout for connection to master"
+      }
     }
   },
   "body": null
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptResponse.java
index 42f08ae73e06d..741c105866f46 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptResponse.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.cluster.storedscripts;
 
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.xcontent.XContentParser;
 
 public class DeleteStoredScriptResponse extends AcknowledgedResponse {
 
@@ -29,4 +30,8 @@ public class DeleteStoredScriptResponse extends AcknowledgedResponse {
     public DeleteStoredScriptResponse(boolean acknowledged) {
         super(acknowledged);
     }
+
+    public static DeleteStoredScriptResponse fromXContent(XContentParser parser) {
+        return new DeleteStoredScriptResponse(parseAcknowledged(parser));
+    }
 }
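
The new `fromXContent` hook is what lets the high-level REST client rebuild the
response from a REST body. A minimal usage sketch (the JSON literal is a made-up
example; imports omitted):

["source","java"]
--------------------------------------------------
// parse a hypothetical delete-stored-script REST body
String json = "{\"acknowledged\": true}";
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    DeleteStoredScriptResponse response = DeleteStoredScriptResponse.fromXContent(parser);
    assert response.isAcknowledged();
}
--------------------------------------------------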
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
index a394fe17f217f..4cf686b9c282c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
@@ -21,25 +21,63 @@
 
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.script.StoredScriptSource;
 
 import java.io.IOException;
+import java.util.Objects;
 
-public class GetStoredScriptResponse extends ActionResponse implements ToXContentObject {
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+public class GetStoredScriptResponse extends ActionResponse implements StatusToXContentObject {
+
+    public static final ParseField _ID_PARSE_FIELD = new ParseField("_id");
+    public static final ParseField FOUND_PARSE_FIELD = new ParseField("found");
+    public static final ParseField SCRIPT = new ParseField("script");
+
+    private static final ConstructingObjectParser<GetStoredScriptResponse, String> PARSER =
+        new ConstructingObjectParser<>("GetStoredScriptResponse",
+            true,
+            (a, c) -> {
+                String id = (String) a[0];
+                boolean found = (Boolean) a[1];
+                StoredScriptSource scriptSource = (StoredScriptSource) a[2];
+                return found ? new GetStoredScriptResponse(id, scriptSource) : new GetStoredScriptResponse(id, null);
+            });
+
+    static {
+        PARSER.declareField(constructorArg(), (p, c) -> p.text(),
+            _ID_PARSE_FIELD, ObjectParser.ValueType.STRING);
+        PARSER.declareField(constructorArg(), (p, c) -> p.booleanValue(),
+            FOUND_PARSE_FIELD, ObjectParser.ValueType.BOOLEAN);
+        PARSER.declareField(optionalConstructorArg(), (p, c) -> StoredScriptSource.fromXContent(p, true),
+            SCRIPT, ObjectParser.ValueType.OBJECT);
+    }
+
+    private String id;
     private StoredScriptSource source;
 
     GetStoredScriptResponse() {
     }
 
-    GetStoredScriptResponse(StoredScriptSource source) {
+    GetStoredScriptResponse(String id, StoredScriptSource source) {
+        this.id = id;
         this.source = source;
     }
 
+    public String getId() {
+        return id;
+    }
+
     /**
      * @return if a stored script and if not found null
      */
@@ -47,13 +85,30 @@ public StoredScriptSource getSource() {
         return source;
     }
 
+    @Override
+    public RestStatus status() {
+        return source != null ? RestStatus.OK : RestStatus.NOT_FOUND;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        source.toXContent(builder, params);
+        builder.startObject();
+
+        builder.field(_ID_PARSE_FIELD.getPreferredName(), id);
+        builder.field(FOUND_PARSE_FIELD.getPreferredName(), source != null);
+        if (source != null) {
+            builder.field(StoredScriptSource.SCRIPT_PARSE_FIELD.getPreferredName());
+            source.toXContent(builder, params);
+        }
+
+        builder.endObject();
         return builder;
     }
 
+    public static GetStoredScriptResponse fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
@@ -67,6 +122,10 @@ public void readFrom(StreamInput in) throws IOException {
         } else {
             source = null;
         }
+
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
+            id = in.readString();
+        }
     }
 
     @Override
@@ -84,5 +143,22 @@ public void writeTo(StreamOutput out) throws IOException {
                 out.writeString(source.getSource());
             }
         }
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
+            out.writeString(id);
+        }
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        GetStoredScriptResponse that = (GetStoredScriptResponse) o;
+        return Objects.equals(id, that.id) &&
+            Objects.equals(source, that.source);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, source);
     }
 }
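
Correspondingly, `GetStoredScriptResponse.fromXContent` can rebuild the `_id`/`found`/`script`
body produced by `toXContent` above. A minimal sketch (made-up JSON values; imports omitted):

["source","java"]
--------------------------------------------------
// parse a hypothetical get-stored-script REST body
String json = "{\"_id\":\"calculate-field\",\"found\":true,"
    + "\"script\":{\"lang\":\"painless\",\"source\":\"doc['a'].value\"}}";
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    GetStoredScriptResponse response = GetStoredScriptResponse.fromXContent(parser);
    assert "calculate-field".equals(response.getId());
    assert "painless".equals(response.getSource().getLang());
}
--------------------------------------------------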
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java
index ab5a3d9953a51..7450365bc8f33 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java
@@ -60,7 +60,7 @@ protected GetStoredScriptResponse newResponse() {
     @Override
     protected void masterOperation(GetStoredScriptRequest request, ClusterState state,
                                    ActionListener<GetStoredScriptResponse> listener) throws Exception {
-        listener.onResponse(new GetStoredScriptResponse(scriptService.getStoredScript(state, request)));
+        listener.onResponse(new GetStoredScriptResponse(request.id(), scriptService.getStoredScript(state, request)));
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java
index 10050dda88235..1a14d50538237 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java
@@ -19,19 +19,12 @@
 package org.elasticsearch.rest.action.admin.cluster;
 
 import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
-import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse;
 import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
-import org.elasticsearch.rest.RestResponse;
-import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.rest.action.RestBuilderListener;
-import org.elasticsearch.script.StoredScriptSource;
+import org.elasticsearch.rest.action.RestStatusToXContentListener;
 
 import java.io.IOException;
 
@@ -39,9 +32,6 @@
 
 public class RestGetStoredScriptAction extends BaseRestHandler {
 
-    public static final ParseField _ID_PARSE_FIELD = new ParseField("_id");
-    public static final ParseField FOUND_PARSE_FIELD = new ParseField("found");
-
     public RestGetStoredScriptAction(Settings settings, RestController controller) {
         super(settings);
 
@@ -57,33 +47,7 @@ public String getName() {
     public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) throws IOException {
         String id = request.param("id");
         GetStoredScriptRequest getRequest = new GetStoredScriptRequest(id);
-
-        return channel -> client.admin().cluster().getStoredScript(getRequest, new RestBuilderListener<GetStoredScriptResponse>(channel) {
-            @Override
-            public RestResponse buildResponse(GetStoredScriptResponse response, XContentBuilder builder) throws Exception {
-                builder.startObject();
-                builder.field(_ID_PARSE_FIELD.getPreferredName(), id);
-
-                StoredScriptSource source = response.getSource();
-                boolean found = source != null;
-                builder.field(FOUND_PARSE_FIELD.getPreferredName(), found);
-
-                if (found) {
-                    builder.startObject(StoredScriptSource.SCRIPT_PARSE_FIELD.getPreferredName());
-                    builder.field(StoredScriptSource.LANG_PARSE_FIELD.getPreferredName(), source.getLang());
-                    builder.field(StoredScriptSource.SOURCE_PARSE_FIELD.getPreferredName(), source.getSource());
-
-                    if (source.getOptions().isEmpty() == false) {
-                        builder.field(StoredScriptSource.OPTIONS_PARSE_FIELD.getPreferredName(), source.getOptions());
-                    }
-
-                    builder.endObject();
-                }
-
-                builder.endObject();
-
-                return new BytesRestResponse(found ? RestStatus.OK : RestStatus.NOT_FOUND, builder);
-            }
-        });
+        getRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRequest.masterNodeTimeout()));
+        return channel -> client.admin().cluster().getStoredScript(getRequest, new RestStatusToXContentListener<>(channel));
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java
index 11f8769c86b1f..885d72bdec6f5 100644
--- a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java
+++ b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java
@@ -185,7 +185,7 @@ private StoredScriptSource build(boolean ignoreEmpty) {
         }
     }
 
-    private static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>("stored script source", Builder::new);
+    private static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>("stored script source", true, Builder::new);
 
     static {
         // Defines the fields necessary to parse a Script as XContent using an ObjectParser.
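
The extra `true` argument makes the `ObjectParser` lenient, so unknown fields in a
stored script body are ignored instead of failing the parse (the second hunk below
then drops an empty `options` map from `toXContent`). A rough sketch of the effect
(made-up JSON; imports omitted):

["source","java"]
--------------------------------------------------
// an unrecognised field no longer fails stored-script parsing
String json = "{\"lang\":\"painless\",\"source\":\"1 + 1\",\"not_a_real_field\":\"ignored\"}";
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    StoredScriptSource source = StoredScriptSource.fromXContent(parser, true);
    assert "painless".equals(source.getLang());
}
--------------------------------------------------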
@@ -481,7 +481,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
         builder.field(LANG_PARSE_FIELD.getPreferredName(), lang);
         builder.field(SOURCE_PARSE_FIELD.getPreferredName(), source);
-        builder.field(OPTIONS_PARSE_FIELD.getPreferredName(), options);
+        if (options.isEmpty() == false) {
+            builder.field(OPTIONS_PARSE_FIELD.getPreferredName(), options);
+        }
         builder.endObject();
 
         return builder;
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptResponseTests.java
new file mode 100644
index 0000000000000..375a672263060
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptResponseTests.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.storedscripts;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+
+import java.io.IOException;
+
+public class DeleteStoredScriptResponseTests extends AbstractStreamableXContentTestCase<DeleteStoredScriptResponse> {
+
+    @Override
+    protected DeleteStoredScriptResponse doParseInstance(XContentParser parser) throws IOException {
+        return DeleteStoredScriptResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected DeleteStoredScriptResponse createBlankInstance() {
+        return new DeleteStoredScriptResponse();
+    }
+
+    @Override
+    protected DeleteStoredScriptResponse createTestInstance() {
+        return new DeleteStoredScriptResponse(randomBoolean());
+    }
+
+    @Override
+    protected DeleteStoredScriptResponse mutateInstance(DeleteStoredScriptResponse instance) throws IOException {
+        return new DeleteStoredScriptResponse(instance.isAcknowledged() == false);
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java
new file mode 100644
index 0000000000000..1c92c0c8c2bf7
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.storedscripts;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.StoredScriptSource;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.function.Predicate;
+
+public class GetStoredScriptResponseTests extends AbstractStreamableXContentTestCase<GetStoredScriptResponse> {
+
+    @Override
+    protected GetStoredScriptResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetStoredScriptResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected GetStoredScriptResponse createBlankInstance() {
+        return new GetStoredScriptResponse();
+    }
+
+    @Override
+    protected GetStoredScriptResponse createTestInstance() {
+        return new GetStoredScriptResponse(randomAlphaOfLengthBetween(1, 10), randomScriptSource());
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return s -> "script.options".equals(s);
+    }
+
+    private static StoredScriptSource randomScriptSource() {
+        final String lang = randomFrom("lang", "painless", "mustache");
+        final String source = randomAlphaOfLengthBetween(1, 10);
+        final Map<String, String> options = randomBoolean()
+            ? Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())
+            : Collections.emptyMap();
+        return new StoredScriptSource(lang, source, options);
+    }
+}
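
For context, the XContent round trip these test classes exercise boils down to the
following hand-rolled sketch (made-up values; imports omitted, and this would need
to live in the same package since the response constructor is package-private):

["source","java"]
--------------------------------------------------
GetStoredScriptResponse original = new GetStoredScriptResponse("calculate-field",
    new StoredScriptSource("painless", "1 + 1", Collections.emptyMap()));

// serialise to JSON...
XContentBuilder builder = XContentFactory.jsonBuilder();
original.toXContent(builder, ToXContent.EMPTY_PARAMS);
String json = Strings.toString(builder);

// ...then parse it back and check equality, which is why equals/hashCode were added
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    assert original.equals(GetStoredScriptResponse.fromXContent(parser));
}
--------------------------------------------------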