From c7dedc279fa531fc2307343a03aee303d36c2ed5 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 27 Sep 2022 09:07:19 +0200 Subject: [PATCH 01/26] Add javadoc for ShardRouting#relocatingNodeId (#90252) --- .../org/elasticsearch/cluster/routing/ShardRouting.java | 9 +++++++++ .../org/elasticsearch/ExceptionSerializationTests.java | 4 ++-- .../collector/shards/ShardsMonitoringDocTests.java | 4 ++-- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 8301125c8e947..5e2cb2959af95 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -39,6 +39,13 @@ public final class ShardRouting implements Writeable, ToXContentObject { private final ShardId shardId; private final String currentNodeId; + /** + * This field contains + * - node id this shard is relocating to iff state == RELOCATING + * - node id this shard is relocating from iff state == INITIALIZING and this is relocation target + * - {@code null} in other cases + */ + @Nullable private final String relocatingNodeId; private final boolean primary; private final ShardRoutingState state; @@ -81,6 +88,8 @@ public final class ShardRouting implements Writeable, ToXContentObject { : "replica shards always recover from primary"; assert (currentNodeId == null) == (state == ShardRoutingState.UNASSIGNED) : "unassigned shard must not be assigned to a node " + this; + assert relocatingNodeId == null || state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING + : state + " shard must not have relocating node " + this; } @Nullable diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index c1de22a53978e..81b00ad20195e 
100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.IllegalShardRoutingStateException; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; @@ -118,6 +117,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; +import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; import static org.elasticsearch.test.TestSearchContext.SHARD_TARGET; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -241,7 +241,7 @@ private T serialize(T exception, Version version) throws I } public void testIllegalShardRoutingStateException() throws IOException { - final ShardRouting routing = TestShardRouting.newShardRouting("test", 0, "xyz", "def", false, ShardRoutingState.STARTED); + final ShardRouting routing = newShardRouting("test", 0, "xyz", false, ShardRoutingState.STARTED); final String routingAsString = routing.toString(); IllegalShardRoutingStateException serialize = serialize( new IllegalShardRoutingStateException(routing, "foo", new NullPointerException()) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsMonitoringDocTests.java index b3db571aab7ae..b23130f7b6a38 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsMonitoringDocTests.java +++ 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsMonitoringDocTests.java @@ -100,12 +100,12 @@ public void testIdWithReplicaShardAssigned() { } public void testIdWithPrimaryShardUnassigned() { - shardRouting = newShardRouting("_index_2", 789, null, randomAlphaOfLength(5), true, UNASSIGNED); + shardRouting = newShardRouting("_index_2", 789, null, true, UNASSIGNED); assertEquals("_state_uuid_2:_na:_index_2:789:p", ShardMonitoringDoc.id("_state_uuid_2", shardRouting)); } public void testIdWithReplicaShardUnassigned() { - shardRouting = newShardRouting("_index_3", 159, null, randomAlphaOfLength(5), false, UNASSIGNED); + shardRouting = newShardRouting("_index_3", 159, null, false, UNASSIGNED); assertEquals("_state_uuid_3:_na:_index_3:159:r", ShardMonitoringDoc.id("_state_uuid_3", shardRouting)); } From 904eae5ce44fb711ac2d7161af36c76d72e0e2e3 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Tue, 27 Sep 2022 10:26:58 +0200 Subject: [PATCH 02/26] EQL sequences: support join on multi-values (#89965) --- docs/changelog/89965.yaml | 5 + .../common/src/main/resources/data/extra.data | 68 +++++--- .../src/main/resources/data/extra.mapping | 3 + .../common/src/main/resources/test_extra.toml | 152 ++++++++++++++++++ .../eql/execution/search/RuntimeUtils.java | 3 +- .../search/extractor/FieldHitExtractor.java | 18 ++- .../extractor/TimestampFieldHitExtractor.java | 2 +- .../eql/execution/sequence/Sequence.java | 6 +- .../eql/execution/sequence/SequenceKey.java | 18 ++- .../execution/sequence/TumblingWindow.java | 65 +++++++- .../CriterionOrdinalExtractionTests.java | 7 +- .../extractor/AbstractFieldHitExtractor.java | 72 +++++++-- .../xpack/sql/execution/search/Querier.java | 17 +- .../search/extractor/FieldHitExtractor.java | 8 +- .../extractor/ComputingExtractorTests.java | 3 +- .../extractor/FieldHitExtractorTests.java | 22 +-- 16 files changed, 397 insertions(+), 72 deletions(-) create mode 100644 
docs/changelog/89965.yaml diff --git a/docs/changelog/89965.yaml b/docs/changelog/89965.yaml new file mode 100644 index 0000000000000..4616f2806ba0b --- /dev/null +++ b/docs/changelog/89965.yaml @@ -0,0 +1,5 @@ +pr: 89965 +summary: "EQL sequences: support join on multi-values" +area: EQL +type: bug +issues: [] diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/data/extra.data b/x-pack/plugin/eql/qa/common/src/main/resources/data/extra.data index d87ed34992cd8..d0ad90da935e5 100644 --- a/x-pack/plugin/eql/qa/common/src/main/resources/data/extra.data +++ b/x-pack/plugin/eql/qa/common/src/main/resources/data/extra.data @@ -2,26 +2,31 @@ { "@timestamp": "1", "event_type": "REQUEST", - "transID": 1234, - "sequence": 1 + "transID": [1234, 599999], + "sequence": 1, + "tags": ["tag1", "tag2"] }, { "@timestamp": "2", "event_type": "ERROR", "transID": 1234, - "sequence": 2 + "sequence": 2, + "tags": "tag1" }, { "@timestamp": "3", "event_type": "STAT", - "transID": 1234, - "sequence": 3 + "transID": [1234, 9876554], + "sequence": 3, + "tags": ["tag3", "tag4"] }, { "@timestamp": "10", "event_type": "REQUEST", "transID": 1235, - "sequence": 4 + "sequence": 4, + "tags": ["tag3", "tag4"], + "items": ["item1", "item2", "item3"] }, { "@timestamp": "11", @@ -58,53 +63,66 @@ "event_type": "process", "transID": 1, "process.pid": 123, - "sequence": 10 + "sequence": 10, + "tags": "tag6", + "optional_tags": ["tag1", null, null], + "date_tags": "1970-01-01T00:00:00.000Z" }, { "@timestamp": "1", "event_type": "process", "transID": 2, - "sequence": 11 + "sequence": 11, + "tags": ["tag5", "tag6"], + "optional_tags": [null], + "date_tags": "1970-01-05T00:00:00.000Z" }, { "@timestamp": "2", "event_type": "process", "transID": 2, "process.pid": 123, - "sequence": 12 + "sequence": 12, + "tags_with_duplicates": ["tag1", "tag1", "tag2"], + "date_tags": "1970-01-01T00:00:00.000Z" }, { "@timestamp": "3", "event_type": "file", "transID": 0, "process.pid": 123, - "sequence": 13 + "sequence": 
13, + "tags_with_duplicates": ["tag1", "tag2", "tag2"] }, { "@timestamp": "4", "event_type": "file", "transID": 0, "process.pid": 123, - "sequence": 14 + "sequence": 14, + "tags_unordered": ["tag5", "tag1", "tag2"] }, { "@timestamp": "5", "event_type": "file", "transID": 0, "process.pid": 123, - "sequence": 15 + "sequence": 15, + "tags_unordered": ["tag7", "tag1", "tag5"] }, { "@timestamp": "6", "event_type": "file", "transID": 0, - "sequence": 16 + "sequence": 16, + "tags_with_null": ["tag1", null, "tag2"] }, { "@timestamp": "6", "event_type": "file", "transID": 0, - "sequence": 17 + "sequence": 17, + "tags_with_null": null }, { "@timestamp": "7", @@ -113,7 +131,9 @@ "process.entity_id": 512, "process.pid": 123, "version": "1.5.0", - "sequence": 18 + "sequence": 18, + "tags_with_null": [null], + "optional_tags": "tag10" }, { "@timestamp": "8", @@ -122,7 +142,8 @@ "process.entity_id": 512, "process.pid": 123, "version": "1.5.0", - "sequence": 19 + "sequence": 19, + "tags_with_null": ["tag1"] }, { "@timestamp": "9", @@ -131,7 +152,9 @@ "process.entity_id": 512, "process.pid": 123, "version": "1.2.4", - "sequence": 20 + "sequence": 20, + "tags_with_null": "tag2", + "date_tags": "1970-01-01T00:00:00.000Z" }, { "@timestamp": "10", @@ -140,7 +163,8 @@ "process.entity_id": 512, "process.pid": 123, "version": "1.11.3", - "sequence": 21 + "sequence": 21, + "date_tags": [ "1970-01-01T00:00:00.000Z", "1970-01-02T00:00:00.000Z" ] }, { "@timestamp": "11", @@ -149,7 +173,9 @@ "process.entity_id": 512, "process.pid": 123, "version": "bad", - "sequence": 22 + "sequence": 22, + "tags": [], + "date_tags": [ "1970-08-01T00:00:00.000Z", "1970-01-02T00:00:00.000Z" ] }, { "@timestamp": "12", @@ -158,6 +184,8 @@ "process.entity_id": 512, "process.pid": 123, "version": "1.2.4-SNAPSHOT", - "sequence": 23 + "sequence": 23, + "tags": ["tag1", "tag3", "tag4"], + "items": ["item0", "item1", "item2"] } ] diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/data/extra.mapping 
b/x-pack/plugin/eql/qa/common/src/main/resources/data/extra.mapping index 2fde8cf52fdd8..7f048964dd4c7 100644 --- a/x-pack/plugin/eql/qa/common/src/main/resources/data/extra.mapping +++ b/x-pack/plugin/eql/qa/common/src/main/resources/data/extra.mapping @@ -30,6 +30,9 @@ }, "version": { "type": "version" + }, + "date_tags": { + "type": "date" } } } diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/test_extra.toml b/x-pack/plugin/eql/qa/common/src/main/resources/test_extra.toml index 14352f067b43d..f971ca16c09d4 100644 --- a/x-pack/plugin/eql/qa/common/src/main/resources/test_extra.toml +++ b/x-pack/plugin/eql/qa/common/src/main/resources/test_extra.toml @@ -31,6 +31,16 @@ query = ''' expected_event_ids = [1,2,3] join_keys = ["1234"] +[[queries]] +name = "byDate" +query = ''' + sequence by date_tags + [ process where true ] + [ process where true ] +''' +expected_event_ids = [10, 12] +join_keys = ["1970-01-01T00:00:00.000Z"] + [[queries]] name = "optionalNoDocsFieldNullEquality" query = ''' @@ -271,3 +281,145 @@ query = ''' ''' expected_event_ids = [18, 19] join_keys = ["1.5.0"] + + +[[queries]] +name = "sequenceWithMultivalue_OneToMany" +query = ''' + sequence by tags + [ REQUEST where true ] + [ ERROR where true ] +''' +expected_event_ids = [1, 2] +join_keys = ["tag1"] + + +[[queries]] +name = "sequenceWithMultivalue_ManyToOne" +query = ''' + sequence by tags + [ process where true ] + [ process where true ] +''' +expected_event_ids = [10, 11] +join_keys = ["tag6"] + + +[[queries]] +name = "sequenceWithMultivalue_ManyToMany" +query = ''' + sequence by tags + [ REQUEST where true ] + [ file where true ] +''' +expected_event_ids = [1, 23, 4, 23, 4, 23] +join_keys = ["tag1", "tag3", "tag4"] + + +[[queries]] +name = "sequenceWithMultivalue_CartesianProduct" +query = ''' + sequence by tags, items + [ REQUEST where true ] + [ file where true ] +''' +expected_event_ids = [4, 23, 4, 23, 4, 23, 4, 23] +join_keys = ["tag3", "item1", "tag3", "item2", "tag4", 
"item1", "tag4", "item2"] + + +[[queries]] +name = "sequenceWithMultivalue_Duplicates" +query = ''' + sequence by tags_with_duplicates + [ process where true ] + [ file where true ] +''' +expected_event_ids = [12, 13, 12, 13] +join_keys = ["tag1", "tag2"] + + +[[queries]] +name = "sequenceWithMultivalue_RandomOrder" +query = ''' + sequence by tags_unordered + [ file where true ] + [ file where true ] +''' +expected_event_ids = [14, 15, 14, 15] +join_keys = ["tag1", "tag5"] + + +[[queries]] +name = "sequenceWithMultivalue_NullValues" +query = ''' + sequence by tags_with_null + [ file where true ] + [ file where true ] +''' +expected_event_ids = [16, 19, 16, 20] +join_keys = ["tag1", "tag2"] + +[[queries]] +name = "sequenceWithMultivalue_DateValues" +query = ''' + sequence by date_tags + [ file where true ] + [ file where true ] +''' +expected_event_ids = [20, 21, 21, 22] +join_keys = [ "1970-01-01T00:00:00.000Z", "1970-01-02T00:00:00.000Z" ] + + +[[queries]] +name = "sequenceWithOptionalMultivalue_simple" +query = ''' + sequence by ?optional_tags + [ process where true ] + [ process where true ] +''' +expected_event_ids = [11, 12] +join_keys = [null] + +[[queries]] +name = "sequenceWithOptionalMultivalue_perQuery" +query = ''' + sequence + [ process where true ] by ?optional_tags + [ process where true ] by ?optional_tags +''' +expected_event_ids = [11, 12] +join_keys = [null] + + +[[queries]] +name = "sequenceWithOptionalMultivalue_multiPlusOptional" +query = ''' + sequence by tags, ?optional_tags + [ REQUEST where true ] + [ ERROR where true ] +''' +expected_event_ids = [1, 2] +join_keys = ["tag1", null] + + +[[queries]] +name = "sequenceWithOptionalMultivalue_optionalPlusMulti" +query = ''' + sequence by ?optional_tags, tags + [ REQUEST where true ] + [ ERROR where true ] +''' +expected_event_ids = [1, 2] +join_keys = [null, "tag1"] + + +[[queries]] +name = "sequenceWithOptionalMultivalue_twoOptionals" +query = ''' + sequence by ?optional_tags, ?tags + [ REQUEST 
where true ] + [ ERROR where true ] +''' +expected_event_ids = [1, 2] +join_keys = [null, "tag1"] + diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java index 382f7f371b8fa..776d4aaf5bccc 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java @@ -42,6 +42,7 @@ import java.util.Set; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; +import static org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor.MultiValueSupport.FULL; public final class RuntimeUtils { @@ -126,7 +127,7 @@ public static List createExtractor(List fields, E public static HitExtractor createExtractor(FieldExtraction ref, EqlConfiguration cfg) { if (ref instanceof SearchHitFieldRef f) { - return new FieldHitExtractor(f.name(), f.getDataType(), cfg.zoneId(), f.hitName(), false); + return new FieldHitExtractor(f.name(), f.getDataType(), cfg.zoneId(), f.hitName(), FULL); } if (ref instanceof ComputedRef computedRef) { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/extractor/FieldHitExtractor.java index 80e3ed8f5dd9a..54cd08c464a94 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/extractor/FieldHitExtractor.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/extractor/FieldHitExtractor.java @@ -17,6 +17,7 @@ import java.time.ZoneId; import java.time.ZonedDateTime; import java.util.List; +import java.util.stream.Collectors; import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; @@ -28,8 +29,14 @@ public 
FieldHitExtractor(StreamInput in) throws IOException { super(in); } - public FieldHitExtractor(String name, DataType dataType, ZoneId zoneId, String hitName, boolean arrayLeniency) { - super(name, dataType, zoneId, hitName, arrayLeniency); + public FieldHitExtractor( + String name, + DataType dataType, + ZoneId zoneId, + String hitName, + AbstractFieldHitExtractor.MultiValueSupport multiValueSupport + ) { + super(name, dataType, zoneId, hitName, multiValueSupport); } @Override @@ -47,10 +54,13 @@ protected Object unwrapCustomValue(Object values) { DataType dataType = dataType(); if (dataType == DATETIME) { - if (values instanceof String) { + if (values instanceof String str) { // We ask @timestamp (or the defined alternative field) to be returned as `epoch_millis` // when matching sequence to avoid parsing into ZonedDateTime objects for performance reasons. - return parseEpochMillisAsString(values.toString()); + return parseEpochMillisAsString(str); + } + if (values instanceof List list) { + return list.stream().map(x -> unwrapCustomValue(x)).collect(Collectors.toList()); } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/extractor/TimestampFieldHitExtractor.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/extractor/TimestampFieldHitExtractor.java index eceadbf7c7461..d42a9fe9da86a 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/extractor/TimestampFieldHitExtractor.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/extractor/TimestampFieldHitExtractor.java @@ -12,7 +12,7 @@ public class TimestampFieldHitExtractor extends FieldHitExtractor { public TimestampFieldHitExtractor(FieldHitExtractor target) { - super(target.fieldName(), target.dataType(), target.zoneId(), target.hitName(), target.arrayLeniency()); + super(target.fieldName(), target.dataType(), target.zoneId(), target.hitName(), 
target.multiValueSupport()); } @Override diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/Sequence.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/Sequence.java index a013df58b090c..53d06331a79e7 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/Sequence.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/Sequence.java @@ -81,7 +81,11 @@ public long ramBytesUsed() { @Override public int compareTo(Sequence o) { - return ordinal().compareTo(o.ordinal()); + int result = ordinal().compareTo(o.ordinal()); + if (result == 0) { + return key().compareTo(o.key()); + } + return result; } @Override diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceKey.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceKey.java index f18ff71a9a96e..1782dada8c51a 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceKey.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceKey.java @@ -17,7 +17,7 @@ import static java.util.Collections.emptyList; -public class SequenceKey implements Accountable { +public class SequenceKey implements Accountable, Comparable { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(SequenceKey.class); @@ -63,4 +63,20 @@ public boolean equals(Object obj) { public String toString() { return CollectionUtils.isEmpty(keys) ? 
"NONE" : Arrays.toString(keys); } + + @Override + @SuppressWarnings({ "unchecked", "rawtypes" }) + public int compareTo(SequenceKey other) { + int length = keys.length; + int otherLength = other.keys.length; + for (int i = 0; i < length && i < otherLength; i++) { + if (keys[i]instanceof Comparable key) { + int result = key.compareTo(other.keys[i]); + if (result != 0) { + return result; + } + } + } + return length - otherLength; + } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java index 9ac5b0dd25b4b..3bee947afeb96 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java @@ -25,7 +25,9 @@ import org.elasticsearch.xpack.eql.session.Payload.Type; import org.elasticsearch.xpack.eql.util.ReversedIterator; import org.elasticsearch.xpack.ql.util.ActionListeners; +import org.elasticsearch.xpack.ql.util.CollectionUtils; +import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.LinkedHashMap; @@ -620,17 +622,70 @@ Iterable> wrapValues(Criterion criterion, return new Iterator<>() { + SearchHit lastFetchedHit = delegate.hasNext() ? delegate.next() : null; + List remainingHitJoinKeys = lastFetchedHit == null ? Collections.emptyList() : extractJoinKeys(lastFetchedHit); + + /** + * extract the join key from a hit. If there are multivalues, the result is the cartesian product. + * eg. 
+ * - if the key is ['a', 'b'], the result is a list containing ['a', 'b'] + * - if the key is ['a', ['b', 'c]], the result is a list containing ['a', 'b'] and ['a', 'c'] + */ + private List extractJoinKeys(SearchHit hit) { + if (hit == null) { + return null; + } + Object[] originalKeys = criterion.key(hit); + + List partial = new ArrayList<>(); + if (originalKeys == null) { + partial.add(null); + } else { + int keySize = originalKeys.length; + partial.add(new Object[keySize]); + for (int i = 0; i < keySize; i++) { + if (originalKeys[i]instanceof List possibleValues) { + List newPartial = new ArrayList<>(possibleValues.size() * partial.size()); + for (Object possibleValue : possibleValues) { + for (Object[] partialKey : partial) { + Object[] newKey = new Object[keySize]; + if (i > 0) { + System.arraycopy(partialKey, 0, newKey, 0, i); + } + newKey[i] = possibleValue; + newPartial.add(newKey); + } + } + partial = newPartial; + } else { + for (Object[] key : partial) { + key[i] = originalKeys[i]; + } + } + } + } + return partial; + } + @Override public boolean hasNext() { - return delegate.hasNext(); + return CollectionUtils.isEmpty(remainingHitJoinKeys) == false || delegate.hasNext(); } @Override public Tuple next() { - SearchHit hit = delegate.next(); - SequenceKey k = key(criterion.key(hit)); - Ordinal o = criterion.ordinal(hit); - return new Tuple<>(new KeyAndOrdinal(k, o), new HitReference(cache(qualifiedIndex(hit)), hit.getId())); + if (remainingHitJoinKeys.isEmpty()) { + lastFetchedHit = delegate.next(); + remainingHitJoinKeys = extractJoinKeys(lastFetchedHit); + } + Object[] joinKeys = remainingHitJoinKeys.remove(0); + + SequenceKey k = key(joinKeys); + Ordinal o = criterion.ordinal(lastFetchedHit); + return new Tuple<>( + new KeyAndOrdinal(k, o), + new HitReference(cache(qualifiedIndex(lastFetchedHit)), lastFetchedHit.getId()) + ); } }; }; diff --git 
a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java index aaceabaf27738..b992ffe968fcb 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java @@ -31,13 +31,14 @@ import static org.elasticsearch.xpack.eql.EqlTestUtils.randomSearchLongSortValues; import static org.elasticsearch.xpack.eql.EqlTestUtils.randomSearchSortValues; import static org.elasticsearch.xpack.eql.execution.search.OrdinalTests.randomTimestamp; +import static org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor.MultiValueSupport.FULL; public class CriterionOrdinalExtractionTests extends ESTestCase { private String tsField = "timestamp"; private String tbField = "tiebreaker"; - private HitExtractor tsExtractor = new FieldHitExtractor(tsField, DataTypes.LONG, null, null, false); - private HitExtractor tbExtractor = new FieldHitExtractor(tbField, DataTypes.LONG, null, null, false); + private HitExtractor tsExtractor = new FieldHitExtractor(tsField, DataTypes.LONG, null, null, FULL); + private HitExtractor tbExtractor = new FieldHitExtractor(tbField, DataTypes.LONG, null, null, FULL); private HitExtractor implicitTbExtractor = ImplicitTiebreakerHitExtractor.INSTANCE; public void testTimeOnly() throws Exception { @@ -67,7 +68,7 @@ public void testTimeAndTiebreakerNull() throws Exception { } public void testTimeNotComparable() throws Exception { - HitExtractor badExtractor = new FieldHitExtractor(tsField, DataTypes.BINARY, null, null, false); + HitExtractor badExtractor = new FieldHitExtractor(tsField, DataTypes.BINARY, null, null, FULL); SearchHit hit = searchHit(randomAlphaOfLength(10), null); Criterion criterion 
= new Criterion(0, null, emptyList(), badExtractor, null, null, false); EqlIllegalArgumentException exception = expectThrows(EqlIllegalArgumentException.class, () -> criterion.ordinal(hit)); diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java index 515b5aa833192..102c88b1cc3e7 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.ql.execution.search.extractor; +import org.elasticsearch.Version; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -31,21 +32,34 @@ public abstract class AbstractFieldHitExtractor implements HitExtractor { private final String fieldName, hitName; private final DataType dataType; private final ZoneId zoneId; - private final boolean arrayLeniency; + + protected MultiValueSupport multiValueSupport; + + public enum MultiValueSupport { + NONE, + LENIENT, + FULL + } protected AbstractFieldHitExtractor(String name, DataType dataType, ZoneId zoneId) { - this(name, dataType, zoneId, null, false); + this(name, dataType, zoneId, null, MultiValueSupport.NONE); } - protected AbstractFieldHitExtractor(String name, DataType dataType, ZoneId zoneId, boolean arrayLeniency) { - this(name, dataType, zoneId, null, arrayLeniency); + protected AbstractFieldHitExtractor(String name, DataType dataType, ZoneId zoneId, MultiValueSupport multiValueSupport) { + this(name, dataType, zoneId, null, multiValueSupport); } - protected AbstractFieldHitExtractor(String name, DataType dataType, ZoneId zoneId, String 
hitName, boolean arrayLeniency) { + protected AbstractFieldHitExtractor( + String name, + DataType dataType, + ZoneId zoneId, + String hitName, + MultiValueSupport multiValueSupport + ) { this.fieldName = name; this.dataType = dataType; this.zoneId = zoneId; - this.arrayLeniency = arrayLeniency; + this.multiValueSupport = multiValueSupport; this.hitName = hitName; if (hitName != null) { @@ -60,7 +74,11 @@ protected AbstractFieldHitExtractor(StreamInput in) throws IOException { String typeName = in.readOptionalString(); dataType = typeName != null ? loadTypeFromName(typeName) : null; hitName = in.readOptionalString(); - arrayLeniency = in.readBoolean(); + if (in.getVersion().before(Version.V_8_6_0)) { + this.multiValueSupport = in.readBoolean() ? MultiValueSupport.LENIENT : MultiValueSupport.NONE; + } else { + this.multiValueSupport = in.readEnum(MultiValueSupport.class); + } zoneId = readZoneId(in); } @@ -75,7 +93,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeOptionalString(dataType == null ? 
null : dataType.typeName()); out.writeOptionalString(hitName); - out.writeBoolean(arrayLeniency); + if (out.getVersion().before(Version.V_8_6_0)) { + out.writeBoolean(multiValueSupport != MultiValueSupport.NONE); + } else { + out.writeEnum(multiValueSupport); + } + } @Override @@ -160,8 +183,14 @@ protected Object unwrapFieldsMultiValue(Object values) { return null; } else { if (isPrimitive(list) == false) { - if (list.size() == 1 || arrayLeniency) { + if (list.size() == 1 || multiValueSupport == MultiValueSupport.LENIENT) { return unwrapFieldsMultiValue(list.get(0)); + } else if (multiValueSupport == MultiValueSupport.FULL) { + List unwrappedValues = new ArrayList<>(); + for (Object value : list) { + unwrappedValues.add(unwrapFieldsMultiValue(value)); + } + values = unwrappedValues; } else { throw new QlIllegalArgumentException("Arrays (returned by [{}]) are not supported", fieldName); } @@ -170,13 +199,28 @@ protected Object unwrapFieldsMultiValue(Object values) { } Object unwrapped = unwrapCustomValue(values); - if (unwrapped != null) { + if (unwrapped != null && isListOfNulls(unwrapped) == false) { return unwrapped; } return values; } + private boolean isListOfNulls(Object unwrapped) { + if (unwrapped instanceof List list) { + if (list.size() == 0) { + return false; + } + for (Object o : list) { + if (o != null) { + return false; + } + } + return true; + } + return false; + } + protected abstract Object unwrapCustomValue(Object values); protected abstract boolean isPrimitive(List list); @@ -198,8 +242,8 @@ public DataType dataType() { return dataType; } - public boolean arrayLeniency() { - return arrayLeniency; + public MultiValueSupport multiValueSupport() { + return multiValueSupport; } @Override @@ -213,11 +257,11 @@ public boolean equals(Object obj) { return false; } AbstractFieldHitExtractor other = (AbstractFieldHitExtractor) obj; - return fieldName.equals(other.fieldName) && hitName.equals(other.hitName) && arrayLeniency == other.arrayLeniency; + return 
fieldName.equals(other.fieldName) && hitName.equals(other.hitName) && multiValueSupport == other.multiValueSupport; } @Override public int hashCode() { - return Objects.hash(fieldName, hitName, arrayLeniency); + return Objects.hash(fieldName, hitName, multiValueSupport); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index 69f084bf6ed67..8671328ddf1ed 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -39,6 +39,7 @@ import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.ql.execution.search.FieldExtraction; +import org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor; import org.elasticsearch.xpack.ql.execution.search.extractor.BucketExtractor; import org.elasticsearch.xpack.ql.execution.search.extractor.ComputingExtractor; import org.elasticsearch.xpack.ql.execution.search.extractor.ConstantExtractor; @@ -96,6 +97,8 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.action.ActionListener.wrap; +import static org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor.MultiValueSupport.LENIENT; +import static org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor.MultiValueSupport.NONE; import static org.elasticsearch.xpack.ql.index.VersionCompatibilityChecks.INTRODUCING_UNSIGNED_LONG; // TODO: add retry/back-off @@ -254,7 +257,7 @@ public static byte[] serializeQuery(SearchSourceBuilder source) throws IOExcepti /** * Listener used for local sorting (typically due to aggregations used inside `ORDER BY`). - * + *

* This listener consumes the whole result set, sorts it in memory then sends the paginated * results back to the client. */ @@ -588,7 +591,7 @@ private BucketExtractor createExtractor(FieldExtraction ref, ConstantExtractor t static class SearchHitActionListener extends BaseActionListener { private final QueryContainer query; private final BitSet mask; - private final boolean multiValueFieldLeniency; + private final AbstractFieldHitExtractor.MultiValueSupport multiValueFieldLeniency; private final SearchSourceBuilder source; SearchHitActionListener( @@ -602,7 +605,7 @@ static class SearchHitActionListener extends BaseActionListener { super(listener, client, cfg, output); this.query = query; this.mask = query.columnMask(output); - this.multiValueFieldLeniency = cfg.multiValueFieldLeniency(); + this.multiValueFieldLeniency = cfg.multiValueFieldLeniency() ? LENIENT : NONE; this.source = source; } @@ -633,7 +636,8 @@ private HitExtractor createExtractor(FieldExtraction ref) { } if (ref instanceof ScriptFieldRef f) { - return new FieldHitExtractor(f.name(), null, cfg.zoneId(), multiValueFieldLeniency); + FieldHitExtractor fieldHitExtractor = new FieldHitExtractor(f.name(), null, cfg.zoneId(), multiValueFieldLeniency); + return fieldHitExtractor; } if (ref instanceof ComputedRef computedRef) { @@ -727,14 +731,13 @@ static class AggSortingQueue extends PriorityQueue, Integer>> { * If no tuple exists in {@code sortingColumns} for an output column, it means this column * is not included at all in the ORDER BY * - * - * + * + *

* Take for example ORDER BY a, x, b, y * a, b - are sorted in ES * x, y - need to be sorted client-side * sorting on x kicks in only if the values for a are equal. * sorting on y kicks in only if the values for a, x and b are all equal - * */ // thanks to @jpountz for the row ordering idea as a way to preserve ordering @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java index 67b430d5ba141..f4a73f5f9eb8d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java @@ -45,16 +45,16 @@ public class FieldHitExtractor extends AbstractFieldHitExtractor { */ static final String NAME = "f"; - public FieldHitExtractor(String name, DataType dataType, ZoneId zoneId, boolean arrayLeniency) { - super(name, dataType, zoneId, arrayLeniency); + public FieldHitExtractor(String name, DataType dataType, ZoneId zoneId, MultiValueSupport multiValueSupport) { + super(name, dataType, zoneId, multiValueSupport); } public FieldHitExtractor(String name, DataType dataType, ZoneId zoneId) { super(name, dataType, zoneId); } - public FieldHitExtractor(String name, DataType dataType, ZoneId zoneId, String hitName, boolean arrayLeniency) { - super(name, dataType, zoneId, hitName, arrayLeniency); + public FieldHitExtractor(String name, DataType dataType, ZoneId zoneId, String hitName, MultiValueSupport multiValueSupport) { + super(name, dataType, zoneId, hitName, multiValueSupport); } public FieldHitExtractor(StreamInput in) throws IOException { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java index ef77edc07e3c8..3a8e5e5ea7a45 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java @@ -30,6 +30,7 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor.MultiValueSupport.NONE; import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE; import static org.elasticsearch.xpack.ql.util.CollectionUtils.combine; import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; @@ -74,7 +75,7 @@ protected ComputingExtractor mutateInstance(ComputingExtractor instance) throws public void testGet() { String fieldName = randomAlphaOfLength(5); ChainingProcessor extractor = new ChainingProcessor( - new HitExtractorProcessor(new FieldHitExtractor(fieldName, DOUBLE, UTC, false)), + new HitExtractorProcessor(new FieldHitExtractor(fieldName, DOUBLE, UTC, NONE)), new MathProcessor(MathOperation.LOG) ); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 15615c70b0442..bfeab90bcfdee 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -34,6 +34,8 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.elasticsearch.common.time.DateUtils.toMilliSeconds; +import static 
org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor.MultiValueSupport.LENIENT; +import static org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor.MultiValueSupport.NONE; import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.VERSION; @@ -47,7 +49,7 @@ public class FieldHitExtractorTests extends AbstractSqlWireSerializingTestCase randomFrom(SqlDataTypes.types())), randomValueOtherThan(instance.zoneId(), ESTestCase::randomZone), instance.hitName() + "mutated", - randomBoolean() + randomBoolean() ? NONE : LENIENT ); } @@ -125,14 +127,14 @@ public void testGetDate() { List documentFieldValues = Collections.singletonList(StringUtils.toString(zdt)); DocumentField field = new DocumentField("my_date_nanos_field", documentFieldValues); SearchHit hit = new SearchHit(1, null, singletonMap("my_date_nanos_field", field), null); - FieldHitExtractor extractor = new FieldHitExtractor("my_date_nanos_field", DATETIME, zoneId, true); + FieldHitExtractor extractor = new FieldHitExtractor("my_date_nanos_field", DATETIME, zoneId, LENIENT); assertEquals(zdt, extractor.extract(hit)); } public void testToString() { assertEquals( "hit.field@hit@Europe/Berlin", - new FieldHitExtractor("hit.field", null, ZoneId.of("Europe/Berlin"), "hit", false).toString() + new FieldHitExtractor("hit.field", null, ZoneId.of("Europe/Berlin"), "hit", NONE).toString() ); } @@ -163,7 +165,7 @@ public void testMultiValuedSource() { } public void testMultiValuedSourceAllowed() { - FieldHitExtractor fe = new FieldHitExtractor("a", null, UTC, true); + FieldHitExtractor fe = new FieldHitExtractor("a", null, UTC, LENIENT); Object valueA = randomValue(); Object valueB = randomValue(); DocumentField field = new DocumentField("a", asList(valueA, valueB)); @@ -173,7 +175,7 @@ public void testMultiValuedSourceAllowed() { public void 
testGeoShapeExtraction() { String fieldName = randomAlphaOfLength(5); - FieldHitExtractor fe = new FieldHitExtractor(fieldName, randomBoolean() ? GEO_SHAPE : SHAPE, UTC, false); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, randomBoolean() ? GEO_SHAPE : SHAPE, UTC, NONE); Map map = Maps.newMapWithExpectedSize(2); map.put("coordinates", asList(1d, 2d)); @@ -186,7 +188,7 @@ public void testGeoShapeExtraction() { public void testMultipleGeoShapeExtraction() { String fieldName = randomAlphaOfLength(5); - FieldHitExtractor fe = new FieldHitExtractor(fieldName, randomBoolean() ? GEO_SHAPE : SHAPE, UTC, false); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, randomBoolean() ? GEO_SHAPE : SHAPE, UTC, NONE); Map map1 = Maps.newMapWithExpectedSize(2); map1.put("coordinates", asList(1d, 2d)); @@ -200,7 +202,7 @@ public void testMultipleGeoShapeExtraction() { QlIllegalArgumentException ex = expectThrows(QlIllegalArgumentException.class, () -> fe.extract(hit)); assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); - FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, randomBoolean() ? GEO_SHAPE : SHAPE, UTC, true); + FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, randomBoolean() ? GEO_SHAPE : SHAPE, UTC, LENIENT); assertEquals( new GeoShape(3, 4), lenientFe.extract( @@ -217,7 +219,7 @@ public void testUnsignedLongExtraction() { String fieldName = randomAlphaOfLength(10); DocumentField field = new DocumentField(fieldName, singletonList(value)); SearchHit hit = new SearchHit(1, null, singletonMap(fieldName, field), null); - FieldHitExtractor fe = new FieldHitExtractor(fieldName, UNSIGNED_LONG, randomZone(), randomBoolean()); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, UNSIGNED_LONG, randomZone(), randomBoolean() ? 
NONE : LENIENT); assertEquals(bi, fe.extract(hit)); } @@ -230,7 +232,7 @@ public void testVersionExtraction() { String fieldName = randomAlphaOfLength(10); DocumentField field = new DocumentField(fieldName, singletonList(value)); SearchHit hit = new SearchHit(1, null, singletonMap(fieldName, field), null); - FieldHitExtractor fe = new FieldHitExtractor(fieldName, VERSION, randomZone(), randomBoolean()); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, VERSION, randomZone(), randomBoolean() ? NONE : LENIENT); assertEquals(version.toString(), fe.extract(hit).toString()); } From 3df396e4b717afb430dd2b012404f25d43fbb034 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Sep 2022 09:39:32 +0100 Subject: [PATCH 03/26] Mute test for #90336 --- .../org/elasticsearch/threadpool/ScalingThreadPoolTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index e34be078052a8..c2111f01558f9 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -39,6 +39,7 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase { + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90336") public void testScalingThreadPoolConfiguration() throws InterruptedException { final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final Settings.Builder builder = Settings.builder(); From 2a66741edad7892c6a40d6b0bae704a23609fd1c Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 27 Sep 2022 11:41:26 +0200 Subject: [PATCH 04/26] Use chunked encoding for RestGetSettingsAction (#90326) This one needs chunked encoding as well, the response easily grows to O(10M) for large deployments with many indices. 
Especially when settings are numerous or have large values like e.g. settings from the Beats templates. --- .../settings/get/GetSettingsResponse.java | 72 +++++++++---------- .../admin/indices/RestGetSettingsAction.java | 4 +- .../get/GetSettingsResponseTests.java | 11 +++ 3 files changed, 47 insertions(+), 40 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java index 0305f123bba11..9b5047b9937b4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java @@ -10,9 +10,11 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; @@ -23,10 +25,11 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.Objects; -public class GetSettingsResponse extends ActionResponse implements ToXContentObject { +public class GetSettingsResponse extends ActionResponse implements ToXContentObject, ChunkedToXContent { private final Map indexToSettings; private final Map indexToDefaultSettings; @@ -44,25 +47,12 @@ public GetSettingsResponse(StreamInput in) throws IOException { /** * Returns a map of index name to {@link Settings} object. 
The returned {@link Settings} - * objects contain only those settings explicitly set on a given index. Any settings - * taking effect as defaults must be accessed via {@link #getIndexToDefaultSettings()}. + * objects contain only those settings explicitly set on a given index. */ public Map getIndexToSettings() { return indexToSettings; } - /** - * If the originating {@link GetSettingsRequest} object was configured to include - * defaults, this will contain a mapping of index name to {@link Settings} objects. - * The returned {@link Settings} objects will contain only those settings taking - * effect as defaults. Any settings explicitly set on the index will be available - * via {@link #getIndexToSettings()}. - * See also {@link GetSettingsRequest#includeDefaults(boolean)} - */ - public Map getIndexToDefaultSettings() { - return indexToDefaultSettings; - } - /** * Returns the string value for the specified index and setting. If the includeDefaults * flag was not set or set to false on the GetSettingsRequest, this method will only @@ -154,7 +144,10 @@ public String toString() { try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); XContentBuilder builder = new XContentBuilder(JsonXContent.jsonXContent, baos); - toXContent(builder, ToXContent.EMPTY_PARAMS, false); + var iterator = toXContentChunked(false); + while (iterator.hasNext()) { + iterator.next().toXContent(builder, ToXContent.EMPTY_PARAMS); + } return Strings.toString(builder); } catch (IOException e) { throw new IllegalStateException(e); // should not be possible here @@ -162,30 +155,33 @@ public String toString() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return toXContent(builder, params, indexToDefaultSettings.isEmpty()); + public Iterator toXContentChunked() { + final boolean omitEmptySettings = indexToDefaultSettings.isEmpty(); + return toXContentChunked(omitEmptySettings); } - private XContentBuilder 
toXContent(XContentBuilder builder, Params params, boolean omitEmptySettings) throws IOException { - builder.startObject(); - for (Map.Entry cursor : getIndexToSettings().entrySet()) { - // no settings, jump over it to shorten the response data - if (omitEmptySettings && cursor.getValue().isEmpty()) { - continue; - } - builder.startObject(cursor.getKey()); - builder.startObject("settings"); - cursor.getValue().toXContent(builder, params); - builder.endObject(); - if (indexToDefaultSettings.isEmpty() == false) { - builder.startObject("defaults"); - indexToDefaultSettings.get(cursor.getKey()).toXContent(builder, params); - builder.endObject(); - } - builder.endObject(); - } - builder.endObject(); - return builder; + private Iterator toXContentChunked(boolean omitEmptySettings) { + final boolean indexToDefaultSettingsEmpty = indexToDefaultSettings.isEmpty(); + return Iterators.concat( + Iterators.single((builder, params) -> builder.startObject()), + getIndexToSettings().entrySet() + .stream() + .filter(entry -> omitEmptySettings == false || entry.getValue().isEmpty() == false) + .map(entry -> (ToXContent) (builder, params) -> { + builder.startObject(entry.getKey()); + builder.startObject("settings"); + entry.getValue().toXContent(builder, params); + builder.endObject(); + if (indexToDefaultSettingsEmpty == false) { + builder.startObject("defaults"); + indexToDefaultSettings.get(entry.getKey()).toXContent(builder, params); + builder.endObject(); + } + return builder.endObject(); + }) + .iterator(), + Iterators.single((builder, params) -> builder.endObject()) + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java index b9b244c7d2c35..a7f1b5dabf243 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.rest.action.RestChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -52,6 +52,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC .names(names); getSettingsRequest.local(request.paramAsBoolean("local", getSettingsRequest.local())); getSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSettingsRequest.masterNodeTimeout())); - return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestToXContentListener<>(channel)); + return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestChunkedToXContentListener<>(channel)); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java index 2764eeaa2a11a..8e904db944417 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java @@ -74,4 +74,15 @@ protected Predicate getRandomFieldsExcludeFilter() { return f -> f.equals("") || f.contains(".settings") || f.contains(".defaults"); } + public void testOneChunkPerIndex() { + final var instance = createTestInstance(); + final var iterator = instance.toXContentChunked(); + int chunks = 0; + while (iterator.hasNext()) { + chunks++; + iterator.next(); + } + assertEquals(2 + instance.getIndexToSettings().size(), chunks); + } + } From 136ac4934ac487374ac6c4805b9d8ee0acd2376a Mon Sep 17 00:00:00 2001 From: 
David Turner Date: Tue, 27 Sep 2022 10:44:16 +0100 Subject: [PATCH 05/26] AwaitsFix for #90392 --- .../search/profile/SearchProfileDfsPhaseResultTests.java | 5 +++++ .../search/profile/SearchProfileResultsTests.java | 5 +++++ .../org/elasticsearch/test/AbstractSerializingTestCase.java | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java index fa68d4420df23..4fb10a5d70af6 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java @@ -35,4 +35,9 @@ protected Reader instanceReader() { protected SearchProfileDfsPhaseResult doParseInstance(XContentParser parser) throws IOException { return SearchProfileDfsPhaseResult.fromXContent(parser); } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90392") + public void testFromXContent() throws IOException { + super.testFromXContent(); + } } diff --git a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java index 256c60c5ab4f7..7ce348064f71d 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java @@ -57,4 +57,9 @@ protected SearchProfileResults doParseInstance(XContentParser parser) throws IOE protected Predicate getRandomFieldsExcludeFilter() { return ProfileResultTests.RANDOM_FIELDS_EXCLUDE_FILTER; } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90392") + public void testFromXContent() throws IOException { + super.testFromXContent(); + } } diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java index a880ac6bc18cc..9dfee23bd9c90 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java @@ -35,7 +35,7 @@ public abstract class AbstractSerializingTestCase Date: Tue, 27 Sep 2022 13:17:07 +0200 Subject: [PATCH 06/26] Speed up SnapshotInProgressAllocationDecider (#90351) We can check the flag for whether the snapshot has any shards in init state for free before going for the more expensive map lookup and potential checks on the found shard status. This helps speed things up quite a bit in case of having multiple snapshots queued up for a shard. --- .../decider/SnapshotInProgressAllocationDecider.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index 8a7357d629d3b..75099d067db57 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -14,8 +14,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.index.shard.ShardId; -import java.util.function.Predicate; - /** * This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that * are currently been snapshotted to be moved to other nodes. 
@@ -65,7 +63,7 @@ private static Decision canMove(ShardRouting shardRouting, RoutingAllocation all final ShardId shardId = shardRouting.shardId(); return snapshotsInProgress.asStream() - .filter(Predicate.not(SnapshotsInProgress.Entry::isClone)) + .filter(entry -> entry.hasShardsInInitState() && entry.isClone() == false) .map(entry -> entry.shards().get(shardId)) .filter( shardSnapshotStatus -> shardSnapshotStatus != null From 09eafed22f9185401667604ef3fa4321cce58dca Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Tue, 27 Sep 2022 14:16:03 +0200 Subject: [PATCH 07/26] [Transform] Don't fail a transform due to ILM closing an index (#90396) Transform can fail due to a ClusterBlockException that reports to be non-retryable. This is a special kind of race condition where the initial checks pass, but meanwhile between the check and the action something changes. In the particular case a wildcard index pattern got resolved to concrete index names. One of the indices got closed (ILM) before transform run the search operation. Pragmatically we should handle a cluster block exception as retry-able error. 
fixes #89802 --- docs/changelog/90396.yaml | 6 ++++ .../transforms/TransformFailureHandler.java | 4 +++ .../TransformFailureHandlerTests.java | 33 +++++++++++++++++++ 3 files changed, 43 insertions(+) create mode 100644 docs/changelog/90396.yaml diff --git a/docs/changelog/90396.yaml b/docs/changelog/90396.yaml new file mode 100644 index 0000000000000..fbe2b636aa72e --- /dev/null +++ b/docs/changelog/90396.yaml @@ -0,0 +1,6 @@ +pr: 90396 +summary: Don't fail a transform on a ClusterBlockException, this may be due to ILM closing an index +area: Transform +type: bug +issues: + - 89802 diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java index f087d2b789464..30122eb9f25b1 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.script.ScriptException; import org.elasticsearch.xpack.core.transform.TransformMessages; @@ -65,6 +66,9 @@ void handleIndexerFailure(Exception e, SettingsConfig settingsConfig) { handleScriptException(scriptException, unattended); } else if (unwrappedException instanceof BulkIndexingException bulkIndexingException) { handleBulkIndexingException(bulkIndexingException, unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof ClusterBlockException clusterBlockException) { + // gh#89802 always retry for a cluster block exception, because a cluster 
block should be temporary. + retry(clusterBlockException, clusterBlockException.getDetailedMessage(), unattended, getNumFailureRetries(settingsConfig)); } else if (unwrappedException instanceof ElasticsearchException elasticsearchException) { handleElasticsearchException(elasticsearchException, unattended, getNumFailureRetries(settingsConfig)); } else if (unwrappedException instanceof IllegalArgumentException illegalArgumentException) { diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java index 6a66d0b53fdf9..0218f5ae86226 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java @@ -11,6 +11,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.rest.RestStatus; @@ -20,6 +22,9 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; import org.elasticsearch.xpack.transform.notifications.MockTransformAuditor; +import java.util.Map; +import java.util.Set; + import static java.util.Collections.singletonList; public class TransformFailureHandlerTests extends ESTestCase { @@ -113,6 +118,34 @@ public void testUnattended() { assertNoFailure(handler, new NullPointerException("NPE"), contextListener, settings); } + public void testClusterBlock() { + String transformId 
= randomAlphaOfLength(10); + SettingsConfig settings = new SettingsConfig.Builder().setNumFailureRetries(2).build(); + + MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); + MockTransformContextListener contextListener = new MockTransformContextListener(); + TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + context.setPageSize(500); + + TransformFailureHandler handler = new TransformFailureHandler(auditor, context, transformId); + + final ClusterBlockException clusterBlock = new ClusterBlockException( + Map.of("test-index", Set.of(MetadataIndexStateService.INDEX_CLOSED_BLOCK)) + ); + + handler.handleIndexerFailure(clusterBlock, settings); + assertFalse(contextListener.getFailed()); + assertEquals(1, contextListener.getFailureCountChangedCounter()); + + handler.handleIndexerFailure(clusterBlock, settings); + assertFalse(contextListener.getFailed()); + assertEquals(2, contextListener.getFailureCountChangedCounter()); + + handler.handleIndexerFailure(clusterBlock, settings); + assertTrue(contextListener.getFailed()); + assertEquals(3, contextListener.getFailureCountChangedCounter()); + } + private void assertNoFailure( TransformFailureHandler handler, Exception e, From 24cf87186dd976b1cafb99e5774e6d6515420472 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 27 Sep 2022 14:44:30 +0200 Subject: [PATCH 08/26] Limit shard relocation retries (#90296) This change ensures that elasticsearch will not indefinitely retry relocating a shard if the operation fails.
--- docs/changelog/90296.yaml | 5 + docs/reference/search/search-shards.asciidoc | 35 +++- .../indices/IndicesLifecycleListenerIT.java | 27 +++ .../routing/RelocationFailureInfo.java | 56 ++++++ .../routing/RoutingChangesObserver.java | 5 + .../cluster/routing/RoutingNodes.java | 64 +++--- .../cluster/routing/ShardRouting.java | 85 +++++++- .../routing/allocation/AllocationService.java | 2 +- .../RoutingNodesChangedObserver.java | 7 + .../decider/DiskThresholdDecider.java | 4 +- .../decider/MaxRetryAllocationDecider.java | 62 ++++-- .../cluster/ClusterStateTests.java | 18 ++ .../routing/RelocationFailureInfoTests.java | 32 +++ .../cluster/routing/ShardRoutingTests.java | 5 + .../MaxRetryAllocationDeciderTests.java | 183 ++++++++++-------- .../cluster/routing/ShardRoutingHelper.java | 2 + .../cluster/routing/TestShardRouting.java | 13 ++ 17 files changed, 466 insertions(+), 139 deletions(-) create mode 100644 docs/changelog/90296.yaml create mode 100644 server/src/main/java/org/elasticsearch/cluster/routing/RelocationFailureInfo.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/RelocationFailureInfoTests.java diff --git a/docs/changelog/90296.yaml b/docs/changelog/90296.yaml new file mode 100644 index 0000000000000..dd021ec08bf46 --- /dev/null +++ b/docs/changelog/90296.yaml @@ -0,0 +1,5 @@ +pr: 90296 +summary: Limit shard realocation retries +area: Allocation +type: enhancement +issues: [] diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc index 87a265bf5c8e3..d0a518865baf4 100644 --- a/docs/reference/search/search-shards.asciidoc +++ b/docs/reference/search/search-shards.asciidoc @@ -87,55 +87,70 @@ The API returns the following result: { "index": "my-index-000001", "node": "JklnKbD7Tyqi9TP3_Q_tBg", + "relocating_node": null, "primary": true, "shard": 0, "state": "STARTED", "allocation_id": {"id":"0TvkCyF7TAmM1wHP4a42-A"}, - "relocating_node": null + "relocation_failure_info" : { + 
"failed_attempts" : 0 + } } ], [ { "index": "my-index-000001", "node": "JklnKbD7Tyqi9TP3_Q_tBg", + "relocating_node": null, "primary": true, "shard": 1, "state": "STARTED", "allocation_id": {"id":"fMju3hd1QHWmWrIgFnI4Ww"}, - "relocating_node": null + "relocation_failure_info" : { + "failed_attempts" : 0 + } } ], [ { "index": "my-index-000001", "node": "JklnKbD7Tyqi9TP3_Q_tBg", + "relocating_node": null, "primary": true, "shard": 2, "state": "STARTED", "allocation_id": {"id":"Nwl0wbMBTHCWjEEbGYGapg"}, - "relocating_node": null + "relocation_failure_info" : { + "failed_attempts" : 0 + } } ], [ { "index": "my-index-000001", "node": "JklnKbD7Tyqi9TP3_Q_tBg", + "relocating_node": null, "primary": true, "shard": 3, "state": "STARTED", "allocation_id": {"id":"bU_KLGJISbW0RejwnwDPKw"}, - "relocating_node": null + "relocation_failure_info" : { + "failed_attempts" : 0 + } } ], [ { "index": "my-index-000001", "node": "JklnKbD7Tyqi9TP3_Q_tBg", + "relocating_node": null, "primary": true, "shard": 4, "state": "STARTED", "allocation_id": {"id":"DMs7_giNSwmdqVukF7UydA"}, - "relocating_node": null + "relocation_failure_info" : { + "failed_attempts" : 0 + } } ] ] @@ -171,22 +186,28 @@ The API returns the following result: { "index": "my-index-000001", "node": "JklnKbD7Tyqi9TP3_Q_tBg", + "relocating_node": null, "primary": true, "shard": 2, "state": "STARTED", "allocation_id": {"id":"fMju3hd1QHWmWrIgFnI4Ww"}, - "relocating_node": null + "relocation_failure_info" : { + "failed_attempts" : 0 + } } ], [ { "index": "my-index-000001", "node": "JklnKbD7Tyqi9TP3_Q_tBg", + "relocating_node": null, "primary": true, "shard": 3, "state": "STARTED", "allocation_id": {"id":"0TvkCyF7TAmM1wHP4a42-A"}, - "relocating_node": null + "relocation_failure_info" : { + "failed_attempts" : 0 + } } ] ] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index 
7f3c9b0d2430a..6ddb8fdc52d83 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -43,6 +43,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.index.shard.IndexShardState.CLOSED; @@ -137,6 +138,32 @@ public void beforeIndexCreated(Index index, Settings indexSettings) { assertThat(state.nodes().resolveNode(shard.get(0).currentNodeId()).getName(), Matchers.equalTo(node1)); } + public void testRelocationFailureNotRetriedForever() { + String node1 = internalCluster().startNode(); + client().admin() + .indices() + .prepareCreate("index1") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) + .get(); + ensureGreen("index1"); + for (int i = 0; i < 2; i++) { + internalCluster().getInstance(MockIndexEventListener.TestEventListener.class, internalCluster().startNode()) + .setNewDelegate(new IndexShardStateChangeListener() { + @Override + public void beforeIndexCreated(Index index, Settings indexSettings) { + throw new RuntimeException("FAIL"); + } + }); + } + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("index1") + .setSettings(Settings.builder().put(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name", node1)) + ); + ensureGreen("index1"); + } + public void testIndexStateShardChanged() throws Throwable { // start with a single node String node1 = internalCluster().startNode(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RelocationFailureInfo.java 
b/server/src/main/java/org/elasticsearch/cluster/routing/RelocationFailureInfo.java new file mode 100644 index 0000000000000..0776cbe8d556e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RelocationFailureInfo.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Holds additional information as to why the shard failed to relocate. + */ +public record RelocationFailureInfo(int failedRelocations) implements ToXContentFragment, Writeable { + + public static final RelocationFailureInfo NO_FAILURES = new RelocationFailureInfo(0); + + public RelocationFailureInfo { + assert failedRelocations >= 0 : "Expect non-negative failures count, got: " + failedRelocations; + } + + public static RelocationFailureInfo readFrom(StreamInput in) throws IOException { + int failures = in.readVInt(); + return failures == 0 ? 
NO_FAILURES : new RelocationFailureInfo(failures); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(failedRelocations); + } + + public RelocationFailureInfo incFailedRelocations() { + return new RelocationFailureInfo(failedRelocations + 1); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("relocation_failure_info"); + builder.field("failed_attempts", failedRelocations); + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return "failed_attempts[" + failedRelocations + "]"; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java index acdcba613c776..94869c3c8b845 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java @@ -32,6 +32,11 @@ default void relocationStarted(ShardRouting startedShard, ShardRouting targetRel */ default void unassignedInfoUpdated(ShardRouting unassignedShard, UnassignedInfo newUnassignedInfo) {} + /** + * Called when a relocating shard's failure information was updated + */ + default void relocationFailureInfoUpdated(ShardRouting relocatedShard, RelocationFailureInfo relocationFailureInfo) {} + /** * Called when a shard is failed or cancelled. 
*/ diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index c574c267d330f..2a4524e861883 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -990,30 +990,6 @@ public void ignoreShard(ShardRouting shard, AllocationStatus allocationStatus, R ignored.add(shard); } - public void resetFailedAllocationCounter(RoutingChangesObserver routingChangesObserver) { - final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = iterator(); - while (unassignedIterator.hasNext()) { - ShardRouting shardRouting = unassignedIterator.next(); - UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); - unassignedIterator.updateUnassigned( - new UnassignedInfo( - unassignedInfo.getNumFailedAllocations() > 0 ? UnassignedInfo.Reason.MANUAL_ALLOCATION : unassignedInfo.getReason(), - unassignedInfo.getMessage(), - unassignedInfo.getFailure(), - 0, - unassignedInfo.getUnassignedTimeInNanos(), - unassignedInfo.getUnassignedTimeInMillis(), - unassignedInfo.isDelayed(), - unassignedInfo.getLastAllocationStatus(), - Collections.emptySet(), - unassignedInfo.getLastAllocatedNodeId() - ), - shardRouting.recoverySource(), - routingChangesObserver - ); - } - } - public class UnassignedIterator implements Iterator, ExistingShardsAllocator.UnassignedAllocationHandler { private final ListIterator iterator; @@ -1293,6 +1269,46 @@ private void ensureMutable() { } } + public void resetFailedCounter(RoutingChangesObserver routingChangesObserver) { + final var unassignedIterator = unassigned().iterator(); + while (unassignedIterator.hasNext()) { + ShardRouting shardRouting = unassignedIterator.next(); + UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); + unassignedIterator.updateUnassigned( + new UnassignedInfo( + unassignedInfo.getNumFailedAllocations() 
> 0 ? UnassignedInfo.Reason.MANUAL_ALLOCATION : unassignedInfo.getReason(), + unassignedInfo.getMessage(), + unassignedInfo.getFailure(), + 0, + unassignedInfo.getUnassignedTimeInNanos(), + unassignedInfo.getUnassignedTimeInMillis(), + unassignedInfo.isDelayed(), + unassignedInfo.getLastAllocationStatus(), + Collections.emptySet(), + unassignedInfo.getLastAllocatedNodeId() + ), + shardRouting.recoverySource(), + routingChangesObserver + ); + } + + for (RoutingNode routingNode : this) { + var shardsWithRelocationFailures = new ArrayList(); + for (ShardRouting shardRouting : routingNode) { + if (shardRouting.relocationFailureInfo() != null && shardRouting.relocationFailureInfo().failedRelocations() > 0) { + shardsWithRelocationFailures.add(shardRouting); + } + } + + for (ShardRouting original : shardsWithRelocationFailures) { + ShardRouting updated = original.updateRelocationFailure(RelocationFailureInfo.NO_FAILURES); + routingNode.update(original, updated); + assignedShardsRemove(original); + assignedShardsAdd(updated); + } + } + } + /** * Creates an iterator over shards interleaving between nodes: The iterator returns the first shard from * the first node, then the first shard of the second node, etc. until one shard from each node has been returned. 
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 5e2cb2959af95..80eb164142d1c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -36,6 +36,7 @@ public final class ShardRouting implements Writeable, ToXContentObject { */ public static final long UNAVAILABLE_EXPECTED_SHARD_SIZE = -1; private static final Version EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION = Version.V_8_5_0; + private static final Version RELOCATION_FAILURE_INFO_VERSION = Version.V_8_6_0; private final ShardId shardId; private final String currentNodeId; @@ -49,8 +50,11 @@ public final class ShardRouting implements Writeable, ToXContentObject { private final String relocatingNodeId; private final boolean primary; private final ShardRoutingState state; + @Nullable private final RecoverySource recoverySource; + @Nullable private final UnassignedInfo unassignedInfo; + private final RelocationFailureInfo relocationFailureInfo; private final AllocationId allocationId; private final long expectedShardSize; @Nullable @@ -68,6 +72,7 @@ public final class ShardRouting implements Writeable, ToXContentObject { ShardRoutingState state, RecoverySource recoverySource, UnassignedInfo unassignedInfo, + RelocationFailureInfo relocationFailureInfo, AllocationId allocationId, long expectedShardSize ) { @@ -78,10 +83,12 @@ public final class ShardRouting implements Writeable, ToXContentObject { this.state = state; this.recoverySource = recoverySource; this.unassignedInfo = unassignedInfo; + this.relocationFailureInfo = relocationFailureInfo; this.allocationId = allocationId; this.expectedShardSize = expectedShardSize; this.targetRelocatingShard = initializeTargetRelocatingShard(); assert (state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) == false : "unassigned shard must be 
created with meta"; + assert relocationFailureInfo != null : "relocation failure info must be always set"; assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) : "recovery source only available on unassigned or initializing shard but was " + state; assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary @@ -103,6 +110,7 @@ private ShardRouting initializeTargetRelocatingShard() { ShardRoutingState.INITIALIZING, PeerRecoverySource.INSTANCE, unassignedInfo, + RelocationFailureInfo.NO_FAILURES, AllocationId.newTargetRelocation(allocationId), expectedShardSize ); @@ -128,6 +136,7 @@ public static ShardRouting newUnassigned( ShardRoutingState.UNASSIGNED, recoverySource, unassignedInfo, + RelocationFailureInfo.NO_FAILURES, null, UNAVAILABLE_EXPECTED_SHARD_SIZE ); @@ -241,6 +250,11 @@ public UnassignedInfo unassignedInfo() { return unassignedInfo; } + @Nullable + public RelocationFailureInfo relocationFailureInfo() { + return relocationFailureInfo; + } + /** * An id that uniquely identifies an allocation. 
*/ @@ -289,16 +303,19 @@ public ShardRouting(ShardId shardId, StreamInput in) throws IOException { recoverySource = null; } unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new); + if (in.getVersion().onOrAfter(RELOCATION_FAILURE_INFO_VERSION)) { + relocationFailureInfo = RelocationFailureInfo.readFrom(in); + } else { + relocationFailureInfo = RelocationFailureInfo.NO_FAILURES; + } allocationId = in.readOptionalWriteable(AllocationId::new); - final long shardSize; if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING || (state == ShardRoutingState.STARTED && in.getVersion().onOrAfter(EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION))) { - shardSize = in.readLong(); + expectedShardSize = in.readLong(); } else { - shardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; + expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; } - expectedShardSize = shardSize; targetRelocatingShard = initializeTargetRelocatingShard(); } @@ -321,6 +338,9 @@ public void writeToThin(StreamOutput out) throws IOException { recoverySource.writeTo(out); } out.writeOptionalWriteable(unassignedInfo); + if (out.getVersion().onOrAfter(RELOCATION_FAILURE_INFO_VERSION)) { + relocationFailureInfo.writeTo(out); + } out.writeOptionalWriteable(allocationId); if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING @@ -336,7 +356,7 @@ public void writeTo(StreamOutput out) throws IOException { } public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource) { - assert this.unassignedInfo != null : "can only update unassign info if they are already set"; + assert this.unassignedInfo != null : "can only update unassigned info if it is already set"; assert this.unassignedInfo.isDelayed() || (unassignedInfo.isDelayed() == false) : "cannot transition from non-delayed to delayed"; return new ShardRouting( shardId, @@ -346,6 +366,23 @@ public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySour state, 
recoverySource, unassignedInfo, + relocationFailureInfo, + allocationId, + expectedShardSize + ); + } + + public ShardRouting updateRelocationFailure(RelocationFailureInfo relocationFailureInfo) { + assert this.relocationFailureInfo != null : "can only update relocation failure info info if it is already set"; + return new ShardRouting( + shardId, + currentNodeId, + relocatingNodeId, + primary, + state, + recoverySource, + unassignedInfo, + relocationFailureInfo, allocationId, expectedShardSize ); @@ -374,6 +411,7 @@ public ShardRouting moveToUnassigned(UnassignedInfo unassignedInfo) { ShardRoutingState.UNASSIGNED, recoverySource, unassignedInfo, + RelocationFailureInfo.NO_FAILURES, null, UNAVAILABLE_EXPECTED_SHARD_SIZE ); @@ -401,6 +439,7 @@ public ShardRouting initialize(String nodeId, @Nullable String existingAllocatio ShardRoutingState.INITIALIZING, recoverySource, unassignedInfo, + RelocationFailureInfo.NO_FAILURES, allocationId, expectedShardSize ); @@ -421,6 +460,7 @@ public ShardRouting relocate(String relocatingNodeId, long expectedShardSize) { ShardRoutingState.RELOCATING, recoverySource, null, + relocationFailureInfo, AllocationId.newRelocation(allocationId), expectedShardSize ); @@ -442,6 +482,25 @@ public ShardRouting cancelRelocation() { ShardRoutingState.STARTED, recoverySource, null, + relocationFailureInfo.incFailedRelocations(), + AllocationId.cancelRelocation(allocationId), + UNAVAILABLE_EXPECTED_SHARD_SIZE + ); + } + + public ShardRouting asBeforeRelocation() { + assert state == ShardRoutingState.RELOCATING : this; + assert assignedToNode() : this; + assert relocatingNodeId != null : this; + return new ShardRouting( + shardId, + currentNodeId, + null, + primary, + ShardRoutingState.STARTED, + recoverySource, + null, + RelocationFailureInfo.NO_FAILURES, AllocationId.cancelRelocation(allocationId), UNAVAILABLE_EXPECTED_SHARD_SIZE ); @@ -465,6 +524,7 @@ public ShardRouting removeRelocationSource() { state, recoverySource, unassignedInfo, + 
relocationFailureInfo, AllocationId.finishRelocation(allocationId), expectedShardSize ); @@ -485,6 +545,7 @@ public ShardRouting reinitializeReplicaShard() { ShardRoutingState.INITIALIZING, recoverySource, unassignedInfo, + relocationFailureInfo, AllocationId.newInitializing(), expectedShardSize ); @@ -511,6 +572,7 @@ public ShardRouting moveToStarted(long expectedShardSize) { ShardRoutingState.STARTED, null, null, + RelocationFailureInfo.NO_FAILURES, allocationId, expectedShardSize ); @@ -534,6 +596,7 @@ public ShardRouting moveActiveReplicaToPrimary() { state, recoverySource, unassignedInfo, + relocationFailureInfo, allocationId, expectedShardSize ); @@ -557,6 +620,7 @@ public ShardRouting moveUnassignedFromPrimary() { state, PeerRecoverySource.INSTANCE, unassignedInfo, + relocationFailureInfo, allocationId, expectedShardSize ); @@ -699,7 +763,9 @@ public boolean equals(Object o) { return false; } ShardRouting that = (ShardRouting) o; - return Objects.equals(unassignedInfo, that.unassignedInfo) && equalsIgnoringMetadata(that); + return equalsIgnoringMetadata(that) + && Objects.equals(unassignedInfo, that.unassignedInfo) + && Objects.equals(relocationFailureInfo, that.relocationFailureInfo); } /** @@ -720,6 +786,7 @@ public int hashCode() { h = 31 * h + (recoverySource != null ? recoverySource.hashCode() : 0); h = 31 * h + (allocationId != null ? allocationId.hashCode() : 0); h = 31 * h + (unassignedInfo != null ? unassignedInfo.hashCode() : 0); + h = 31 * h + (relocationFailureInfo != null ? 
relocationFailureInfo.hashCode() : 0); hashCode = h; } return h; @@ -752,9 +819,10 @@ public String shortSummary() { if (allocationId != null) { sb.append(", a").append(allocationId); } - if (this.unassignedInfo != null) { - sb.append(", ").append(unassignedInfo.toString()); + if (unassignedInfo != null) { + sb.append(", ").append(unassignedInfo); } + sb.append(", ").append(relocationFailureInfo); if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) { sb.append(", expected_shard_size[").append(expectedShardSize).append("]"); } @@ -783,6 +851,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (unassignedInfo != null) { unassignedInfo.toXContent(builder, params); } + relocationFailureInfo.toXContent(builder, params); return builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 1bbdf759d8847..0b821d7c7f144 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -387,7 +387,7 @@ public CommandsResult reroute(final ClusterState clusterState, AllocationCommand allocation.ignoreDisable(true); if (retryFailed) { - allocation.routingNodes().unassigned().resetFailedAllocationCounter(allocation.changes()); + allocation.routingNodes().resetFailedCounter(allocation.changes()); } RoutingExplanations explanations = commands.execute(allocation, explain); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java index 81c2592b23ceb..96f7502f8f4a4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.routing.allocation; +import org.elasticsearch.cluster.routing.RelocationFailureInfo; import org.elasticsearch.cluster.routing.RoutingChangesObserver; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; @@ -53,6 +54,12 @@ public void unassignedInfoUpdated(ShardRouting unassignedShard, UnassignedInfo n setChanged(); } + @Override + public void relocationFailureInfoUpdated(ShardRouting relocatedShard, RelocationFailureInfo relocationFailureInfo) { + assert relocatedShard.active() : "expected active shard " + relocatedShard; + setChanged(); + } + @Override public void shardFailed(ShardRouting failedShard, UnassignedInfo unassignedInfo) { assert failedShard.assignedToNode() : "expected assigned shard " + failedShard; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 792c1db27ae2b..df932c566a1fd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -162,9 +162,11 @@ public static long sizeOfUnaccountedShards( if (subtractShardsMovingAway) { for (ShardRouting routing : node.relocating()) { String actualPath = clusterInfo.getDataPath(routing); + // This branch is needed as the map key contains some information that might have changed (eg shard status) + // It could be removed once https://github.com/elastic/elasticsearch/issues/90109 is fixed if (actualPath == null) { // we might know the path of this shard from before when it was relocating - actualPath = clusterInfo.getDataPath(routing.cancelRelocation()); + actualPath = 
clusterInfo.getDataPath(routing.asBeforeRelocation()); } if (dataPath.equals(actualPath)) { totalSize -= getExpectedShardSize(routing, 0L, clusterInfo, null, metadata, routingTable); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java index e039eec612f94..3ee48759c6593 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.RelocationFailureInfo; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -40,35 +40,34 @@ public class MaxRetryAllocationDecider extends AllocationDecider { @Override public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { - final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); + final int maxRetries = SETTING_ALLOCATION_MAX_RETRY.get(allocation.metadata().getIndexSafe(shardRouting.index()).getSettings()); + + final var unassignedInfo = shardRouting.unassignedInfo(); final int numFailedAllocations = unassignedInfo == null ? 0 : unassignedInfo.getNumFailedAllocations(); if (numFailedAllocations > 0) { - return decisionWithFailures(shardRouting, allocation, unassignedInfo, numFailedAllocations); + final var decision = numFailedAllocations >= maxRetries ? Decision.NO : Decision.YES; + return allocation.debugDecision() ? 
debugDecision(decision, unassignedInfo, numFailedAllocations, maxRetries) : decision; + } + + final var relocationFailureInfo = shardRouting.relocationFailureInfo(); + final int numFailedRelocations = relocationFailureInfo == null ? 0 : relocationFailureInfo.failedRelocations(); + if (numFailedRelocations > 0) { + final var decision = numFailedRelocations >= maxRetries ? Decision.NO : Decision.YES; + return allocation.debugDecision() ? debugDecision(decision, relocationFailureInfo, numFailedRelocations, maxRetries) : decision; } - return YES_NO_FAILURES; - } - private static Decision decisionWithFailures( - ShardRouting shardRouting, - RoutingAllocation allocation, - UnassignedInfo unassignedInfo, - int numFailedAllocations - ) { - final IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); - final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetadata.getSettings()); - final Decision res = numFailedAllocations >= maxRetry ? Decision.NO : Decision.YES; - return allocation.debugDecision() ? 
debugDecision(res, unassignedInfo, numFailedAllocations, maxRetry) : res; + return YES_NO_FAILURES; } - private static Decision debugDecision(Decision decision, UnassignedInfo unassignedInfo, int numFailedAllocations, int maxRetry) { + private static Decision debugDecision(Decision decision, UnassignedInfo info, int numFailedAllocations, int maxRetries) { if (decision.type() == Decision.Type.NO) { return Decision.single( Decision.Type.NO, NAME, - "shard has exceeded the maximum number of retries [%d] on " - + "failed allocation attempts - manually call [/_cluster/reroute?retry_failed=true] to retry, [%s]", - maxRetry, - unassignedInfo.toString() + "shard has exceeded the maximum number of retries [%d] on failed allocation attempts - " + + "manually call [/_cluster/reroute?retry_failed=true] to retry, [%s]", + maxRetries, + info.toString() ); } else { return Decision.single( @@ -76,7 +75,28 @@ private static Decision debugDecision(Decision decision, UnassignedInfo unassign NAME, "shard has failed allocating [%d] times but [%d] retries are allowed", numFailedAllocations, - maxRetry + maxRetries + ); + } + } + + private static Decision debugDecision(Decision decision, RelocationFailureInfo info, int numFailedRelocations, int maxRetries) { + if (decision.type() == Decision.Type.NO) { + return Decision.single( + Decision.Type.NO, + NAME, + "shard has exceeded the maximum number of retries [%d] on failed relocation attempts - " + + "manually call [/_cluster/reroute?retry_failed=true] to retry, [%s]", + maxRetries, + info.toString() + ); + } else { + return Decision.single( + Decision.Type.YES, + NAME, + "shard has failed relocating [%d] times but [%d] retries are allowed", + numFailedRelocations, + maxRetries ); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index d02becf3cd996..e75ea5722ef87 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -301,6 +301,9 @@ public void testToXContent() throws IOException { "index": "index", "allocation_id": { "id": "%s" + }, + "relocation_failure_info" : { + "failed_attempts" : 0 } } ] @@ -321,6 +324,9 @@ public void testToXContent() throws IOException { "index": "index", "allocation_id": { "id": "%s" + }, + "relocation_failure_info" : { + "failed_attempts" : 0 } } ], @@ -505,6 +511,9 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti "index" : "index", "allocation_id" : { "id" : "%s" + }, + "relocation_failure_info" : { + "failed_attempts" : 0 } } ] @@ -525,6 +534,9 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti "index" : "index", "allocation_id" : { "id" : "%s" + }, + "relocation_failure_info" : { + "failed_attempts" : 0 } } ], @@ -716,6 +728,9 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti "index" : "index", "allocation_id" : { "id" : "%s" + }, + "relocation_failure_info" : { + "failed_attempts" : 0 } } ] @@ -736,6 +751,9 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti "index" : "index", "allocation_id" : { "id" : "%s" + }, + "relocation_failure_info" : { + "failed_attempts" : 0 } } ], diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/RelocationFailureInfoTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/RelocationFailureInfoTests.java new file mode 100644 index 0000000000000..5e76189bc6b2f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/RelocationFailureInfoTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; + +public class RelocationFailureInfoTests extends AbstractWireSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return RelocationFailureInfo::readFrom; + } + + @Override + protected RelocationFailureInfo createTestInstance() { + return new RelocationFailureInfo(randomIntBetween(1, 10)); + } + + @Override + protected RelocationFailureInfo mutateInstance(RelocationFailureInfo instance) throws IOException { + return new RelocationFailureInfo(instance.failedRelocations() + 1); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java index de2cc08fc5ff5..74b923c21cf74 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java @@ -136,6 +136,7 @@ public void testEqualsIgnoringVersion() { otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(), + otherRouting.relocationFailureInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize() ); @@ -150,6 +151,7 @@ public void testEqualsIgnoringVersion() { otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(), + otherRouting.relocationFailureInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize() ); @@ -167,6 +169,7 @@ public void testEqualsIgnoringVersion() { otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(), + 
otherRouting.relocationFailureInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize() ); @@ -185,6 +188,7 @@ public void testEqualsIgnoringVersion() { otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(), + otherRouting.relocationFailureInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize() ); @@ -208,6 +212,7 @@ public void testEqualsIgnoringVersion() { new IndexId("test", UUIDs.randomBase64UUID(random())) ), otherRouting.unassignedInfo(), + otherRouting.relocationFailureInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize() ); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index 4a4a6f07c3894..f995acc35c6cf 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -25,10 +26,12 @@ import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.EmptySnapshotsInfoService; +import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.test.gateway.TestGatewayAllocator; -import java.util.Collections; import java.util.List; +import java.util.Objects; +import java.util.function.Consumer; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static 
org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; @@ -39,44 +42,34 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase { - private AllocationService strategy; - - @Override - public void setUp() throws Exception { - super.setUp(); - strategy = new AllocationService( - new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), - new TestGatewayAllocator(), - new BalancedShardsAllocator(Settings.EMPTY), - EmptyClusterInfoService.INSTANCE, - EmptySnapshotsInfoService.INSTANCE - ); - } + private final MaxRetryAllocationDecider decider = new MaxRetryAllocationDecider(); + private final AllocationService strategy = new AllocationService( + new AllocationDeciders(List.of(decider)), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); private ClusterState createInitialClusterState() { - Metadata.Builder metaBuilder = Metadata.builder(); - metaBuilder.put(IndexMetadata.builder("idx").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)); - Metadata metadata = metaBuilder.build(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - routingTableBuilder.addAsNew(metadata.index("idx")); + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("idx").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .build(); + RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("idx")).build(); - RoutingTable routingTable = routingTableBuilder.build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metadata(metadata) .routingTable(routingTable) - .build(); - clusterState = ClusterState.builder(clusterState) .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) .build(); - RoutingTable prevRoutingTable = routingTable; - routingTable = 
strategy.reroute(clusterState, "reroute").routingTable(); - clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - assertEquals(prevRoutingTable.index("idx").size(), 1); - assertEquals(prevRoutingTable.index("idx").shard(0).shard(0).state(), UNASSIGNED); + assertEquals(clusterState.routingTable().index("idx").size(), 1); + assertEquals(clusterState.routingTable().index("idx").shard(0).shard(0).state(), UNASSIGNED); - assertEquals(routingTable.index("idx").size(), 1); - assertEquals(routingTable.index("idx").shard(0).shard(0).state(), INITIALIZING); + clusterState = strategy.reroute(clusterState, "reroute"); + + assertEquals(clusterState.routingTable().index("idx").size(), 1); + assertEquals(clusterState.routingTable().index("idx").shard(0).shard(0).state(), INITIALIZING); return clusterState; } @@ -86,15 +79,7 @@ public void testSingleRetryOnIgnore() { final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY); // now fail it N-1 times for (int i = 0; i < retries - 1; i++) { - List failedShards = Collections.singletonList( - new FailedShard( - routingTable.index("idx").shard(0).shard(0), - "boom" + i, - new UnsupportedOperationException(), - randomBoolean() - ) - ); - ClusterState newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); + ClusterState newState = applyShardFailure(clusterState, routingTable.index("idx").shard(0).shard(0), "boom" + i); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -104,10 +89,7 @@ public void testSingleRetryOnIgnore() { assertThat(routingTable.index("idx").shard(0).shard(0).unassignedInfo().getMessage(), containsString("boom" + i)); } // now we go and check that we are actually stick to unassigned on the next failure - List failedShards = Collections.singletonList( - new FailedShard(routingTable.index("idx").shard(0).shard(0), "boom", new UnsupportedOperationException(), 
randomBoolean()) - ); - ClusterState newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); + ClusterState newState = applyShardFailure(clusterState, routingTable.index("idx").shard(0).shard(0), "boom"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -130,11 +112,7 @@ public void testSingleRetryOnIgnore() { // again fail it N-1 times for (int i = 0; i < retries - 1; i++) { - failedShards = Collections.singletonList( - new FailedShard(routingTable.index("idx").shard(0).shard(0), "boom", new UnsupportedOperationException(), randomBoolean()) - ); - - newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); + newState = applyShardFailure(clusterState, routingTable.index("idx").shard(0).shard(0), "boom"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -145,10 +123,7 @@ public void testSingleRetryOnIgnore() { } // now we go and check that we are actually stick to unassigned on the next failure - failedShards = Collections.singletonList( - new FailedShard(routingTable.index("idx").shard(0).shard(0), "boom", new UnsupportedOperationException(), randomBoolean()) - ); - newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); + newState = applyShardFailure(clusterState, routingTable.index("idx").shard(0).shard(0), "boom"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -164,15 +139,7 @@ public void testFailedAllocation() { final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY); // now fail it N-1 times for (int i = 0; i < retries - 1; i++) { - List failedShards = Collections.singletonList( - new FailedShard( - routingTable.index("idx").shard(0).shard(0), - "boom" + i, - new UnsupportedOperationException(), - randomBoolean() - ) - ); - ClusterState newState = 
strategy.applyFailedShards(clusterState, failedShards, List.of()); + ClusterState newState = applyShardFailure(clusterState, routingTable.index("idx").shard(0).shard(0), "boom" + i); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -184,15 +151,12 @@ public void testFailedAllocation() { // MaxRetryAllocationDecider#canForceAllocatePrimary should return YES decisions because canAllocate returns YES here assertEquals( Decision.Type.YES, - new MaxRetryAllocationDecider().canForceAllocatePrimary(unassignedPrimary, null, newRoutingAllocation(clusterState)).type() + decider.canForceAllocatePrimary(unassignedPrimary, null, newRoutingAllocation(clusterState)).type() ); } // now we go and check that we are actually stick to unassigned on the next failure { - List failedShards = Collections.singletonList( - new FailedShard(routingTable.index("idx").shard(0).shard(0), "boom", new UnsupportedOperationException(), randomBoolean()) - ); - ClusterState newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); + ClusterState newState = applyShardFailure(clusterState, routingTable.index("idx").shard(0).shard(0), "boom"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -204,7 +168,7 @@ public void testFailedAllocation() { // MaxRetryAllocationDecider#canForceAllocatePrimary should return a NO decision because canAllocate returns NO here assertEquals( Decision.Type.NO, - new MaxRetryAllocationDecider().canForceAllocatePrimary(unassignedPrimary, null, newRoutingAllocation(clusterState)).type() + decider.canForceAllocatePrimary(unassignedPrimary, null, newRoutingAllocation(clusterState)).type() ); } @@ -240,11 +204,7 @@ public void testFailedAllocation() { // bumped up the max retry count, so canForceAllocatePrimary should return a YES decision assertEquals( Decision.Type.YES, - new 
MaxRetryAllocationDecider().canForceAllocatePrimary( - routingTable.index("idx").shard(0).shard(0), - null, - newRoutingAllocation(clusterState) - ).type() + decider.canForceAllocatePrimary(routingTable.index("idx").shard(0).shard(0), null, newRoutingAllocation(clusterState)).type() ); // now we start the shard @@ -257,10 +217,7 @@ public void testFailedAllocation() { assertEquals(routingTable.index("idx").shard(0).shard(0).state(), STARTED); // now fail again and see if it has a new counter - List failedShards = Collections.singletonList( - new FailedShard(routingTable.index("idx").shard(0).shard(0), "ZOOOMG", new UnsupportedOperationException(), randomBoolean()) - ); - newState = strategy.applyFailedShards(clusterState, failedShards, List.of()); + newState = applyShardFailure(clusterState, routingTable.index("idx").shard(0).shard(0), "ZOOOMG"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; routingTable = newState.routingTable(); @@ -272,10 +229,83 @@ public void testFailedAllocation() { // Counter reset, so MaxRetryAllocationDecider#canForceAllocatePrimary should return a YES decision assertEquals( Decision.Type.YES, - new MaxRetryAllocationDecider().canForceAllocatePrimary(unassignedPrimary, null, newRoutingAllocation(clusterState)).type() + decider.canForceAllocatePrimary(unassignedPrimary, null, newRoutingAllocation(clusterState)).type() + ); + } + + public void testFailedRelocation() { + ClusterState clusterState = createInitialClusterState(); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY); + + // shard could be relocated while retries are not exhausted + for (int i = 0; i < retries; i++) { + clusterState = withRoutingAllocation(clusterState, allocation -> { + var source = allocation.routingTable().index("idx").shard(0).shard(0); + var targetNodeId = Objects.equals(source.currentNodeId(), "node1") ? 
"node2" : "node1"; + assertThat(decider.canAllocate(source, allocation).type(), equalTo(Decision.Type.YES)); + allocation.routingNodes().relocateShard(source, targetNodeId, 0, allocation.changes()); + }); + clusterState = applyShardFailure( + clusterState, + clusterState.getRoutingTable().index("idx").shard(0).shard(0).getTargetRelocatingShard(), + "boom" + i + ); + + var relocationFailureInfo = clusterState.getRoutingTable().index("idx").shard(0).shard(0).relocationFailureInfo(); + assertThat(relocationFailureInfo.failedRelocations(), equalTo(i + 1)); + } + + // shard could not be relocated when retries are exhausted + withRoutingAllocation(clusterState, allocation -> { + var source = allocation.routingTable().index("idx").shard(0).shard(0); + assertThat(decider.canAllocate(source, allocation).type(), equalTo(Decision.Type.NO)); + }); + + // manually reset retry count + clusterState = strategy.reroute(clusterState, new AllocationCommands(), false, true).clusterState(); + + // shard could be relocated again + withRoutingAllocation(clusterState, allocation -> { + var source = allocation.routingTable().index("idx").shard(0).shard(0); + assertThat(decider.canAllocate(source, allocation).type(), equalTo(Decision.Type.YES)); + }); + } + + private ClusterState applyShardFailure(ClusterState clusterState, ShardRouting shardRouting, String message) { + return strategy.applyFailedShards( + clusterState, + List.of(new FailedShard(shardRouting, message, new RuntimeException("test"), randomBoolean())), + List.of() ); } + private static ClusterState withRoutingAllocation(ClusterState clusterState, Consumer block) { + RoutingAllocation allocation = new RoutingAllocation( + null, + clusterState.mutableRoutingNodes(), + clusterState, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + 0L + ); + block.accept(allocation); + return updateClusterState(clusterState, allocation); + } + + private static ClusterState updateClusterState(ClusterState state, RoutingAllocation allocation) { 
+ assert allocation.metadata() == state.metadata(); + if (allocation.routingNodesChanged() == false) { + return state; + } + final RoutingTable newRoutingTable = RoutingTable.of(state.routingTable().version(), allocation.routingNodes()); + final Metadata newMetadata = allocation.updateMetadataWithRoutingChanges(newRoutingTable); + assert newRoutingTable.validate(newMetadata); + + return state.copyAndUpdate(builder -> builder.routingTable(newRoutingTable).metadata(newMetadata)); + } + private RoutingAllocation newRoutingAllocation(ClusterState clusterState) { final var routingAllocation = new RoutingAllocation(null, clusterState, null, null, 0); if (randomBoolean()) { @@ -283,5 +313,4 @@ private RoutingAllocation newRoutingAllocation(ClusterState clusterState) { } return routingAllocation; } - } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java index bd484038ebca6..c82a43ed6b108 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java @@ -48,6 +48,7 @@ public static ShardRouting initWithSameId(ShardRouting copy, RecoverySource reco ShardRoutingState.INITIALIZING, recoverySource, new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null), + RelocationFailureInfo.NO_FAILURES, copy.allocationId(), copy.getExpectedShardSize() ); @@ -66,6 +67,7 @@ public static ShardRouting newWithRestoreSource(ShardRouting routing, SnapshotRe routing.state(), recoverySource, routing.unassignedInfo(), + routing.relocationFailureInfo(), routing.allocationId(), routing.getExpectedShardSize() ); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java index 3b2356670a74f..76858a91ff526 100644 
--- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java @@ -22,6 +22,7 @@ import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomIntBetween; /** * A helper that allows to create shard routing instances within tests, while not requiring to expose @@ -42,6 +43,7 @@ public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), + buildRelocationFailureInfo(state), buildAllocationId(state), -1 ); @@ -62,6 +64,7 @@ public static ShardRouting newShardRouting( state, recoverySource, buildUnassignedInfo(state), + buildRelocationFailureInfo(state), buildAllocationId(state), -1 ); @@ -99,6 +102,7 @@ public static ShardRouting newShardRouting( state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), + buildRelocationFailureInfo(state), buildAllocationId(state), -1 ); @@ -139,6 +143,7 @@ public static ShardRouting newShardRouting( state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), + buildRelocationFailureInfo(state), allocationId, -1 ); @@ -179,6 +184,7 @@ public static ShardRouting newShardRouting( state, buildRecoveryTarget(primary, state), unassignedInfo, + buildRelocationFailureInfo(state), buildAllocationId(state), -1 ); @@ -230,6 +236,13 @@ private static UnassignedInfo buildUnassignedInfo(ShardRoutingState state) { }; } + private static RelocationFailureInfo buildRelocationFailureInfo(ShardRoutingState state) { + return switch (state) { + case UNASSIGNED, INITIALIZING, STARTED -> RelocationFailureInfo.NO_FAILURES; + case RELOCATING -> randomBoolean() ? 
RelocationFailureInfo.NO_FAILURES : new RelocationFailureInfo(randomIntBetween(1, 10)); + }; + } + public static UnassignedInfo randomUnassignedInfo(String message) { UnassignedInfo.Reason reason = randomFrom(UnassignedInfo.Reason.values()); String lastAllocatedNodeId = null; From be149bdbf8783f94e7fa40db5c1993345c2aa4c9 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Sep 2022 13:49:40 +0100 Subject: [PATCH 09/26] Fix up ClusterServiceIT (#90397) ClusterServiceIT#testPendingUpdateTask has some unbounded waits, it relies on the clock advancing by at least 1ms which might not happen, and it leaves the cluster service thread blocked on failure which causes knock-on effects. This commit addresses these problems. --- .../cluster/service/ClusterServiceIT.java | 70 +++++++++++-------- 1 file changed, 39 insertions(+), 31 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index af64d252a45d9..064949bf4bae5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -384,7 +384,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) public ClusterState execute(ClusterState currentState) { invoked3.countDown(); try { - block2.await(); + assertTrue(block2.await(10, TimeUnit.SECONDS)); } catch (InterruptedException e) { fail(); } @@ -397,40 +397,48 @@ public void onFailure(Exception e) { fail(); } }); - invoked3.await(); - - for (int i = 2; i <= 5; i++) { - clusterService.submitUnbatchedStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } + assertTrue(invoked3.await(10, TimeUnit.SECONDS)); + + try { + for (int i = 2; i <= 5; 
i++) { + clusterService.submitUnbatchedStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void onFailure(Exception e) { + fail(); + } + }); + } - @Override - public void onFailure(Exception e) { - fail(); - } - }); - } - Thread.sleep(100); + final var startNanoTime = System.nanoTime(); + while (TimeUnit.MILLISECONDS.convert(System.nanoTime() - startNanoTime, TimeUnit.NANOSECONDS) <= 0) { + // noinspection BusyWait + Thread.sleep(100); + } - pendingClusterTasks = clusterService.getMasterService().pendingTasks(); - assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(5)); - controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); - for (PendingClusterTask task : pendingClusterTasks) { - controlSources.remove(task.getSource().string()); - } - assertTrue(controlSources.isEmpty()); + pendingClusterTasks = clusterService.getMasterService().pendingTasks(); + assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(5)); + controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); + for (PendingClusterTask task : pendingClusterTasks) { + controlSources.remove(task.getSource().string()); + } + assertTrue(controlSources.isEmpty()); - response = internalCluster().coordOnlyNodeClient().admin().cluster().preparePendingClusterTasks().get(); - assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5)); - controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); - for (PendingClusterTask task : response) { - if (controlSources.remove(task.getSource().string())) { - assertThat(task.getTimeInQueueInMillis(), greaterThan(0L)); + response = internalCluster().coordOnlyNodeClient().admin().cluster().preparePendingClusterTasks().get(); + assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5)); + controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); + for 
(PendingClusterTask task : response) { + if (controlSources.remove(task.getSource().string())) { + assertThat(task.getTimeInQueueInMillis(), greaterThan(0L)); + } } + assertTrue(controlSources.isEmpty()); + } finally { + block2.countDown(); } - assertTrue(controlSources.isEmpty()); - block2.countDown(); } } From f823d7d4d433cb6f38ace7177809fce2c202c73d Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Tue, 27 Sep 2022 15:52:01 +0300 Subject: [PATCH 10/26] Remove legacy translog header versions and checks (#90395) Remove legacy translog header versions and checks Fixes #42699 --- .../index/translog/TranslogHeader.java | 31 ++-------------- .../index/translog/TranslogHeaderTests.java | 35 +++++++------------ 2 files changed, 15 insertions(+), 51 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java index 21e2311306b7f..a43edd4e256f1 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java @@ -15,7 +15,6 @@ import org.apache.lucene.store.InputStreamDataInput; import org.apache.lucene.store.OutputStreamDataOutput; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -31,9 +30,7 @@ final class TranslogHeader { public static final String TRANSLOG_CODEC = "translog"; - public static final int VERSION_CHECKSUMS = 1; // pre-2.0 - unsupported - public static final int VERSION_CHECKPOINTS = 2; // added checkpoints - public static final int VERSION_PRIMARY_TERM = 3; // added primary term + public static final int VERSION_PRIMARY_TERM = 3; // with: checksums, checkpoints and primary term public static final int 
CURRENT_VERSION = VERSION_PRIMARY_TERM; private final String translogUUID; @@ -96,17 +93,10 @@ private static int headerSizeInBytes(int version, int uuidLength) { static int readHeaderVersion(final Path path, final FileChannel channel, final StreamInput in) throws IOException { final int version; try { - version = CodecUtil.checkHeader(new InputStreamDataInput(in), TRANSLOG_CODEC, VERSION_CHECKSUMS, VERSION_PRIMARY_TERM); + version = CodecUtil.checkHeader(new InputStreamDataInput(in), TRANSLOG_CODEC, VERSION_PRIMARY_TERM, VERSION_PRIMARY_TERM); } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) { - tryReportOldVersionError(path, channel); throw new TranslogCorruptedException(path.toString(), "translog header corrupted", e); } - if (version == VERSION_CHECKSUMS) { - throw new IllegalStateException("pre-2.0 translog found [" + path + "]"); - } - if (version == VERSION_CHECKPOINTS) { - throw new IllegalStateException("pre-6.3 translog found [" + path + "]"); - } return version; } @@ -158,23 +148,6 @@ static TranslogHeader read(final String translogUUID, final Path path, final Fil } } - private static void tryReportOldVersionError(final Path path, final FileChannel channel) throws IOException { - // Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the header, in binary this looks like: - // binary: 0011 1111 1101 0111 0110 1100 0001 0111 - // hex : 3 f d 7 6 c 1 7 - // - // With version 0 of the translog, the first byte is the Operation.Type, which will always be between 0-4, - // so we know if we grab the first byte, it can be: - // 0x3f => Lucene's magic number, so we can assume it's version 1 or later - // 0x00 => version 0 of the translog - final byte b1 = Channels.readFromFileChannel(channel, 0, 1)[0]; - if (b1 == 0x3f) { // LUCENE_CODEC_HEADER_BYTE - throw new TranslogCorruptedException(path.toString(), "translog looks like version 1 or later, but has corrupted header"); - } else if (b1 == 0x00) { // 
UNVERSIONED_TRANSLOG_HEADER_BYTE - throw new IllegalStateException("pre-1.4 translog found [" + path + "]"); - } - } - /** * Writes this header with the latest format into the file channel */ diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java index c47ed7c2c39e7..ea639860d4e09 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java @@ -17,9 +17,7 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; @@ -51,37 +49,30 @@ public void testCurrentHeaderVersion() throws Exception { final TranslogCorruptedException corruption = expectThrows(TranslogCorruptedException.class, () -> { try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { TranslogHeader.read(randomBoolean() ? 
outHeader.getTranslogUUID() : UUIDs.randomBase64UUID(), translogFile, channel); - } catch (IllegalStateException e) { - // corruption corrupted the version byte making this look like a v2, v1 or v0 translog - assertThat( - "version " + TranslogHeader.VERSION_CHECKPOINTS + "-or-earlier translog", - e.getMessage(), - anyOf( - containsString("pre-2.0 translog found"), - containsString("pre-1.4 translog found"), - containsString("pre-6.3 translog found") - ) - ); - throw new TranslogCorruptedException(translogFile.toString(), "adjusted translog version", e); } }); assertThat(corruption.getMessage(), not(containsString("this translog file belongs to a different translog"))); } public void testLegacyTranslogVersions() { - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v0.binary", IllegalStateException.class, "pre-1.4 translog"); - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1.binary", IllegalStateException.class, "pre-2.0 translog"); - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v2.binary", IllegalStateException.class, "pre-6.3 translog"); - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-truncated.binary", IllegalStateException.class, "pre-2.0 translog"); + final String expectedMessage = "translog header corrupted"; + checkFailsToOpen("/org/elasticsearch/index/translog/translog-v0.binary", TranslogCorruptedException.class, expectedMessage); + checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1.binary", TranslogCorruptedException.class, expectedMessage); + checkFailsToOpen("/org/elasticsearch/index/translog/translog-v2.binary", TranslogCorruptedException.class, expectedMessage); + checkFailsToOpen( + "/org/elasticsearch/index/translog/translog-v1-truncated.binary", + TranslogCorruptedException.class, + expectedMessage + ); checkFailsToOpen( "/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary", TranslogCorruptedException.class, - "translog looks like version 1 or later, but has 
corrupted header" + expectedMessage ); checkFailsToOpen( "/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary", - IllegalStateException.class, - "pre-2.0 translog" + TranslogCorruptedException.class, + expectedMessage ); } @@ -101,7 +92,7 @@ public void testCorruptTranslogHeader() throws Exception { TranslogHeader.read(randomValueOtherThan(translogUUID, UUIDs::randomBase64UUID), translogFile, channel); } }); - assertThat(error, either(instanceOf(IllegalStateException.class)).or(instanceOf(TranslogCorruptedException.class))); + assertThat(error, instanceOf(TranslogCorruptedException.class)); } private void checkFailsToOpen(String file, Class expectedErrorType, String expectedMessage) { From 049a50a2bf54077667200ca023373906ee8e1398 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Tue, 27 Sep 2022 14:52:43 +0200 Subject: [PATCH 11/26] Adjust testing of the max amount of threads for the snapshot thread pool (#90363) #90282 set the max amount of threads for the snapshot thread pool to be 10 regardless of the amount of processors. 
--- .../org/elasticsearch/threadpool/ScalingThreadPoolTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index c2111f01558f9..07b8a629c81e6 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -39,7 +39,6 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90336") public void testScalingThreadPoolConfiguration() throws InterruptedException { final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final Settings.Builder builder = Settings.builder(); @@ -74,7 +73,7 @@ public void testScalingThreadPoolConfiguration() throws InterruptedException { expectedMax = randomIntBetween(Math.max(1, core), 16); builder.put("thread_pool." + threadPoolName + ".max", expectedMax); } else { - expectedMax = maxBasedOnNumberOfProcessors; + expectedMax = threadPoolName.equals(ThreadPool.Names.SNAPSHOT) ? 
10 : maxBasedOnNumberOfProcessors; } final long keepAlive; From 1cb534f10954701f285df983c84cd9128e98576a Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 27 Sep 2022 15:21:37 +0200 Subject: [PATCH 12/26] upgrade to lucene-9.4.0-snapshot-d2e22e18c6c (#90400) --- build-tools-internal/version.properties | 2 +- build.gradle | 4 +- gradle/verification-metadata.xml | 120 ++++++++++++++++++++++++ 3 files changed, 123 insertions(+), 3 deletions(-) diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index efb78f1ffdd5b..50a6f1aaf3477 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.6.0 -lucene = 9.4.0-snapshot-f5d0646daa5 +lucene = 9.4.0-snapshot-d2e22e18c6c bundled_jdk_vendor = openjdk bundled_jdk = 18.0.2.1+1@db379da656dc47308e138f21b33976fa diff --git a/build.gradle b/build.gradle index e1e11e60e110e..133f7768370c0 100644 --- a/build.gradle +++ b/build.gradle @@ -137,9 +137,9 @@ tasks.register("verifyVersions") { * after the backport of the backcompat code is complete. 
*/ -boolean bwc_tests_enabled = true +boolean bwc_tests_enabled = false // place a PR link here when committing bwc changes: -String bwc_tests_disabled_issue = "" +String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/90400" if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 6f6567e428780..a630b05840f26 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -2481,6 +2481,11 @@ + + + + + @@ -2496,6 +2501,11 @@ + + + + + @@ -2511,6 +2521,11 @@ + + + + + @@ -2526,6 +2541,11 @@ + + + + + @@ -2541,6 +2561,11 @@ + + + + + @@ -2556,6 +2581,11 @@ + + + + + @@ -2571,6 +2601,11 @@ + + + + + @@ -2586,6 +2621,11 @@ + + + + + @@ -2601,6 +2641,11 @@ + + + + + @@ -2616,6 +2661,11 @@ + + + + + @@ -2631,6 +2681,11 @@ + + + + + @@ -2646,6 +2701,11 @@ + + + + + @@ -2661,6 +2721,11 @@ + + + + + @@ -2676,6 +2741,11 @@ + + + + + @@ -2691,6 +2761,11 @@ + + + + + @@ -2706,6 +2781,11 @@ + + + + + @@ -2721,6 +2801,11 @@ + + + + + @@ -2736,6 +2821,11 @@ + + + + + @@ -2751,6 +2841,11 @@ + + + + + @@ -2766,6 +2861,11 @@ + + + + + @@ -2781,6 +2881,11 @@ + + + + + @@ -2796,6 +2901,11 @@ + + + + + @@ -2811,6 +2921,11 @@ + + + + + @@ -2826,6 +2941,11 @@ + + + + + From 02fb26c05303f15efbc1c807672e1c04c182440b Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Sep 2022 14:27:48 +0100 Subject: [PATCH 13/26] Move testStateRecoveryResetAfterPreviousLeadership (#90403) This test was added to the top of `CoordinatorTests` for some reason, but it's rather an obscure test and doesn't deserve such a prominent position. This commit moves it down the class so that the top tests are a better starting point for exploration. 
--- .../coordination/CoordinatorTests.java | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 86997b2bd444f..82c792622eeab 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -97,44 +97,6 @@ @TestLogging(reason = "these tests do a lot of log-worthy things but we usually don't care", value = "org.elasticsearch:FATAL") public class CoordinatorTests extends AbstractCoordinatorTestCase { - /** - * This test was added to verify that state recovery is properly reset on a node after it has become master and successfully - * recovered a state (see {@link GatewayService}). The situation which triggers this with a decent likelihood is as follows: - * 3 master-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back - * one of them becomes leader and publishes first state (with STATE_NOT_RECOVERED_BLOCK) to old leader, which accepts it. - * Old leader is initiating an election at the same time, and wins election. It becomes leader again, but as it previously - * successfully completed state recovery, is never reset to a state where state recovery can be retried. 
- */ - public void testStateRecoveryResetAfterPreviousLeadership() { - try (Cluster cluster = new Cluster(3)) { - cluster.runRandomly(); - cluster.stabilise(); - - final ClusterNode leader = cluster.getAnyLeader(); - final ClusterNode follower1 = cluster.getAnyNodeExcept(leader); - final ClusterNode follower2 = cluster.getAnyNodeExcept(leader, follower1); - - // restart follower1 and follower2 - for (ClusterNode clusterNode : Arrays.asList(follower1, follower2)) { - clusterNode.close(); - cluster.clusterNodes.forEach(cn -> cluster.deterministicTaskQueue.scheduleNow(cn.onNode(new Runnable() { - @Override - public void run() { - cn.transportService.disconnectFromNode(clusterNode.getLocalNode()); - } - - @Override - public String toString() { - return "disconnect from " + clusterNode.getLocalNode() + " after shutdown"; - } - }))); - cluster.clusterNodes.replaceAll(cn -> cn == clusterNode ? cn.restartedNode() : cn); - } - - cluster.stabilise(); - } - } - public void testCanUpdateClusterStateAfterStabilisation() { try (Cluster cluster = new Cluster(randomIntBetween(1, 5))) { cluster.runRandomly(); @@ -754,6 +716,44 @@ public void testUnresponsiveFollowerDetectedEventually() { } } + /** + * This test was added to verify that state recovery is properly reset on a node after it has become master and successfully + * recovered a state (see {@link GatewayService}). The situation which triggers this with a decent likelihood is as follows: + * 3 master-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back + * one of them becomes leader and publishes first state (with STATE_NOT_RECOVERED_BLOCK) to old leader, which accepts it. + * Old leader is initiating an election at the same time, and wins election. It becomes leader again, but as it previously + * successfully completed state recovery, is never reset to a state where state recovery can be retried. 
+ */ + public void testStateRecoveryResetAfterPreviousLeadership() { + try (Cluster cluster = new Cluster(3)) { + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode leader = cluster.getAnyLeader(); + final ClusterNode follower1 = cluster.getAnyNodeExcept(leader); + final ClusterNode follower2 = cluster.getAnyNodeExcept(leader, follower1); + + // restart follower1 and follower2 + for (ClusterNode clusterNode : Arrays.asList(follower1, follower2)) { + clusterNode.close(); + cluster.clusterNodes.forEach(cn -> cluster.deterministicTaskQueue.scheduleNow(cn.onNode(new Runnable() { + @Override + public void run() { + cn.transportService.disconnectFromNode(clusterNode.getLocalNode()); + } + + @Override + public String toString() { + return "disconnect from " + clusterNode.getLocalNode() + " after shutdown"; + } + }))); + cluster.clusterNodes.replaceAll(cn -> cn == clusterNode ? cn.restartedNode() : cn); + } + + cluster.stabilise(); + } + } + public void testAckListenerReceivesAcksFromAllNodes() { try (Cluster cluster = new Cluster(randomIntBetween(3, 5))) { cluster.runRandomly(); From f0aa4e7a42442f7aa4a3d26cd5d67c85448c580b Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Tue, 27 Sep 2022 14:47:37 +0100 Subject: [PATCH 14/26] Update error states from inside the main state executor (#90346) * Do a check inside the main cluster state executor to confirm we should apply an error state This fixes #90337 --- docs/changelog/90346.yaml | 6 +++ .../service/ReservedClusterStateService.java | 46 ++++++------------- .../service/ReservedStateErrorTask.java | 33 +++++++++++++ .../ReservedStateErrorTaskExecutor.java | 8 +++- .../service/ReservedStateUpdateTask.java | 17 ++++--- .../ReservedClusterStateServiceTests.java | 24 +++++----- 6 files changed, 83 insertions(+), 51 deletions(-) create mode 100644 docs/changelog/90346.yaml diff --git a/docs/changelog/90346.yaml b/docs/changelog/90346.yaml new file mode 100644 index 0000000000000..bf9388a49175c --- /dev/null +++ 
b/docs/changelog/90346.yaml @@ -0,0 +1,6 @@ +pr: 90346 +summary: Update error states from inside the main state executor +area: Infra/Core +type: bug +issues: + - 90337 diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java index 1a14afa6176fe..199502f2d9c82 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java @@ -40,6 +40,8 @@ import static org.elasticsearch.ExceptionsHelper.stackTrace; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.reservedstate.service.ReservedStateErrorTask.checkErrorVersion; +import static org.elasticsearch.reservedstate.service.ReservedStateErrorTask.isNewError; import static org.elasticsearch.reservedstate.service.ReservedStateUpdateTask.checkMetadataVersion; import static org.elasticsearch.reservedstate.service.ReservedStateUpdateTask.keysForHandler; @@ -110,7 +112,7 @@ public void process(String namespace, XContentParser parser, Consumer stateChunk = stateChunkParser.apply(parser, null); } catch (Exception e) { ErrorState errorState = new ErrorState(namespace, -1L, e, ReservedStateErrorMetadata.ErrorKind.PARSING); - saveErrorState(clusterService.state(), errorState); + updateErrorState(errorState); logger.debug("error processing state change request for [{}] with the following errors [{}]", namespace, errorState); errorListener.accept( @@ -145,7 +147,7 @@ public void process(String namespace, ReservedStateChunk reservedStateChunk, Con ReservedStateErrorMetadata.ErrorKind.PARSING ); - saveErrorState(clusterService.state(), errorState); + updateErrorState(errorState); logger.debug("error processing state change request for [{}] with the following errors [{}]", namespace, errorState); errorListener.accept( 
@@ -167,7 +169,8 @@ public void process(String namespace, ReservedStateChunk reservedStateChunk, Con // We trial run all handler validations to ensure that we can process all of the cluster state error free. During // the trial run we collect 'consumers' (functions) for any non cluster state transforms that need to run. var trialRunResult = trialRun(namespace, state, reservedStateChunk, orderedHandlers); - var error = checkAndReportError(namespace, trialRunResult.errors, state, reservedStateVersion); + // this is not using the modified trial state above, but that doesn't matter, we're just setting errors here + var error = checkAndReportError(namespace, trialRunResult.errors, reservedStateVersion); if (error != null) { errorListener.accept(error); @@ -192,7 +195,7 @@ public void onResponse(Collection nonStateTransformResu nonStateTransformResults, handlers, orderedHandlers, - (clusterState, errorState) -> saveErrorState(clusterState, errorState), + ReservedClusterStateService.this::updateErrorState, new ActionListener<>() { @Override public void onResponse(ActionResponse.Empty empty) { @@ -220,18 +223,13 @@ public void onFailure(Exception e) { @Override public void onFailure(Exception e) { // If we encounter an error while runnin the non-state transforms, we avoid saving any cluster state. 
- errorListener.accept(checkAndReportError(namespace, List.of(e.getMessage()), state, reservedStateVersion)); + errorListener.accept(checkAndReportError(namespace, List.of(e.getMessage()), reservedStateVersion)); } }); } // package private for testing - Exception checkAndReportError( - String namespace, - List errors, - ClusterState currentState, - ReservedStateVersion reservedStateVersion - ) { + Exception checkAndReportError(String namespace, List errors, ReservedStateVersion reservedStateVersion) { // Any errors should be discovered through validation performed in the transform calls if (errors.isEmpty() == false) { logger.debug("Error processing state change request for [{}] with the following errors [{}]", namespace, errors); @@ -243,7 +241,7 @@ Exception checkAndReportError( ReservedStateErrorMetadata.ErrorKind.VALIDATION ); - saveErrorState(currentState, errorState); + updateErrorState(errorState); return new IllegalStateException("Error processing state change request for " + namespace + ", errors: " + errorState); } @@ -252,26 +250,10 @@ Exception checkAndReportError( } // package private for testing - static boolean isNewError(ReservedStateMetadata existingMetadata, Long newStateVersion) { - return (existingMetadata == null - || existingMetadata.errorMetadata() == null - || newStateVersion <= 0 // version will be -1 when we can't even parse the file, it might be 0 on snapshot restore - || existingMetadata.errorMetadata().version() < newStateVersion); - } - - // package private for testing - void saveErrorState(ClusterState clusterState, ErrorState errorState) { - ReservedStateMetadata existingMetadata = clusterState.metadata().reservedStateMetadata().get(errorState.namespace()); - - if (isNewError(existingMetadata, errorState.version()) == false) { - logger.info( - () -> format( - "Not updating error state because version [%s] is less or equal to the last state error version [%s]", - errorState.version(), - existingMetadata.errorMetadata().version() - ) - 
); - + void updateErrorState(ErrorState errorState) { + // optimistic check here - the cluster state might change after this, so also need to re-check later + if (checkErrorVersion(clusterService.state(), errorState) == false) { + // nothing to update return; } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTask.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTask.java index b6380255f87ee..0be4a7972d05c 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTask.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTask.java @@ -8,6 +8,8 @@ package org.elasticsearch.reservedstate.service; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -16,6 +18,8 @@ import org.elasticsearch.cluster.metadata.ReservedStateErrorMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; +import static org.elasticsearch.core.Strings.format; + /** * Cluster state update task that sets the error state of the reserved cluster state metadata. *

@@ -23,6 +27,7 @@ * the {@link ReservedStateChunk}. */ public class ReservedStateErrorTask implements ClusterStateTaskListener { + private static final Logger logger = LogManager.getLogger(ReservedStateErrorTask.class); private final ErrorState errorState; private final ActionListener listener; @@ -41,6 +46,34 @@ ActionListener listener() { return listener; } + // package private for testing + static boolean isNewError(ReservedStateMetadata existingMetadata, Long newStateVersion) { + return (existingMetadata == null + || existingMetadata.errorMetadata() == null + || newStateVersion <= 0 // version will be -1 when we can't even parse the file, it might be 0 on snapshot restore + || existingMetadata.errorMetadata().version() < newStateVersion); + } + + static boolean checkErrorVersion(ClusterState currentState, ErrorState errorState) { + ReservedStateMetadata existingMetadata = currentState.metadata().reservedStateMetadata().get(errorState.namespace()); + // check for noop here + if (isNewError(existingMetadata, errorState.version()) == false) { + logger.info( + () -> format( + "Not updating error state because version [%s] is less or equal to the last state error version [%s]", + errorState.version(), + existingMetadata.errorMetadata().version() + ) + ); + return false; + } + return true; + } + + boolean shouldUpdate(ClusterState currentState) { + return checkErrorVersion(currentState, errorState); + } + ClusterState execute(ClusterState currentState) { ClusterState.Builder stateBuilder = new ClusterState.Builder(currentState); Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java index 4bd7b7fb44f66..af0698c7cc642 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java +++ 
b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java @@ -25,11 +25,15 @@ record ReservedStateErrorTaskExecutor() implements ClusterStateTaskExecutor batchExecutionContext) { var updatedState = batchExecutionContext.initialState(); + for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); - try (var ignored = taskContext.captureResponseHeaders()) { - updatedState = task.execute(updatedState); + if (task.shouldUpdate(updatedState)) { + try (var ignored = taskContext.captureResponseHeaders()) { + updatedState = task.execute(updatedState); + } } + // if the task didn't run, it still 'succeeded', it just didn't have any effect taskContext.success(() -> task.listener().onResponse(ActionResponse.Empty.INSTANCE)); } return updatedState; diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java index f6f708af25781..08f576f4a37e0 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java @@ -29,7 +29,7 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.BiConsumer; +import java.util.function.Consumer; import static org.elasticsearch.ExceptionsHelper.stackTrace; import static org.elasticsearch.core.Strings.format; @@ -48,7 +48,7 @@ public class ReservedStateUpdateTask implements ClusterStateTaskListener { private final ReservedStateChunk stateChunk; private final Map> handlers; private final Collection orderedHandlers; - private final BiConsumer errorReporter; + private final Consumer errorReporter; private final ActionListener listener; private final Collection nonStateTransformResults; @@ -58,7 +58,7 @@ public ReservedStateUpdateTask( Collection 
nonStateTransformResults, Map> handlers, Collection orderedHandlers, - BiConsumer errorReporter, + Consumer errorReporter, ActionListener listener ) { this.namespace = namespace; @@ -105,7 +105,7 @@ protected ClusterState execute(final ClusterState currentState) { } } - checkAndThrowOnError(errors, currentState, reservedStateVersion); + checkAndThrowOnError(errors, reservedStateVersion); // Once we have set all of the handler state from the cluster state update tasks, we add the reserved keys // from the non cluster state transforms. @@ -122,7 +122,7 @@ protected ClusterState execute(final ClusterState currentState) { return stateBuilder.metadata(metadataBuilder).build(); } - private void checkAndThrowOnError(List errors, ClusterState currentState, ReservedStateVersion reservedStateVersion) { + private void checkAndThrowOnError(List errors, ReservedStateVersion reservedStateVersion) { // Any errors should be discovered through validation performed in the transform calls if (errors.isEmpty() == false) { logger.debug("Error processing state change request for [{}] with the following errors [{}]", namespace, errors); @@ -134,7 +134,12 @@ private void checkAndThrowOnError(List errors, ClusterState currentState ReservedStateErrorMetadata.ErrorKind.VALIDATION ); - errorReporter.accept(currentState, errorState); + /* + * It doesn't matter this reporter needs to re-access the base state, + * any updates set by this task will just be discarded when the below exception is thrown, + * and we just need to set the error state once + */ + errorReporter.accept(errorState); throw new IllegalStateException("Error processing state change request for " + namespace + ", errors: " + errorState); } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index e71958dca95c7..1af56ed9079ab 100644 --- 
a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -153,7 +153,7 @@ public void testUpdateStateTasks() throws Exception { List.of(), Collections.emptyMap(), Collections.emptySet(), - (clusterState, errorState) -> {}, + errorState -> {}, new ActionListener<>() { @Override public void onResponse(ActionResponse.Empty empty) {} @@ -326,10 +326,10 @@ public Map fromXContent(XContentParser parser) throws IOExceptio Metadata metadata = Metadata.builder().put(operatorMetadata).build(); ClusterState state = ClusterState.builder(new ClusterName("test")).metadata(metadata).build(); - assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, 2L)); - assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, 1L)); - assertTrue(ReservedClusterStateService.isNewError(operatorMetadata, 3L)); - assertTrue(ReservedClusterStateService.isNewError(null, 1L)); + assertFalse(ReservedStateErrorTask.isNewError(operatorMetadata, 2L)); + assertFalse(ReservedStateErrorTask.isNewError(operatorMetadata, 1L)); + assertTrue(ReservedStateErrorTask.isNewError(operatorMetadata, 3L)); + assertTrue(ReservedStateErrorTask.isNewError(null, 1L)); var chunk = new ReservedStateChunk(Map.of("one", "two", "maker", "three"), new ReservedStateVersion(2L, Version.CURRENT)); var orderedHandlers = List.of(exceptionThrower.name(), newStateMaker.name()); @@ -343,7 +343,7 @@ public Map fromXContent(XContentParser parser) throws IOExceptio List.of(), Map.of(exceptionThrower.name(), exceptionThrower, newStateMaker.name(), newStateMaker), orderedHandlers, - (clusterState, errorState) -> { assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, errorState.version())); }, + errorState -> assertFalse(ReservedStateErrorTask.isNewError(operatorMetadata, errorState.version())), new ActionListener<>() { @Override public void 
onResponse(ActionResponse.Empty empty) {} @@ -484,17 +484,19 @@ public void testDuplicateHandlerNames() { public void testCheckAndReportError() { ClusterService clusterService = mock(ClusterService.class); + var state = ClusterState.builder(new ClusterName("elasticsearch")).build(); + when(clusterService.state()).thenReturn(state); + final var controller = spy(new ReservedClusterStateService(clusterService, List.of())); - assertNull(controller.checkAndReportError("test", List.of(), null, null)); - verify(controller, times(0)).saveErrorState(any(), any()); + assertNull(controller.checkAndReportError("test", List.of(), null)); + verify(controller, times(0)).updateErrorState(any()); - var state = ClusterState.builder(new ClusterName("elasticsearch")).build(); var version = new ReservedStateVersion(2L, Version.CURRENT); - var error = controller.checkAndReportError("test", List.of("test error"), state, version); + var error = controller.checkAndReportError("test", List.of("test error"), version); assertThat(error, allOf(notNullValue(), instanceOf(IllegalStateException.class))); assertEquals("Error processing state change request for test, errors: test error", error.getMessage()); - verify(controller, times(1)).saveErrorState(any(), any()); + verify(controller, times(1)).updateErrorState(any()); } public void testTrialRunExtractsNonStateActions() { From e89b4b86a22e9cc5034f8f64c9e2b7549e12e09c Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Sep 2022 14:42:06 +0100 Subject: [PATCH 15/26] AwaitsFix for #90407 --- .../org/elasticsearch/action/search/SearchResponseTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index 4815030e73a7b..6b229310ad3a0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -142,6 +142,7 @@ static SearchResponse.Clusters randomClusters() { * the "_shard/total/failures" section makes it impossible to directly * compare xContent, so we omit it here */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90407") public void testFromXContent() throws IOException { doFromXContentTestWithRandomFields(createTestItem(), false); } @@ -181,6 +182,7 @@ private void doFromXContentTestWithRandomFields(SearchResponse response, boolean * Because of this, in this special test case we compare the "top level" fields for equality * and the subsections xContent equivalence independently */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90407") public void testFromXContentWithFailures() throws IOException { int numFailures = randomIntBetween(1, 5); ShardSearchFailure[] failures = new ShardSearchFailure[numFailures]; From 2d68beb2f15c3b78970fd4f5a0de4f22161ce9e2 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Sep 2022 15:04:09 +0100 Subject: [PATCH 16/26] Tidy up testMasterAwareExecution (#90382) This test doesn't fail tidily (it doesn't bound the waiting time on the latches, nor does it clean up the `MasterService` on failure) and it checks for success in a rather roundabout way. This commit improves the test. 
--- .../cluster/service/MasterServiceTests.java | 64 ++++++++----------- 1 file changed, 28 insertions(+), 36 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 1f4a4279af9e5..6ed1436327dd5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.LocalMasterServiceTask; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.ack.AckedRequest; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.coordination.ClusterStatePublisher; @@ -152,45 +153,36 @@ private MasterService createMasterService(boolean makeMaster, TaskManager taskMa } public void testMasterAwareExecution() throws Exception { - final MasterService nonMaster = createMasterService(false); - - final boolean[] taskFailed = { false }; - final CountDownLatch latch1 = new CountDownLatch(1); - nonMaster.submitUnbatchedStateUpdateTask("test", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - latch1.countDown(); - return currentState; - } - - @Override - public void onFailure(Exception e) { - taskFailed[0] = true; - latch1.countDown(); - } - }); - - latch1.await(); - assertTrue("cluster state update task was executed on a non-master", taskFailed[0]); + try (var nonMaster = createMasterService(false)) { + final CountDownLatch latch1 = new CountDownLatch(1); + nonMaster.submitUnbatchedStateUpdateTask("test", new ClusterStateUpdateTask(randomFrom(Priority.values())) { + @Override + public ClusterState execute(ClusterState currentState) { + throw new AssertionError("should not 
execute this task"); + } - final CountDownLatch latch2 = new CountDownLatch(1); - new LocalMasterServiceTask(Priority.NORMAL) { - @Override - public void execute(ClusterState currentState) { - taskFailed[0] = false; - latch2.countDown(); - } + @Override + public void onFailure(Exception e) { + assert e instanceof NotMasterException : e; + latch1.countDown(); + } + }); + assertTrue(latch1.await(10, TimeUnit.SECONDS)); - @Override - public void onFailure(Exception e) { - taskFailed[0] = true; - latch2.countDown(); - } - }.submit(nonMaster, "test"); - latch2.await(); - assertFalse("non-master cluster state update task was not executed", taskFailed[0]); + final CountDownLatch latch2 = new CountDownLatch(1); + new LocalMasterServiceTask(randomFrom(Priority.values())) { + @Override + public void execute(ClusterState currentState) { + latch2.countDown(); + } - nonMaster.close(); + @Override + public void onFailure(Exception e) { + throw new AssertionError("should not fail this task", e); + } + }.submit(nonMaster, "test"); + assertTrue(latch2.await(10, TimeUnit.SECONDS)); + } } /** From f2154e86870dc93e2e5a97469d8f4a7c9f4fad92 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Tue, 27 Sep 2022 16:14:38 +0200 Subject: [PATCH 17/26] Fix unnecessary string concatenations (#90405) --- .../org/elasticsearch/client/DeadHostState.java | 2 +- .../java/org/elasticsearch/client/Response.java | 2 +- .../org/elasticsearch/client/ResponseException.java | 2 +- .../java/org/elasticsearch/client/RestClient.java | 4 +--- .../elasticsearch/client/DeadHostStateTests.java | 2 +- .../client/RestClientBuilderTests.java | 2 +- .../org/elasticsearch/client/sniff/Sniffer.java | 2 +- .../core/internal/provider/EmbeddedModulePath.java | 2 +- .../geometry/utils/GeographyValidator.java | 4 +--- .../geometry/utils/StandardValidator.java | 4 +--- .../org/elasticsearch/geometry/LinearRingTests.java | 2 +- .../org/elasticsearch/geometry/PolygonTests.java | 2 +- .../action/admin/ReloadSecureSettingsIT.java 
| 4 +--- .../allocation/ClusterAllocationExplainIT.java | 2 +- .../coordination/RemoveCustomsCommandIT.java | 5 +---- .../coordination/RemoveSettingsCommandIT.java | 2 +- .../discovery/ClusterDisruptionIT.java | 2 +- .../indices/recovery/IndexRecoveryIT.java | 2 +- .../indices/template/SimpleIndexTemplateIT.java | 4 ++-- .../IngestProcessorNotInstalledOnAllNodesIT.java | 2 +- .../aggregations/pipeline/BucketScriptIT.java | 2 +- .../search/aggregations/pipeline/BucketSortIT.java | 2 +- .../subphase/highlight/HighlighterSearchIT.java | 13 +++---------- .../versioning/ConcurrentSeqNoVersioningIT.java | 2 +- .../versioning/SimpleVersioningIT.java | 4 +--- .../main/java/org/elasticsearch/env/ShardLock.java | 2 +- .../index/cache/bitset/BitsetFilterCache.java | 2 +- .../index/codec/PerFieldMapperCodec.java | 2 +- .../index/engine/DeleteVersionValue.java | 2 +- .../ElasticsearchConcurrentMergeScheduler.java | 2 +- .../java/org/elasticsearch/index/engine/Engine.java | 2 +- .../index/engine/IndexVersionValue.java | 2 +- .../elasticsearch/index/engine/VersionValue.java | 2 +- .../elasticsearch/index/mapper/RuntimeField.java | 2 +- .../org/elasticsearch/index/mapper/TypeParsers.java | 2 +- .../mapper/flattened/FlattenedFieldParser.java | 4 ++-- .../mapper/vectors/DenseVectorFieldMapper.java | 4 ++-- .../index/query/AbstractQueryBuilder.java | 2 +- .../index/query/FieldMaskingSpanQueryBuilder.java | 2 +- .../org/elasticsearch/index/query/Rewriteable.java | 4 ++-- .../index/seqno/LocalCheckpointTracker.java | 2 +- .../elasticsearch/index/seqno/RetentionLeases.java | 2 +- .../elasticsearch/index/seqno/SequenceNumbers.java | 2 +- .../org/elasticsearch/index/shard/ShardPath.java | 2 +- .../src/main/java/org/elasticsearch/node/Node.java | 2 +- .../java/org/elasticsearch/rest/RestRequest.java | 2 +- .../admin/cluster/RestClusterHealthAction.java | 2 +- .../action/admin/cluster/RestNodesStatsAction.java | 2 +- .../admin/indices/RestGetFieldMappingAction.java | 4 ++-- 
.../action/admin/indices/RestGetMappingAction.java | 2 +- .../admin/indices/RestIndicesStatsAction.java | 2 +- .../action/admin/indices/RestPutMappingAction.java | 2 +- .../admin/indices/RestValidateQueryAction.java | 2 +- .../rest/action/cat/RestNodesAction.java | 2 +- .../rest/action/document/RestBulkAction.java | 2 +- .../rest/action/document/RestMultiGetAction.java | 2 +- .../action/document/RestMultiTermVectorsAction.java | 2 +- .../rest/action/document/RestTermVectorsAction.java | 2 +- .../rest/action/search/RestExplainAction.java | 4 ++-- .../rest/action/search/RestSearchAction.java | 6 +++--- .../index/query/BoolQueryBuilderTests.java | 2 +- .../index/query/TermQueryBuilderTests.java | 2 +- .../index/query/TermsQueryBuilderTests.java | 2 +- .../query/functionscore/FunctionScoreTests.java | 2 +- 64 files changed, 74 insertions(+), 94 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java index 1858644ce5a07..52f8caff39eea 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -92,7 +92,7 @@ int getFailedAttempts() { public int compareTo(DeadHostState other) { if (timeSupplier != other.timeSupplier) { throw new IllegalArgumentException( - "can't compare DeadHostStates holding different time suppliers as they may " + "be based on different clocks" + "can't compare DeadHostStates holding different time suppliers as they may be based on different clocks" ); } return Long.compare(deadUntilNanos, other.deadUntilNanos); diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java index 9dbac4287212c..7249fe4f06568 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Response.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java @@ 
-201,6 +201,6 @@ HttpResponse getHttpResponse() { @Override public String toString() { - return "Response{" + "requestLine=" + requestLine + ", host=" + host + ", response=" + response.getStatusLine() + '}'; + return "Response{requestLine=" + requestLine + ", host=" + host + ", response=" + response.getStatusLine() + '}'; } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java b/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java index 52d59019a5fc0..d083bbe4fe616 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java +++ b/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java @@ -50,7 +50,7 @@ static String buildMessage(Response response) throws IOException { ); if (response.hasWarnings()) { - message += "\n" + "Warnings: " + response.getWarnings(); + message += "\nWarnings: " + response.getWarnings(); } HttpEntity entity = response.getEntity(); diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index d27d46df451f4..7154a2be5bbd8 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -513,9 +513,7 @@ static Iterable selectNodes( return singletonList(Collections.min(selectedDeadNodes).node); } } - throw new IOException( - "NodeSelector [" + nodeSelector + "] rejected all nodes, " + "living " + livingNodes + " and dead " + deadNodes - ); + throw new IOException("NodeSelector [" + nodeSelector + "] rejected all nodes, living " + livingNodes + " and dead " + deadNodes); } /** diff --git a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java index a7fc42f2c3a51..859db4eea6db7 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java +++ 
b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java @@ -79,7 +79,7 @@ public void testCompareToDifferingTimeSupplier() { fail("expected failure"); } catch (IllegalArgumentException e) { assertEquals( - "can't compare DeadHostStates holding different time suppliers as they may " + "be based on different clocks", + "can't compare DeadHostStates holding different time suppliers as they may be based on different clocks", e.getMessage() ); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 9e2f2c1b05c80..c7fc59361e920 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -182,7 +182,7 @@ public void testBuildCloudId() throws IOException { RestClient.builder(badId + ":"); fail("should have failed"); } catch (IllegalStateException e) { - assertEquals("cloudId " + badId + ":" + " must begin with a human readable identifier followed by a colon", e.getMessage()); + assertEquals("cloudId " + badId + ": must begin with a human readable identifier followed by a colon", e.getMessage()); } RestClient client = RestClient.builder(encodedId).build(); diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index ea434d054a90a..ca17db3ef3a3d 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -147,7 +147,7 @@ public void run() { ScheduledTask previousTask = nextScheduledTask; nextScheduledTask = new ScheduledTask(task, future); assert initialized.get() == false || previousTask.task.isSkipped() || previousTask.task.hasStarted() - : "task that we are replacing is neither " + "cancelled nor has 
it ever started"; + : "task that we are replacing is neither cancelled nor has it ever started"; } } diff --git a/libs/core/src/main/java/org/elasticsearch/core/internal/provider/EmbeddedModulePath.java b/libs/core/src/main/java/org/elasticsearch/core/internal/provider/EmbeddedModulePath.java index 4e880a416e609..abf29c3c43656 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/internal/provider/EmbeddedModulePath.java +++ b/libs/core/src/main/java/org/elasticsearch/core/internal/provider/EmbeddedModulePath.java @@ -213,7 +213,7 @@ static Optional toPackageName(Path file, String separator) { if (parent == null) { String name = file.toString(); if (name.endsWith(".class") && name.equals(MODULE_INFO) == false) { - String msg = name + " found in top-level directory" + " (unnamed package not allowed in module)"; + String msg = name + " found in top-level directory (unnamed package not allowed in module)"; throw new InvalidModuleDescriptorException(msg); } return Optional.empty(); diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/GeographyValidator.java b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/GeographyValidator.java index 83d19244dce24..08250417d76c1 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/GeographyValidator.java +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/GeographyValidator.java @@ -84,9 +84,7 @@ protected void checkLongitude(double longitude) { protected void checkAltitude(double zValue) { if (ignoreZValue == false && Double.isNaN(zValue) == false) { - throw new IllegalArgumentException( - "found Z value [" + zValue + "] but [ignore_z_value] " + "parameter is [" + ignoreZValue + "]" - ); + throw new IllegalArgumentException("found Z value [" + zValue + "] but [ignore_z_value] parameter is [" + ignoreZValue + "]"); } } diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/StandardValidator.java 
b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/StandardValidator.java index fd45b01565e38..a6c992878d14b 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/StandardValidator.java +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/StandardValidator.java @@ -41,9 +41,7 @@ public static GeometryValidator instance(boolean ignoreZValue) { protected void checkZ(double zValue) { if (ignoreZValue == false && Double.isNaN(zValue) == false) { - throw new IllegalArgumentException( - "found Z value [" + zValue + "] but [ignore_z_value] " + "parameter is [" + ignoreZValue + "]" - ); + throw new IllegalArgumentException("found Z value [" + zValue + "] but [ignore_z_value] parameter is [" + ignoreZValue + "]"); } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/LinearRingTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/LinearRingTests.java index 0ebe257d5aaa3..f154e339d3dd1 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/LinearRingTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/LinearRingTests.java @@ -31,7 +31,7 @@ public void testInitValidation() { () -> validator.validate(new LinearRing(new double[] { 3, 4, 5 }, new double[] { 1, 2, 3 })) ); assertEquals( - "first and last points of the linear ring must be the same (it must close itself): x[0]=3.0 x[2]=5.0 y[0]=1.0 " + "y[2]=3.0", + "first and last points of the linear ring must be the same (it must close itself): x[0]=3.0 x[2]=5.0 y[0]=1.0 y[2]=3.0", ex.getMessage() ); diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/PolygonTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/PolygonTests.java index 80e371b1f0993..5445de367c218 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/PolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/PolygonTests.java @@ -128,7 +128,7 @@ public void testWKTValidation() { ) ); assertEquals( - "first and last points of the linear ring must 
be the same (it must close itself): " + "x[0]=0.5 x[2]=2.0 y[0]=1.5 y[2]=1.0", + "first and last points of the linear ring must be the same (it must close itself): x[0]=0.5 x[2]=2.0 y[0]=1.5 y[2]=1.0", ex.getMessage() ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index 71fd02deecc2a..41a4c8342431b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -189,9 +189,7 @@ public void onFailure(Exception e) { assertThat(e, instanceOf(ElasticsearchException.class)); assertThat( e.getMessage(), - containsString( - "Secure settings cannot be updated cluster wide when TLS for the " + "transport layer is not enabled" - ) + containsString("Secure settings cannot be updated cluster wide when TLS for the transport layer is not enabled") ); } finally { latch.countDown(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index e8411b5124113..6859f484aea36 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -470,7 +470,7 @@ public void testAllocationFilteringOnIndexCreation() throws Exception { if (d.label().equals("filter")) { assertEquals(Decision.Type.NO, d.type()); assertEquals( - "node does not match index setting [index.routing.allocation.include] filters " + "[_name:\"non_existent_node\"]", + "node does not match index setting 
[index.routing.allocation.include] filters [_name:\"non_existent_node\"]", d.getExplanation() ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java index b898eafe0022a..3d120656c8d28 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java @@ -84,10 +84,7 @@ public void testCustomDoesNotMatch() throws Exception { UserException.class, () -> removeCustoms(environment, false, new String[] { "index-greveyard-with-typos" }) ); - assertThat( - ex.getMessage(), - containsString("No custom metadata matching [index-greveyard-with-typos] were " + "found on this node") - ); + assertThat(ex.getMessage(), containsString("No custom metadata matching [index-greveyard-with-typos] were found on this node")); } private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, boolean abort, String... 
args) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java index cf64b1909c32d..1970928818a9e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java @@ -130,7 +130,7 @@ public void testSettingDoesNotMatch() throws Exception { ); assertThat( ex.getMessage(), - containsString("No persistent cluster settings matching [cluster.routing.allocation.disk.bla.*] were " + "found on this node") + containsString("No persistent cluster settings matching [cluster.routing.allocation.disk.bla.*] were found on this node") ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 0483d8ba0e903..6f8f2eb11a802 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -530,7 +530,7 @@ public void testRestartNodeWhileIndexing() throws Exception { IndexShard shard = indicesService.getShardOrNull(shardRouting.shardId()); Set docs = IndexShardTestCase.getShardDocUIDs(shard); assertThat( - "shard [" + shard.routingEntry() + "] docIds [" + docs + "] vs " + " acked docIds [" + ackedDocs + "]", + "shard [" + shard.routingEntry() + "] docIds [" + docs + "] vs acked docIds [" + ackedDocs + "]", ackedDocs, everyItem(is(in(docs))) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 510011e183e80..5fc3f66ea5f33 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -1062,7 +1062,7 @@ public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { .getShard(0) .getRetentionLeases(); throw new AssertionError( - "expect an operation-based recovery:" + "retention leases" + Strings.toString(retentionLeases) + "]" + "expect an operation-based recovery:retention leases" + Strings.toString(retentionLeases) + "]" ); } connection.sendRequest(requestId, action, request, options); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 64760ee8c154c..3ea358aebcd44 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -964,7 +964,7 @@ public void testPartitionedTemplate() throws Exception { ); assertThat( eBadSettings.getMessage(), - containsString("partition size [6] should be a positive number " + "less than the number of shards [5]") + containsString("partition size [6] should be a positive number less than the number of shards [5]") ); // provide an invalid mapping for a partitioned index @@ -1003,7 +1003,7 @@ public void testPartitionedTemplate() throws Exception { assertThat( eBadIndex.getMessage(), - containsString("partition size [6] should be a positive number " + "less than the number of shards [5]") + containsString("partition size [6] should be a positive number less than the number of shards [5]") ); // finally, create a valid index diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index 0d178e09a3710..6706a4ec0fa87 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -99,7 +99,7 @@ public void testFailStartNode() throws Exception { assertThat(pipeline.getId(), equalTo("_id")); assertThat( pipeline.getDescription(), - equalTo("this is a place holder pipeline, " + "because pipeline with id [_id] could not be loaded") + equalTo("this is a place holder pipeline, because pipeline with id [_id] could not be loaded") ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java index 48dcb10b344e1..a40e9cc872904 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java @@ -531,7 +531,7 @@ public void testStoredScript() { .setId("my_script") // Script source is not interpreted but it references a pre-defined script from CustomScriptPlugin .setContent( - new BytesArray("{ \"script\": {\"lang\": \"" + CustomScriptPlugin.NAME + "\"," + " \"source\": \"my_script\" } }"), + new BytesArray("{ \"script\": {\"lang\": \"" + CustomScriptPlugin.NAME + "\", \"source\": \"my_script\" } }"), XContentType.JSON ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java index dd8a3163b312a..b0e596d6e7ab0 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java @@ -529,7 +529,7 @@ public void testNeitherSortsNorSizeSpecifiedAndFromIsDefault_ShouldThrowValidati ); assertThat( e.getMessage(), - containsString("[bucketSort] is configured to perform nothing." + " Please set either of [sort, size, from] to use bucket_sort") + containsString("[bucketSort] is configured to perform nothing. Please set either of [sort, size, from] to use bucket_sort") ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 56056fb120549..c5fdbc5a3973e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -358,7 +358,7 @@ public void testEnsureNoNegativeOffsets() throws Exception { "no_long_term", "This is a test where foo is highlighed and should be highlighted", "long_term", - "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed " + "and should be highlighted" + "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed and should be highlighted" ) .get(); refresh(); @@ -1339,14 +1339,7 @@ public void testSameContent() throws Exception { .get(); for (int i = 0; i < 5; i++) { - assertHighlight( - search, - i, - "title", - 0, - 1, - equalTo("This is a test on the highlighting bug " + "present in elasticsearch") - ); + assertHighlight(search, i, "title", 0, 1, equalTo("This is a test on the highlighting bug present in elasticsearch")); } } @@ -1624,7 +1617,7 @@ public void 
testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exceptio .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("fvh")), RestStatus.BAD_REQUEST, containsString( - "the field [title] should be indexed with term vector with position offsets to be " + "used with fast vector highlighter" + "the field [title] should be indexed with term vector with position offsets to be used with fast vector highlighter" ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index f5ab3d63f46e8..08cc5fa784fc1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -335,7 +335,7 @@ public int compareTo(Version other) { @Override public String toString() { - return "{" + "primaryTerm=" + primaryTerm + ", seqNo=" + seqNo + '}'; + return "{primaryTerm=" + primaryTerm + ", seqNo=" + seqNo + '}'; } public Version nextSeqNo(int increment) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 8d3123e9f115c..052c61635bab6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -258,9 +258,7 @@ public void testRequireUnitsOnUpdateSettings() throws Exception { // expected assertTrue( iae.getMessage() - .contains( - "failed to parse setting [index.gc_deletes] with value [42] as a time value: unit is " + "missing or unrecognized" - ) + .contains("failed to parse setting [index.gc_deletes] with value [42] as a time value: unit is missing or unrecognized") ); } } 
diff --git a/server/src/main/java/org/elasticsearch/env/ShardLock.java b/server/src/main/java/org/elasticsearch/env/ShardLock.java index 06c62674c7471..7759cb555baed 100644 --- a/server/src/main/java/org/elasticsearch/env/ShardLock.java +++ b/server/src/main/java/org/elasticsearch/env/ShardLock.java @@ -53,7 +53,7 @@ public void setDetails(String details) {} @Override public String toString() { - return "ShardLock{" + "shardId=" + shardId + '}'; + return "ShardLock{shardId=" + shardId + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 27bf865ea8705..b228c68f2a631 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -288,7 +288,7 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin ); } } catch (Exception e) { - indexShard.warmerService().logger().warn(() -> "failed to load " + "bitset for [" + filterToWarm + "]", e); + indexShard.warmerService().logger().warn(() -> "failed to load bitset for [" + filterToWarm + "]", e); } finally { latch.countDown(); } diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java index e4bb82e585857..268ba302033ff 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java @@ -39,7 +39,7 @@ public class PerFieldMapperCodec extends Lucene94Codec { static { assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMapperCodec.class) - : "PerFieldMapperCodec must subclass the latest " + "lucene codec: " + Lucene.LATEST_CODEC; + : "PerFieldMapperCodec must subclass the latest lucene codec: " + 
Lucene.LATEST_CODEC; } public PerFieldMapperCodec(Mode compressionMode, MapperService mapperService, BigArrays bigArrays) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java index 1bf47235d072f..629c68444ead8 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java @@ -53,6 +53,6 @@ public int hashCode() { @Override public String toString() { - return "DeleteVersionValue{" + "version=" + version + ", seqNo=" + seqNo + ", term=" + term + ",time=" + time + '}'; + return "DeleteVersionValue{version=" + version + ", seqNo=" + seqNo + ", term=" + term + ",time=" + time + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index fc708555184c4..bb2cc34daae37 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -140,7 +140,7 @@ protected void doMerge(MergeSource mergeSource, MergePolicy.OneMerge merge) thro String message = String.format( Locale.ROOT, - "merge segment [%s] done: took [%s], [%,.1f MB], [%,d docs], [%s stopped], " + "[%s throttled]", + "merge segment [%s] done: took [%s], [%,.1f MB], [%,d docs], [%s stopped], [%s throttled]", getSegmentName(merge), TimeValue.timeValueMillis(tookMS), totalSizeInBytes / 1024f / 1024f, diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 952ef783a2ea5..c05428e4abb24 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ 
b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -1156,7 +1156,7 @@ public void failEngine(String reason, @Nullable Exception failure) { } } else { logger.debug( - () -> format("tried to fail engine but could not acquire lock - engine should " + "be failed by now [%s]", reason), + () -> format("tried to fail engine but could not acquire lock - engine should be failed by now [%s]", reason), failure ); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java index 51d76db42959e..50953f4c26fd3 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java @@ -45,7 +45,7 @@ public int hashCode() { @Override public String toString() { - return "IndexVersionValue{" + "version=" + version + ", seqNo=" + seqNo + ", term=" + term + ", location=" + translogLocation + '}'; + return "IndexVersionValue{version=" + version + ", seqNo=" + seqNo + ", term=" + term + ", location=" + translogLocation + '}'; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java index b4d2767924df8..c1d66d98a1045 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -70,7 +70,7 @@ public int hashCode() { @Override public String toString() { - return "VersionValue{" + "version=" + version + ", seqNo=" + seqNo + ", term=" + term + '}'; + return "VersionValue{version=" + version + ", seqNo=" + seqNo + ", term=" + term + '}'; } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RuntimeField.java b/server/src/main/java/org/elasticsearch/index/mapper/RuntimeField.java index 1bf8c5d937e1c..9c9899b334827 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/RuntimeField.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RuntimeField.java @@ -162,7 +162,7 @@ static Map parseRuntimeFields( runtimeFields.put(fieldName, null); } else { throw new MapperParsingException( - "Runtime field [" + fieldName + "] was set to null but its removal is not supported " + "in this context" + "Runtime field [" + fieldName + "] was set to null but its removal is not supported in this context" ); } } else if (entry.getValue() instanceof Map) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index 4662c680d1561..16b2bab0d49ad 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -133,7 +133,7 @@ public static boolean parseMultiField( String multiFieldName = multiFieldEntry.getKey(); if (multiFieldName.contains(".")) { throw new MapperParsingException( - "Field name [" + multiFieldName + "] which is a multi field of [" + name + "] cannot" + " contain '.'" + "Field name [" + multiFieldName + "] which is a multi field of [" + name + "] cannot contain '.'" ); } if ((multiFieldEntry.getValue() instanceof Map) == false) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java index 02ad93689dd7b..ecb7b1a736dbf 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java @@ -128,7 +128,7 @@ private void addField(ContentPath path, String currentName, String value, List depthLimit) { throw new IllegalArgumentException( - "The provided [flattened] field [" + rootFieldName + "]" + " exceeds the maximum depth limit 
of [" + depthLimit + "]." + "The provided [flattened] field [" + rootFieldName + "] exceeds the maximum depth limit of [" + depthLimit + "]." ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 35525da88dec2..ffaa9d8340f60 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -306,7 +306,7 @@ public KnnVectorQuery createKnnQuery(float[] queryVector, int numCands, Query fi if (queryVector.length != dims) { throw new IllegalArgumentException( - "the query vector has a different dimension [" + queryVector.length + "] " + "than the index vectors [" + dims + "]" + "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" ); } @@ -324,7 +324,7 @@ private void checkVectorMagnitude(float[] vector, float squaredMagnitude) { StringBuilder errorBuilder = null; if (similarity == VectorSimilarity.dot_product && Math.abs(squaredMagnitude - 1.0f) > 1e-4f) { errorBuilder = new StringBuilder( - "The [" + VectorSimilarity.dot_product.name() + "] similarity can " + "only be used with unit-length vectors." + "The [" + VectorSimilarity.dot_product.name() + "] similarity can only be used with unit-length vectors." 
); } else if (similarity == VectorSimilarity.cosine && Math.sqrt(squaredMagnitude) == 0.0f) { errorBuilder = new StringBuilder( diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index f34bf256a56a5..e9ce701a75010 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -149,7 +149,7 @@ public final float boost() { protected final void checkNegativeBoost(float boost) { if (Float.compare(boost, 0f) < 0) { throw new IllegalArgumentException( - "negative [boost] are not allowed in [" + toString() + "], " + "use a value between 0 and 1 to deboost" + "negative [boost] are not allowed in [" + toString() + "], use a value between 0 and 1 to deboost" ); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java index dcb2e2d3e09ea..5ff78c048d383 100644 --- a/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java @@ -110,7 +110,7 @@ public static FieldMaskingSpanQueryBuilder fromXContent(XContentParser parser) t if (query instanceof SpanQueryBuilder == false) { throw new ParsingException( parser.getTokenLocation(), - "[" + NAME.getPreferredName() + "] query must " + "be of type span query" + "[" + NAME.getPreferredName() + "] query must be of type span query" ); } inner = (SpanQueryBuilder) query; diff --git a/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java b/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java index c3d7dc1443210..b17cc77388fed 100644 --- a/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java +++ 
b/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java @@ -63,7 +63,7 @@ static > T rewrite(T original, QueryRewriteContext cont // this is some protection against user provided queries if they don't obey the contract of rewrite we allow 16 rounds // and then we fail to prevent infinite loops throw new IllegalStateException( - "too many rewrite rounds, rewriteable might return new objects even if they are not " + "rewritten" + "too many rewrite rounds, rewriteable might return new objects even if they are not rewritten" ); } } @@ -94,7 +94,7 @@ static > void rewriteAndFetch( // this is some protection against user provided queries if they don't obey the contract of rewrite we allow 16 rounds // and then we fail to prevent infinite loops throw new IllegalStateException( - "too many rewrite rounds, rewriteable might return new objects even if they are not " + "rewritten" + "too many rewrite rounds, rewriteable might return new objects even if they are not rewritten" ); } if (context.hasAsyncActions()) { diff --git a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java index 1efe5b63329e5..c945fb4627611 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java @@ -62,7 +62,7 @@ public class LocalCheckpointTracker { public LocalCheckpointTracker(final long maxSeqNo, final long localCheckpoint) { if (localCheckpoint < 0 && localCheckpoint != SequenceNumbers.NO_OPS_PERFORMED) { throw new IllegalArgumentException( - "local checkpoint must be non-negative or [" + SequenceNumbers.NO_OPS_PERFORMED + "] " + "but was [" + localCheckpoint + "]" + "local checkpoint must be non-negative or [" + SequenceNumbers.NO_OPS_PERFORMED + "] but was [" + localCheckpoint + "]" ); } if (maxSeqNo < 0 && maxSeqNo != SequenceNumbers.NO_OPS_PERFORMED) { diff 
--git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java index bbb9806f92b75..57ee1ca69e750 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java @@ -274,7 +274,7 @@ public int hashCode() { @Override public String toString() { - return "RetentionLeases{" + "primaryTerm=" + primaryTerm + ", version=" + version + ", leases=" + leases + '}'; + return "RetentionLeases{primaryTerm=" + primaryTerm + ", version=" + version + ", leases=" + leases + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index 79d19ded953b1..0cd451f6be2cf 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -114,7 +114,7 @@ public CommitInfo(long maxSeqNo, long localCheckpoint) { @Override public String toString() { - return "CommitInfo{" + "maxSeqNo=" + maxSeqNo + ", localCheckpoint=" + localCheckpoint + '}'; + return "CommitInfo{maxSeqNo=" + maxSeqNo + ", localCheckpoint=" + localCheckpoint + '}'; } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java index a31ac6903d03e..341276ecee76d 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -322,6 +322,6 @@ public int hashCode() { @Override public String toString() { - return "ShardPath{" + "path=" + path + ", shard=" + shardId + '}'; + return "ShardPath{path=" + path + ", shard=" + shardId + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java 
index 54ce372c9b1a3..87494341f4638 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -256,7 +256,7 @@ public class Node implements Closeable { (key) -> new Setting<>(key, "", (value) -> { if (value.length() > 0 && (Character.isWhitespace(value.charAt(0)) || Character.isWhitespace(value.charAt(value.length() - 1)))) { - throw new IllegalArgumentException(key + " cannot have leading or trailing whitespace " + "[" + value + "]"); + throw new IllegalArgumentException(key + " cannot have leading or trailing whitespace [" + value + "]"); } if (value.length() > 0 && "node.attr.server_name".equals(key)) { try { diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index d4e7486c8ad41..8835b8ca1ad61 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -144,7 +144,7 @@ protected RestRequest(RestRequest other) { if (header == null || header.isEmpty()) { return null; } else if (header.size() > 1) { - throw new IllegalArgumentException("Incorrect header [" + headerName + "]. " + "Only one value should be provided"); + throw new IllegalArgumentException("Incorrect header [" + headerName + "]. 
Only one value should be provided"); } String rawContentType = header.get(0); if (Strings.hasText(rawContentType)) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java index df0b2c872a8fc..4b0340496120d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java @@ -70,7 +70,7 @@ public static ClusterHealthRequest fromRequest(final RestRequest request) { if (request.hasParam("wait_for_relocating_shards")) { // wait_for_relocating_shards has been removed in favor of wait_for_no_relocating_shards throw new IllegalArgumentException( - "wait_for_relocating_shards has been removed, " + "use wait_for_no_relocating_shards [true/false] instead" + "wait_for_relocating_shards has been removed, use wait_for_no_relocating_shards [true/false] instead" ); } String waitForActiveShards = request.param("wait_for_active_shards"); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java index 0b1b2e395715f..dfed1979120ee 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -34,7 +34,7 @@ public class RestNodesStatsAction extends BaseRestHandler { private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestNodesStatsAction.class); - private static final String TYPES_DEPRECATION_MESSAGE = "[types removal] " + "Specifying types in nodes stats requests is deprecated."; + private static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in nodes stats requests is 
deprecated."; @Override public List routes() { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java index 58d94047e805d..666d33a23bba9 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java @@ -70,14 +70,14 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC boolean includeTypeName = request.paramAsBoolean(INCLUDE_TYPE_NAME_PARAMETER, DEFAULT_INCLUDE_TYPE_NAME_POLICY); final String[] types = request.paramAsStringArrayOrEmptyIfAll("type"); if (includeTypeName == false && types.length > 0) { - throw new IllegalArgumentException("Types cannot be specified unless include_type_name" + " is set to true."); + throw new IllegalArgumentException("Types cannot be specified unless include_type_name is set to true."); } if (request.hasParam("local")) { request.param("local"); deprecationLogger.compatibleCritical( "get_field_mapping_local", - "Use [local] in get field mapping requests is deprecated. " + "The parameter will be removed in the next major version" + "Use [local] in get field mapping requests is deprecated. 
The parameter will be removed in the next major version" ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index a171db32fbc7a..d0bbd51da8d8b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -66,7 +66,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final String[] types = request.paramAsStringArrayOrEmptyIfAll("type"); if (request.paramAsBoolean(INCLUDE_TYPE_NAME_PARAMETER, DEFAULT_INCLUDE_TYPE_NAME_POLICY) == false && types.length > 0) { throw new IllegalArgumentException( - "Types cannot be provided in get mapping requests, unless" + " include_type_name is set to true." + "Types cannot be provided in get mapping requests, unless include_type_name is set to true." ); } if (request.method().equals(HEAD)) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java index dbe2f98d71e61..737c1ccc4cba6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java @@ -82,7 +82,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC ? 
indicesStatsRequest.indicesOptions() : IndicesOptions.strictExpandOpen(); assert indicesStatsRequest.indicesOptions() == IndicesOptions.strictExpandOpenAndForbidClosed() - : "IndicesStats default indices " + "options changed"; + : "IndicesStats default indices options changed"; indicesStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, defaultIndicesOption)); indicesStatsRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java index a8b90878157ac..c657bb906ca01 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java @@ -74,7 +74,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final String type = request.param("type"); if (includeTypeName == false && (type != null || isMappingSourceTyped(MapperService.SINGLE_MAPPING_NAME, sourceAsMap))) { throw new IllegalArgumentException( - "Types cannot be provided in put mapping requests, unless " + "the include_type_name parameter is set to true." + "Types cannot be provided in put mapping requests, unless the include_type_name parameter is set to true." 
); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java index 052ee9682e264..a211859c7236e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java @@ -34,7 +34,7 @@ public class RestValidateQueryAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestValidateQueryAction.class); - static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in validate query requests is deprecated."; + static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in validate query requests is deprecated."; @Override public List routes() { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index ac149b5d826d7..f1ab64fa0c662 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -241,7 +241,7 @@ protected Table getTableWithHeader(final RestRequest request) { ); table.addCell( "refresh.listeners", - "alias:rli,refreshListeners;default:false;text-align:right;" + "desc:number of pending refresh listeners" + "alias:rli,refreshListeners;default:false;text-align:right;desc:number of pending refresh listeners" ); table.addCell("script.compilations", "alias:scrcc,scriptCompilations;default:false;text-align:right;desc:script compilations"); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index 286116f69d6ae..994b4fdf81b12 100644 
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -37,7 +37,7 @@ * */ public class RestBulkAction extends BaseRestHandler { - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in bulk requests is deprecated."; + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in bulk requests is deprecated."; private final boolean allowExplicitIndex; diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java index 3d6df0516fe7c..7637c553654a5 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java @@ -26,7 +26,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestMultiGetAction extends BaseRestHandler { - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in multi get requests is deprecated."; + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in multi get requests is deprecated."; private final boolean allowExplicitIndex; diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java index b5f542bc85f43..3881d0b916c65 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsAction.java @@ -27,7 +27,7 @@ public class RestMultiTermVectorsAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = 
DeprecationLogger.getLogger(RestMultiTermVectorsAction.class); - static final String TYPES_DEPRECATION_MESSAGE = "[types removal] " + "Specifying types in multi term vector requests is deprecated."; + static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in multi term vector requests is deprecated."; @Override public List routes() { diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java index 9cc4b01576cbc..a67f346f86eaa 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java @@ -32,7 +32,7 @@ * TermVectorsRequest. */ public class RestTermVectorsAction extends BaseRestHandler { - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] " + "Specifying types in term vector requests is deprecated."; + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in term vector requests is deprecated."; @Override public List routes() { diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java index d08a3703fa431..6830b7809de26 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java @@ -29,7 +29,7 @@ * Rest action for computing a score explanation for specific documents. 
*/ public class RestExplainAction extends BaseRestHandler { - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] " + "Specifying a type in explain requests is deprecated."; + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying a type in explain requests is deprecated."; @Override public List routes() { @@ -68,7 +68,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (request.param("fields") != null) { throw new IllegalArgumentException( - "The parameter [fields] is no longer supported, " + "please use [stored_fields] to retrieve stored fields" + "The parameter [fields] is no longer supported, please use [stored_fields] to retrieve stored fields" ); } String sField = request.param("stored_fields"); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 14e6b07d74a1c..da4021fb11b80 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -57,7 +57,7 @@ public class RestSearchAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSearchAction.class); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in search requests is deprecated."; + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in search requests is deprecated."; /** * Indicates whether hits.total should be rendered as an integer or an object @@ -362,7 +362,7 @@ static void preparePointInTime(SearchRequest request, RestRequest restRequest, N ActionRequestValidationException validationException = null; if (request.indices().length > 0) { validationException = addValidationError( - "[indices] cannot be used with point in time. 
Do " + "not specify any index with point in time.", + "[indices] cannot be used with point in time. Do not specify any index with point in time.", validationException ); } @@ -440,7 +440,7 @@ private static void checkRestTotalHits(RestRequest restRequest, SearchRequest se private static void checkSearchType(RestRequest restRequest, SearchRequest searchRequest) { if (restRequest.hasParam("search_type") && searchRequest.hasKnnSearch()) { throw new IllegalArgumentException( - "cannot set [search_type] when using [knn] search, since the search type " + "is determined automatically" + "cannot set [search_type] when using [knn] search, since the search type is determined automatically" ); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java index 7827d4c93816d..3c29391518e98 100644 --- a/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java @@ -148,7 +148,7 @@ protected Map getAlternateVersions() { expectedQuery.filter(filter); } contentString = contentString.substring(0, contentString.length() - 1); - contentString += " } \n" + "}"; + contentString += " } \n}"; alternateVersions.put(contentString, expectedQuery); return alternateVersions; } diff --git a/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java index ea1f9c6160cb0..5e445529159ce 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java @@ -142,7 +142,7 @@ public void testGeo() throws Exception { SearchExecutionContext context = createSearchExecutionContext(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> query.toQuery(context)); 
assertEquals( - "Geometry fields do not support exact searching, " + "use dedicated geometry queries instead: [mapped_geo_point]", + "Geometry fields do not support exact searching, use dedicated geometry queries instead: [mapped_geo_point]", e.getMessage() ); } diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index 9cc03612ccbe4..45a0a2ce91e94 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -277,7 +277,7 @@ public void testGeo() throws Exception { SearchExecutionContext context = createSearchExecutionContext(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> query.toQuery(context)); assertEquals( - "Geometry fields do not support exact searching, use dedicated geometry queries instead: " + "[mapped_geo_point]", + "Geometry fields do not support exact searching, use dedicated geometry queries instead: [mapped_geo_point]", e.getMessage() ); } diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java index 9c84658fc6a0a..edc2a03b301d4 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java @@ -439,7 +439,7 @@ public void testExplainFiltersFunctionScoreQuery() throws IOException { checkFiltersFunctionScoreExplanation(functionExplanation, "Function for field test:", 3); assertThat( functionExplanation.getDetails()[0].getDetails()[3].getDetails()[1].getDetails()[0].toString(), - equalTo("0.1 = exp(- " + "MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)] * 2.3025850929940455)\n") + 
equalTo("0.1 = exp(- MAX[Math.max(Math.abs(1.0(=doc value) - 0.0(=origin))) - 0.0(=offset), 0)] * 2.3025850929940455)\n") ); assertThat(functionExplanation.getDetails()[0].getDetails()[3].getDetails()[1].getDetails()[0].getDetails().length, equalTo(0)); From fe2ec6c9167a6abc04739ab4d8d012f92623f0bd Mon Sep 17 00:00:00 2001 From: Mark Laney Date: Tue, 27 Sep 2022 08:35:16 -0600 Subject: [PATCH 18/26] Remove any mention of "mapping type" (#86242) Mapping types were removed in v6.0 so they shouldn't be mentioned in the description of inheritance of the `dynamic` setting. --- docs/reference/mapping/params/dynamic.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/params/dynamic.asciidoc b/docs/reference/mapping/params/dynamic.asciidoc index 7e7d055d72d25..ac95d5de80b89 100644 --- a/docs/reference/mapping/params/dynamic.asciidoc +++ b/docs/reference/mapping/params/dynamic.asciidoc @@ -42,7 +42,7 @@ GET my-index-000001/_mapping [[dynamic-inner-objects]] ==== Setting `dynamic` on inner objects <> inherit the `dynamic` setting from their parent -object or from the mapping type. In the following example, dynamic mapping is +object. In the following example, dynamic mapping is disabled at the type level, so no new top-level fields will be added dynamically. 
From 17aef3ccd487a2c13a4b4ce8acd2043612174a6d Mon Sep 17 00:00:00 2001 From: Nikola Grcevski <6207777+grcevski@users.noreply.github.com> Date: Tue, 27 Sep 2022 11:01:13 -0400 Subject: [PATCH 19/26] Change how test does cleanup (#90373) --- .../RoleMappingFileSettingsIT.java | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java index 0d107cf0f861f..5e6f63f04ff58 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java @@ -139,9 +139,6 @@ public class RoleMappingFileSettingsIT extends NativeRealmIntegTestCase { @After public void cleanUp() throws IOException { - var fileSettingsService = internalCluster().getInstance(FileSettingsService.class, internalCluster().getMasterName()); - Files.deleteIfExists(fileSettingsService.operatorSettingsFile()); - ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() @@ -349,7 +346,24 @@ private void assertRoleMappingsNotSaved(CountDownLatch savedClusterState, Atomic public void testErrorSaved() throws Exception { ensureGreen(); - var savedClusterState = setupClusterStateListenerForError(internalCluster().getMasterName()); + // save an empty file to clear any prior state, this ensures we don't get a stale file left over by another test + var savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); + + writeJSONFile(internalCluster().getMasterName(), emptyJSON); + boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + final ClusterStateResponse 
clusterStateResponse = client().admin() + .cluster() + .state(new ClusterStateRequest().waitForMetadataVersion(savedClusterState.v2().get())) + .get(); + + assertNull( + clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()) + ); + + // save a bad file + savedClusterState = setupClusterStateListenerForError(internalCluster().getMasterName()); writeJSONFile(internalCluster().getMasterName(), testErrorJSON); assertRoleMappingsNotSaved(savedClusterState.v1(), savedClusterState.v2()); From b6e7b349167a5ca5e23a7fc6e5f5700cd28664f4 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 27 Sep 2022 17:28:12 +0200 Subject: [PATCH 20/26] enable bwc after upgrade to lucene-9.4.0-snapshot-d2e22e18c6c (#90411) Change has been backported to 8.5 so we can enable BWC again. --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index 133f7768370c0..e1e11e60e110e 100644 --- a/build.gradle +++ b/build.gradle @@ -137,9 +137,9 @@ tasks.register("verifyVersions") { * after the backport of the backcompat code is complete. */ -boolean bwc_tests_enabled = false +boolean bwc_tests_enabled = true // place a PR link here when committing bwc changes: -String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/90400" +String bwc_tests_disabled_issue = "" if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") From d1d0f6d6230df3fdb47edafdab1c4964693e3e98 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Tue, 27 Sep 2022 19:13:40 +0300 Subject: [PATCH 21/26] [ML] Update `number_of_allocations` description in REST spec (#90413) In 8.5 the definition of `number_of_allocations` parameter to the start trained model deployment API was changed. This commit updates the REST spec accordingly. 
--- .../rest-api-spec/api/ml.start_trained_model_deployment.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json index 5e06207e66b4a..36e6714b3e312 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json @@ -35,7 +35,7 @@ }, "number_of_allocations":{ "type":"int", - "description": "The number of model allocations on each node where the model is deployed.", + "description": "The total number of allocations this model is assigned across machine learning nodes.", "required": false, "default": 1 }, From 4d34667f93768bd1edc728ffab1216dc286c98d4 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Tue, 27 Sep 2022 11:25:16 -0700 Subject: [PATCH 22/26] Fix XContent output for SearchProfileDfsPhaseResult (#90393) The test SearchProfileResultsTests.testFromXContent created in (#90200) may fail when the contents of a dfs profile result are null as the dfs field has no object to associate it's name with. This change corrects that error. 
Closes #90392 Closes #90407 --- .../search/profile/SearchProfileDfsPhaseResult.java | 4 ++-- .../org/elasticsearch/action/search/SearchResponseTests.java | 2 -- .../search/profile/SearchProfileDfsPhaseResultTests.java | 5 ----- .../search/profile/SearchProfileResultsTests.java | 5 ----- .../org/elasticsearch/test/AbstractSerializingTestCase.java | 2 +- 5 files changed, 3 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java index dbe3d28c6e77d..a8a0203a49034 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java +++ b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java @@ -62,12 +62,12 @@ public static SearchProfileDfsPhaseResult fromXContent(XContentParser parser) th @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); if (queryProfileShardResult != null) { - builder.startObject(); builder.field(KNN.getPreferredName()); queryProfileShardResult.toXContent(builder, params); - builder.endObject(); } + builder.endObject(); return builder; } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index 6b229310ad3a0..4815030e73a7b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -142,7 +142,6 @@ static SearchResponse.Clusters randomClusters() { * the "_shard/total/failures" section makes it impossible to directly * compare xContent, so we omit it here */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90407") public void testFromXContent() throws IOException { 
doFromXContentTestWithRandomFields(createTestItem(), false); } @@ -182,7 +181,6 @@ private void doFromXContentTestWithRandomFields(SearchResponse response, boolean * Because of this, in this special test case we compare the "top level" fields for equality * and the subsections xContent equivalence independently */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90407") public void testFromXContentWithFailures() throws IOException { int numFailures = randomIntBetween(1, 5); ShardSearchFailure[] failures = new ShardSearchFailure[numFailures]; diff --git a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java index 4fb10a5d70af6..fa68d4420df23 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java @@ -35,9 +35,4 @@ protected Reader instanceReader() { protected SearchProfileDfsPhaseResult doParseInstance(XContentParser parser) throws IOException { return SearchProfileDfsPhaseResult.fromXContent(parser); } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90392") - public void testFromXContent() throws IOException { - super.testFromXContent(); - } } diff --git a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java index 7ce348064f71d..256c60c5ab4f7 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsTests.java @@ -57,9 +57,4 @@ protected SearchProfileResults doParseInstance(XContentParser parser) throws IOE protected Predicate getRandomFieldsExcludeFilter() { return 
ProfileResultTests.RANDOM_FIELDS_EXCLUDE_FILTER; } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90392") - public void testFromXContent() throws IOException { - super.testFromXContent(); - } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java index 9dfee23bd9c90..a880ac6bc18cc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java @@ -35,7 +35,7 @@ public abstract class AbstractSerializingTestCase Date: Tue, 27 Sep 2022 14:30:14 -0400 Subject: [PATCH 23/26] Operator/ingest (#89735) Add support for /_ingest/pipeline for file based settings. Relates to #89183 --- docs/changelog/89735.yaml | 5 + .../ingest/IngestFileSettingsIT.java | 273 ++++++++++++++++++ .../cluster/node/info/NodesInfoRequest.java | 14 + .../ingest/DeletePipelineTransportAction.java | 12 + .../ingest/PutPipelineTransportAction.java | 13 +- .../action/ingest/ReservedPipelineAction.java | 142 +++++++++ .../elasticsearch/ingest/IngestService.java | 79 +++-- .../java/org/elasticsearch/node/Node.java | 7 +- .../service/FileSettingsService.java | 75 ++++- .../service/ReservedClusterStateService.java | 14 +- .../ingest/ReservedPipelineActionTests.java | 243 ++++++++++++++++ .../elasticsearch/ingest/FakeProcessor.java | 4 +- .../service/FileSettingsServiceTests.java | 198 ++++++++++++- 13 files changed, 1022 insertions(+), 57 deletions(-) create mode 100644 docs/changelog/89735.yaml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java create mode 100644 server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java create mode 100644 server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java diff --git 
a/docs/changelog/89735.yaml b/docs/changelog/89735.yaml new file mode 100644 index 0000000000000..8fd69d53b3790 --- /dev/null +++ b/docs/changelog/89735.yaml @@ -0,0 +1,5 @@ +pr: 89735 +summary: Operator/ingest +area: Infra/Core +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java new file mode 100644 index 0000000000000..1fc92aa4ae686 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java @@ -0,0 +1,273 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.ingest.PutPipelineAction; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.ReservedPipelineAction; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.metadata.ReservedStateErrorMetadata; +import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reservedstate.service.FileSettingsService; +import 
org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParserConfiguration; + +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.xcontent.XContentType.JSON; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; + +public class IngestFileSettingsIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(CustomIngestTestPlugin.class); + } + + private static AtomicLong versionCounter = new AtomicLong(1); + + private static String testJSON = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "ingest_pipelines": { + "my_ingest_pipeline": { + "description": "_description", + "processors": [ + { + "test" : { + "field": "pipeline", + "value": "pipeline" + } + } + ] + }, + "my_ingest_pipeline_1": { + "description": "_description", + "processors": [ + { + "test" : { + "field": "pipeline", + "value": "pipeline" + } + } + ] + } + } + } + }"""; + + private static String testErrorJSON = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "ingest_pipelines": { + "my_ingest_pipeline": { + "description": "_description", + "processors": [ + { + "foo" : { + "field": "pipeline", + "value": "pipeline" + } + } + ] + } + } + } + }"""; + + private void writeJSONFile(String node, String json) throws 
Exception { + long version = versionCounter.incrementAndGet(); + + FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); + assertTrue(fileSettingsService.watching()); + + Files.deleteIfExists(fileSettingsService.operatorSettingsFile()); + + Files.createDirectories(fileSettingsService.operatorSettingsDir()); + Path tempFilePath = createTempFile(); + + logger.info("--> writing JSON config to node {} with path {}", node, tempFilePath); + logger.info(Strings.format(json, version)); + Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); + Files.move(tempFilePath, fileSettingsService.operatorSettingsFile(), StandardCopyOption.ATOMIC_MOVE); + } + + private Tuple setupClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedPipelineAction.NAME); + if (handlerMetadata != null && handlerMetadata.keys().contains("my_ingest_pipeline")) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + + private void assertPipelinesSaveOK(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception { + boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + final ClusterStateResponse clusterStateResponse = client().admin() + .cluster() + .state(new 
ClusterStateRequest().waitForMetadataVersion(metadataVersion.get())) + .get(); + + ReservedStateMetadata reservedState = clusterStateResponse.getState() + .metadata() + .reservedStateMetadata() + .get(FileSettingsService.NAMESPACE); + + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedPipelineAction.NAME); + + assertThat(handlerMetadata.keys(), allOf(notNullValue(), containsInAnyOrder("my_ingest_pipeline", "my_ingest_pipeline_1"))); + + // Try using the REST API to update the my_ingest_pipeline pipeline + // This should fail, we have reserved certain ingest pipelines in operator mode + assertEquals( + "Failed to process request [org.elasticsearch.action.ingest.PutPipelineRequest/unset] with errors: " + + "[[my_ingest_pipeline] set as read-only by [file_settings]]", + expectThrows( + IllegalArgumentException.class, + () -> client().execute(PutPipelineAction.INSTANCE, sampleRestRequest("my_ingest_pipeline")).actionGet() + ).getMessage() + ); + } + + public void testPoliciesApplied() throws Exception { + ensureGreen(); + + var savedClusterState = setupClusterStateListener(internalCluster().getMasterName()); + writeJSONFile(internalCluster().getMasterName(), testJSON); + + assertPipelinesSaveOK(savedClusterState.v1(), savedClusterState.v2()); + } + + private Tuple setupClusterStateListenerForError(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null && reservedState.errorMetadata() != null) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + 
savedClusterState.countDown(); + assertEquals(ReservedStateErrorMetadata.ErrorKind.VALIDATION, reservedState.errorMetadata().errorKind()); + assertThat(reservedState.errorMetadata().errors(), allOf(notNullValue(), hasSize(1))); + assertThat( + reservedState.errorMetadata().errors().get(0), + containsString("org.elasticsearch.ElasticsearchParseException: No processor type exists with name [foo]") + ); + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + + private void assertPipelinesNotSaved(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception { + boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + // This should succeed, nothing was reserved + client().execute(PutPipelineAction.INSTANCE, sampleRestRequest("my_ingest_pipeline_bad")).get(); + } + + public void testErrorSaved() throws Exception { + ensureGreen(); + var savedClusterState = setupClusterStateListenerForError(internalCluster().getMasterName()); + + writeJSONFile(internalCluster().getMasterName(), testErrorJSON); + assertPipelinesNotSaved(savedClusterState.v1(), savedClusterState.v2()); + } + + private PutPipelineRequest sampleRestRequest(String id) throws Exception { + var json = """ + { + "description": "_description", + "processors": [ + { + "test" : { + "field": "_foo", + "value": "_bar" + } + } + ] + }"""; + + try ( + var bis = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); + var parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, bis); + var builder = XContentFactory.contentBuilder(JSON) + ) { + builder.map(parser.map()); + return new PutPipelineRequest(id, BytesReference.bytes(builder), JSON); + } + } + + public static class CustomIngestTestPlugin extends IngestTestPlugin { + @Override + public Map getProcessors(Processor.Parameters parameters) { + Map processors = new HashMap<>(); + processors.put("test", (factories, tag, description, config) -> { + 
String field = (String) config.remove("field"); + String value = (String) config.remove("value"); + return new FakeProcessor("test", tag, description, (ingestDocument) -> ingestDocument.setFieldValue(field, value)); + }); + + return processors; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java index b47eec1010085..a40743dedc002 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -125,6 +125,20 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(requestedMetrics.toArray(String[]::new)); } + /** + * Helper method for creating NodesInfoRequests with desired metrics + * @param metrics the metrics to include in the request + * @return a cleared NodesInfoRequest that requests only the given metrics + */ + public static NodesInfoRequest requestWithMetrics(Metric... metrics) { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.clear(); + for (var metric : metrics) { + nodesInfoRequest.addMetric(metric.metricName()); + } + return nodesInfoRequest; + } + /** * An enumeration of the "core" sections of metrics that may be requested * from the nodes information endpoint. 
Eventually this list list will be diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java index ff5edcc4ab3f8..acb2e91433623 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java @@ -22,6 +22,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Optional; +import java.util.Set; + public class DeletePipelineTransportAction extends AcknowledgedTransportMasterNodeAction { private final IngestService ingestService; @@ -62,4 +65,13 @@ protected ClusterBlockException checkBlock(DeletePipelineRequest request, Cluste return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } + @Override + public Optional reservedStateHandlerName() { + return Optional.of(ReservedPipelineAction.NAME); + } + + @Override + public Set modifiedKeys(DeletePipelineRequest request) { + return Set.of(request.getId()); + } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index ff48d73752b7f..80359b8dd2a3c 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -25,10 +25,12 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Optional; +import java.util.Set; + import static org.elasticsearch.ingest.IngestService.INGEST_ORIGIN; public class PutPipelineTransportAction extends AcknowledgedTransportMasterNodeAction { - private final IngestService ingestService; private final OriginSettingClient client; @@ 
-73,4 +75,13 @@ protected ClusterBlockException checkBlock(PutPipelineRequest request, ClusterSt return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } + @Override + public Optional reservedStateHandlerName() { + return Optional.of(ReservedPipelineAction.NAME); + } + + @Override + public Set modifiedKeys(PutPipelineRequest request) { + return Set.of(request.getId()); + } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java new file mode 100644 index 0000000000000..05dfbc563689e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.ingest; + +import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.ingest.IngestMetadata; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.reservedstate.ReservedClusterStateHandler; +import org.elasticsearch.reservedstate.TransformState; +import org.elasticsearch.reservedstate.service.FileSettingsService; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * This Action is the reserved state save version of RestPutPipelineAction/RestDeletePipelineAction + *

+ * It is used by the ReservedClusterStateService to add/update or remove ingest pipelines. Typical usage + * for this action is in the context of file based state. + */ +public class ReservedPipelineAction implements ReservedClusterStateHandler> { + public static final String NAME = "ingest_pipelines"; + + private final IngestService ingestService; + private final FileSettingsService fileSettingsService; + + /** + * Creates a ReservedPipelineAction + * + * @param ingestService requires {@link IngestService} for storing/deleting the pipelines + * @param fileSettingsService required for supplying the latest node infos + */ + public ReservedPipelineAction(IngestService ingestService, FileSettingsService fileSettingsService) { + this.ingestService = ingestService; + this.fileSettingsService = fileSettingsService; + } + + @Override + public String name() { + return NAME; + } + + private Collection prepare(List requests) { + var exceptions = new ArrayList(); + NodesInfoResponse nodeInfos = fileSettingsService.nodeInfos(); + assert nodeInfos != null; + for (var pipeline : requests) { + try { + ingestService.validatePipelineRequest(pipeline, nodeInfos); + } catch (Exception e) { + exceptions.add(e); + } + } + + if (exceptions.isEmpty() == false) { + var illegalArgumentException = new IllegalArgumentException("Error processing ingest pipelines"); + exceptions.forEach(illegalArgumentException::addSuppressed); + throw illegalArgumentException; + } + + return requests; + } + + private ClusterState wrapIngestTaskExecute(IngestService.PipelineClusterStateUpdateTask task, ClusterState state) { + final var allIndexMetadata = state.metadata().indices().values(); + final IngestMetadata currentIndexMetadata = state.metadata().custom(IngestMetadata.TYPE); + + var updatedIngestMetadata = task.execute(currentIndexMetadata, allIndexMetadata); + return state.copyAndUpdateMetadata(b -> b.putCustom(IngestMetadata.TYPE, updatedIngestMetadata)); + } + + @Override + public TransformState 
transform(Object source, TransformState prevState) throws Exception { + @SuppressWarnings("unchecked") + var requests = prepare((List) source); + + ClusterState state = prevState.state(); + + for (var request : requests) { + var nopUpdate = IngestService.isNoOpPipelineUpdate(state, request); + + if (nopUpdate) { + continue; + } + + var task = new IngestService.PutPipelineClusterStateUpdateTask(request); + state = wrapIngestTaskExecute(task, state); + } + + Set entities = requests.stream().map(r -> r.getId()).collect(Collectors.toSet()); + + Set toDelete = new HashSet<>(prevState.keys()); + toDelete.removeAll(entities); + + for (var pipelineToDelete : toDelete) { + var task = new IngestService.DeletePipelineClusterStateUpdateTask(pipelineToDelete); + state = wrapIngestTaskExecute(task, state); + } + + return new TransformState(state, entities); + } + + @Override + public List fromXContent(XContentParser parser) throws IOException { + List result = new ArrayList<>(); + + Map source = parser.map(); + + for (String id : source.keySet()) { + @SuppressWarnings("unchecked") + Map content = (Map) source.get(id); + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.map(content); + result.add(new PutPipelineRequest(id, BytesReference.bytes(builder), XContentType.JSON)); + } catch (Exception e) { + throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); + } + } + + return result; + } + +} diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index d2e671550a39a..387ea98aa4f97 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -133,7 +133,7 @@ public class IngestService implements ClusterStateApplier, ReportingService listener; PipelineClusterStateUpdateTask(ActionListener listener) { @@ -336,8 +336,10 @@ public void 
delete(DeletePipelineRequest request, ActionListener listener, DeletePipelineRequest request) { @@ -345,6 +347,13 @@ static class DeletePipelineClusterStateUpdateTask extends PipelineClusterStateUp this.request = request; } + /** + * Used by the {@link org.elasticsearch.action.ingest.ReservedPipelineAction} + */ + public DeletePipelineClusterStateUpdateTask(String id) { + this(null, new DeletePipelineRequest(id)); + } + @Override public IngestMetadata execute(IngestMetadata currentIngestMetadata, Collection allIndexMetadata) { if (currentIngestMetadata == null) { @@ -461,31 +470,15 @@ public void putPipeline( ActionListener listener, Consumer> nodeInfoListener ) throws Exception { - - Map pipelineConfig = null; - IngestMetadata currentIngestMetadata = state.metadata().custom(IngestMetadata.TYPE); - if (request.getVersion() == null - && currentIngestMetadata != null - && currentIngestMetadata.getPipelines().containsKey(request.getId())) { - pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); - var currentPipeline = currentIngestMetadata.getPipelines().get(request.getId()); - if (currentPipeline.getConfigAsMap().equals(pipelineConfig)) { - // existing pipeline matches request pipeline -- no need to update - listener.onResponse(AcknowledgedResponse.TRUE); - return; - } + if (isNoOpPipelineUpdate(state, request)) { + // existing pipeline matches request pipeline -- no need to update + listener.onResponse(AcknowledgedResponse.TRUE); + return; } - final Map config = pipelineConfig == null - ? 
XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2() - : pipelineConfig; nodeInfoListener.accept(ActionListener.wrap(nodeInfos -> { - Map ingestInfos = new HashMap<>(); - for (NodeInfo nodeInfo : nodeInfos.getNodes()) { - ingestInfos.put(nodeInfo.getNode(), nodeInfo.getInfo(IngestInfo.class)); - } + validatePipelineRequest(request, nodeInfos); - validatePipeline(ingestInfos, request.getId(), config); clusterService.submitStateUpdateTask( "put-pipeline-" + request.getId(), new PutPipelineClusterStateUpdateTask(listener, request), @@ -495,6 +488,31 @@ public void putPipeline( }, listener::onFailure)); } + public void validatePipelineRequest(PutPipelineRequest request, NodesInfoResponse nodeInfos) throws Exception { + final Map config = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); + Map ingestInfos = new HashMap<>(); + for (NodeInfo nodeInfo : nodeInfos.getNodes()) { + ingestInfos.put(nodeInfo.getNode(), nodeInfo.getInfo(IngestInfo.class)); + } + + validatePipeline(ingestInfos, request.getId(), config); + } + + public static boolean isNoOpPipelineUpdate(ClusterState state, PutPipelineRequest request) { + IngestMetadata currentIngestMetadata = state.metadata().custom(IngestMetadata.TYPE); + if (request.getVersion() == null + && currentIngestMetadata != null + && currentIngestMetadata.getPipelines().containsKey(request.getId())) { + var pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); + var currentPipeline = currentIngestMetadata.getPipelines().get(request.getId()); + if (currentPipeline.getConfigAsMap().equals(pipelineConfig)) { + return true; + } + } + + return false; + } + /** * Returns the pipeline by the specified id */ @@ -553,8 +571,10 @@ private static List> getProcessorMetrics( return processorMetrics; } - // visible for testing - static class PutPipelineClusterStateUpdateTask extends PipelineClusterStateUpdateTask { + /** + * 
Used in this class and externally by the {@link org.elasticsearch.action.ingest.ReservedPipelineAction} + */ + public static class PutPipelineClusterStateUpdateTask extends PipelineClusterStateUpdateTask { private final PutPipelineRequest request; PutPipelineClusterStateUpdateTask(ActionListener listener, PutPipelineRequest request) { @@ -562,6 +582,13 @@ static class PutPipelineClusterStateUpdateTask extends PipelineClusterStateUpdat this.request = request; } + /** + * Used by {@link org.elasticsearch.action.ingest.ReservedPipelineAction} + */ + public PutPipelineClusterStateUpdateTask(PutPipelineRequest request) { + this(null, request); + } + @Override public IngestMetadata execute(IngestMetadata currentIngestMetadata, Collection allIndexMetadata) { BytesReference pipelineSource = request.getSource(); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 87494341f4638..837c02062088d 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction; +import org.elasticsearch.action.ingest.ReservedPipelineAction; import org.elasticsearch.action.search.SearchExecutionStatsCollector; import org.elasticsearch.action.search.SearchPhaseController; import org.elasticsearch.action.search.SearchTransportService; @@ -832,9 +833,13 @@ protected Node( FileSettingsService fileSettingsService = new FileSettingsService( clusterService, actionModule.getReservedClusterStateService(), - environment + environment, + client ); + actionModule.getReservedClusterStateService() + .installStateHandler(new ReservedPipelineAction(ingestService, fileSettingsService)); + RestoreService restoreService = new RestoreService( clusterService, 
repositoryService, diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index e64f1c4e58ac3..fc885ee8970b1 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -10,6 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.client.internal.ClusterAdminClient; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -56,6 +61,7 @@ public class FileSettingsService extends AbstractLifecycleComponent implements C private final ClusterService clusterService; private final ReservedClusterStateService stateService; private final Path operatorSettingsDir; + private final NodeClient nodeClient; private WatchService watchService; // null; private CountDownLatch watcherThreadLatch; @@ -70,6 +76,9 @@ public class FileSettingsService extends AbstractLifecycleComponent implements C public static final String OPERATOR_DIRECTORY = "operator"; + private volatile NodesInfoResponse nodesInfoResponse = null; + private volatile boolean nodeInfosRefreshRequired = true; + /** * Constructs the {@link FileSettingsService} * @@ -77,10 +86,16 @@ public class FileSettingsService extends AbstractLifecycleComponent implements C * @param stateService an instance of the immutable cluster state controller, so we can perform the cluster state changes * @param environment we need the environment to pull the 
location of the config and operator directories */ - public FileSettingsService(ClusterService clusterService, ReservedClusterStateService stateService, Environment environment) { + public FileSettingsService( + ClusterService clusterService, + ReservedClusterStateService stateService, + Environment environment, + NodeClient nodeClient + ) { this.clusterService = clusterService; this.stateService = stateService; this.operatorSettingsDir = environment.configFile().toAbsolutePath().resolve(OPERATOR_DIRECTORY); + this.nodeClient = nodeClient; } public Path operatorSettingsDir() { @@ -134,6 +149,7 @@ private boolean currentNodeMaster(ClusterState clusterState) { public void clusterChanged(ClusterChangedEvent event) { ClusterState clusterState = event.state(); startIfMaster(clusterState); + checkForNodeChanges(event); } private void startIfMaster(ClusterState clusterState) { @@ -378,7 +394,7 @@ private WatchKey enableSettingsWatcher(WatchKey previousKey, Path settingsDir) t ); } - CountDownLatch processFileSettings(Path path, Consumer errorHandler) throws IOException { + CountDownLatch processFileSettings(Path path, Consumer errorHandler) { CountDownLatch waitForCompletion = new CountDownLatch(1); logger.info("processing path [{}] for [{}]", path, NAMESPACE); try ( @@ -386,20 +402,49 @@ CountDownLatch processFileSettings(Path path, Consumer errorHandler) var bis = new BufferedInputStream(fis); var parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, bis) ) { - stateService.process(NAMESPACE, parser, (e) -> { - try { - if (e != null) { - errorHandler.accept(e); + ReservedStateChunk parsedState = stateService.parse(NAMESPACE, parser); + if (nodeInfosRefreshRequired || nodesInfoResponse == null) { + var nodesInfoRequest = NodesInfoRequest.requestWithMetrics(NodesInfoRequest.Metric.INGEST); + + clusterAdminClient().nodesInfo(nodesInfoRequest, new ActionListener<>() { + @Override + public void onResponse(NodesInfoResponse response) { + // stash the latest 
node infos response and continue with processing the file + nodesInfoResponse = response; + nodeInfosRefreshRequired = false; + stateService.process(NAMESPACE, parsedState, (e) -> completeProcessing(e, errorHandler, waitForCompletion)); } - } finally { - waitForCompletion.countDown(); - } - }); + + @Override + public void onFailure(Exception e) { + completeProcessing(e, errorHandler, waitForCompletion); + } + }); + } else { + stateService.process(NAMESPACE, parsedState, (e) -> completeProcessing(e, errorHandler, waitForCompletion)); + } + } catch (Exception e) { + completeProcessing(e, errorHandler, waitForCompletion); } return waitForCompletion; } + // package private for testing, separate method so that it can be mocked in tests + ClusterAdminClient clusterAdminClient() { + return nodeClient.admin().cluster(); + } + + private void completeProcessing(Exception e, Consumer errorHandler, CountDownLatch completionLatch) { + try { + if (e != null) { + errorHandler.accept(e); + } + } finally { + completionLatch.countDown(); + } + } + /** * Holds information about the last known state of the file we watched. We use this * class to determine if a file has been changed. 
@@ -415,4 +460,14 @@ public FileSettingsStartupException(String message, Throwable t) { super(message, t); } } + + void checkForNodeChanges(ClusterChangedEvent event) { + if (currentNodeMaster(event.state()) && event.nodesChanged()) { + nodeInfosRefreshRequired = true; + } + } + + public NodesInfoResponse nodeInfos() { + return nodesInfoResponse; + } } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java index 199502f2d9c82..0a3a9f447d0e9 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java @@ -97,6 +97,18 @@ public ReservedClusterStateService(ClusterService clusterService, List ReservedStateVersion.parse(p), METADATA_FIELD); } + ReservedStateChunk parse(String namespace, XContentParser parser) { + try { + return stateChunkParser.apply(parser, null); + } catch (Exception e) { + ErrorState errorState = new ErrorState(namespace, -1L, e, ReservedStateErrorMetadata.ErrorKind.PARSING); + updateErrorState(errorState); + logger.debug("error processing state change request for [{}] with the following errors [{}]", namespace, errorState); + + throw new IllegalStateException("Error processing state change request for " + namespace + ", errors: " + errorState, e); + } + } + /** * Saves and reserves a chunk of the cluster state under a given 'namespace' from {@link XContentParser} * @@ -109,7 +121,7 @@ public void process(String namespace, XContentParser parser, Consumer ReservedStateChunk stateChunk; try { - stateChunk = stateChunkParser.apply(parser, null); + stateChunk = parse(namespace, parser); } catch (Exception e) { ErrorState errorState = new ErrorState(namespace, -1L, e, ReservedStateErrorMetadata.ErrorKind.PARSING); updateErrorState(errorState); diff --git 
a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java new file mode 100644 index 0000000000000..9562e469435fb --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java @@ -0,0 +1,243 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.ingest; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.ingest.FakeProcessor; +import org.elasticsearch.ingest.IngestInfo; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.ProcessorInfo; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.reservedstate.TransformState; +import org.elasticsearch.reservedstate.service.FileSettingsService; +import org.elasticsearch.reservedstate.service.ReservedClusterStateService; +import org.elasticsearch.test.ESTestCase; +import 
org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +/** + * Tests that the ReservedPipelineAction does validation, can add and remove pipelines + */ +public class ReservedPipelineActionTests extends ESTestCase { + private static final IngestPlugin DUMMY_PLUGIN = new IngestPlugin() { + @Override + public Map getProcessors(Processor.Parameters parameters) { + Map processors = new HashMap<>(); + processors.put("set", (factories, tag, description, config) -> { + String field = (String) config.remove("field"); + String value = (String) config.remove("value"); + return new FakeProcessor("set", tag, description, (ingestDocument) -> ingestDocument.setFieldValue(field, value)); + }); + + return processors; + } + }; + + private ThreadPool threadPool; + private IngestService ingestService; + private FileSettingsService fileSettingsService; + + @Before + public void setup() { + threadPool = mock(ThreadPool.class); + when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); + when(threadPool.executor(anyString())).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); + + Client client = mock(Client.class); + ingestService = new IngestService( + mock(ClusterService.class), + threadPool, + null, + null, + null, + Collections.singletonList(DUMMY_PLUGIN), + client + ); + Map 
factories = ingestService.getProcessorFactories(); + assertTrue(factories.containsKey("set")); + assertEquals(1, factories.size()); + + DiscoveryNode discoveryNode = new DiscoveryNode( + "_node_id", + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), + Version.CURRENT + ); + + NodeInfo nodeInfo = new NodeInfo( + Version.CURRENT, + Build.CURRENT, + discoveryNode, + Settings.EMPTY, + null, + null, + null, + null, + null, + null, + null, + new IngestInfo(Collections.singletonList(new ProcessorInfo("set"))), + null, + null + ); + NodesInfoResponse response = new NodesInfoResponse(new ClusterName("elasticsearch"), List.of(nodeInfo), List.of()); + + var clusterService = spy( + new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool, + null + ) + ); + + fileSettingsService = spy( + new FileSettingsService( + clusterService, + mock(ReservedClusterStateService.class), + newEnvironment(Settings.EMPTY), + mock(NodeClient.class) + ) + ); + + doReturn(response).when(fileSettingsService).nodeInfos(); + } + + private TransformState processJSON(ReservedPipelineAction action, TransformState prevState, String json) throws Exception { + try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, json)) { + return action.transform(action.fromXContent(parser), prevState); + } + } + + public void testValidation() throws Exception { + ClusterState state = ClusterState.builder(new ClusterName("elasticsearch")).build(); + TransformState prevState = new TransformState(state, Collections.emptySet()); + ReservedPipelineAction action = makeSpiedAction(); + + String badPolicyJSON = """ + { + "my_ingest_pipeline": { + "description": "_description", + "processors": [ + { + "foo" : { + "field": "pipeline", + "value": "pipeline" + } + } + ] + } + }"""; + + assertEquals( + "Error processing ingest pipelines", + expectThrows(IllegalArgumentException.class, () -> 
processJSON(action, prevState, badPolicyJSON)).getMessage() + ); + } + + public void testAddRemoveIngestPipeline() throws Exception { + ClusterState state = ClusterState.builder(new ClusterName("elasticsearch")).build(); + TransformState prevState = new TransformState(state, Collections.emptySet()); + ReservedPipelineAction action = makeSpiedAction(); + + String emptyJSON = ""; + + TransformState updatedState = processJSON(action, prevState, emptyJSON); + assertEquals(0, updatedState.keys().size()); + assertEquals(prevState.state(), updatedState.state()); + + String json = """ + { + "my_ingest_pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field": "_field", + "value": "_value" + } + } + ] + }, + "my_ingest_pipeline_1": { + "description": "_description", + "processors": [ + { + "set" : { + "field": "_field", + "value": "_value" + } + } + ] + } + }"""; + + prevState = updatedState; + updatedState = processJSON(action, prevState, json); + assertThat(updatedState.keys(), containsInAnyOrder("my_ingest_pipeline", "my_ingest_pipeline_1")); + + String halfJSON = """ + { + "my_ingest_pipeline_1": { + "description": "_description", + "processors": [ + { + "set" : { + "field": "_field", + "value": "_value" + } + } + ] + } + }"""; + + updatedState = processJSON(action, prevState, halfJSON); + assertThat(updatedState.keys(), containsInAnyOrder("my_ingest_pipeline_1")); + + updatedState = processJSON(action, prevState, emptyJSON); + assertThat(updatedState.keys(), empty()); + } + + @SuppressWarnings("unchecked") + private ReservedPipelineAction makeSpiedAction() { + return spy(new ReservedPipelineAction(ingestService, fileSettingsService)); + } +} diff --git a/server/src/test/java/org/elasticsearch/ingest/FakeProcessor.java b/server/src/test/java/org/elasticsearch/ingest/FakeProcessor.java index ac1f91f151b96..e759bcf1e42b9 100644 --- a/server/src/test/java/org/elasticsearch/ingest/FakeProcessor.java +++ 
b/server/src/test/java/org/elasticsearch/ingest/FakeProcessor.java @@ -10,13 +10,13 @@ import java.util.function.Consumer; -class FakeProcessor implements Processor { +public class FakeProcessor implements Processor { private String type; private String tag; private String description; private Consumer executor; - FakeProcessor(String type, String tag, String description, Consumer executor) { + public FakeProcessor(String type, String tag, String description, Consumer executor) { this.type = type; this.tag = tag; this.description = description; diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index ff043b216e149..7bb757e9e10f7 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -8,7 +8,14 @@ package org.elasticsearch.reservedstate.service; +import org.elasticsearch.Build; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.client.internal.ClusterAdminClient; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -18,11 +25,12 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.ingest.IngestInfo; +import org.elasticsearch.ingest.ProcessorInfo; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import 
org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.XContentParser; import org.junit.After; import org.junit.Before; import org.mockito.Mockito; @@ -36,11 +44,14 @@ import java.time.LocalDateTime; import java.time.ZoneId; import java.time.ZoneOffset; +import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; @@ -58,8 +69,12 @@ public class FileSettingsServiceTests extends ESTestCase { private FileSettingsService fileSettingsService; private ReservedClusterStateService controller; private ThreadPool threadpool; + private NodeClient nodeClient; + private ClusterAdminClient clusterAdminClient; + private NodeInfo nodeInfo; @Before + @SuppressWarnings("unchecked") public void setUp() throws Exception { super.setUp(); @@ -89,7 +104,41 @@ public void setUp() throws Exception { controller = new ReservedClusterStateService(clusterService, List.of(new ReservedClusterSettingsAction(clusterSettings))); - fileSettingsService = new FileSettingsService(clusterService, controller, env); + DiscoveryNode discoveryNode = new DiscoveryNode( + "_node_id", + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), + Version.CURRENT + ); + + nodeInfo = new NodeInfo( + Version.CURRENT, + Build.CURRENT, + discoveryNode, + Settings.EMPTY, + null, + null, + null, + null, + null, + null, + null, + new IngestInfo(Collections.singletonList(new ProcessorInfo("set"))), + null, + null + ); + NodesInfoResponse response = new NodesInfoResponse(new ClusterName("elasticsearch"), List.of(nodeInfo), List.of()); + + clusterAdminClient = 
mock(ClusterAdminClient.class); + doAnswer(i -> { + ((ActionListener) i.getArgument(1)).onResponse(response); + return null; + }).when(clusterAdminClient).nodesInfo(any(), any()); + + nodeClient = mock(NodeClient.class); + fileSettingsService = spy(new FileSettingsService(clusterService, controller, env, nodeClient)); + doAnswer(i -> clusterAdminClient).when(fileSettingsService).clusterAdminClient(); } @After @@ -174,9 +223,10 @@ public void testInitialFile() throws Exception { doAnswer((Answer) invocation -> { ((Consumer) invocation.getArgument(2)).accept(new IllegalStateException("Some exception")); return null; - }).when(stateService).process(any(), (XContentParser) any(), any()); + }).when(stateService).process(any(), (ReservedStateChunk) any(), any()); - FileSettingsService service = spy(new FileSettingsService(clusterService, stateService, env)); + FileSettingsService service = spy(new FileSettingsService(clusterService, stateService, env, nodeClient)); + doAnswer(i -> clusterAdminClient).when(service).clusterAdminClient(); Files.createDirectories(service.operatorSettingsDir()); @@ -205,7 +255,7 @@ public void testInitialFile() throws Exception { doAnswer((Answer) invocation -> { ((Consumer) invocation.getArgument(2)).accept(null); return null; - }).when(stateService).process(any(), (XContentParser) any(), any()); + }).when(stateService).process(any(), (ReservedStateChunk) any(), any()); service.start(); service.startWatcher(clusterService.state(), true); @@ -219,13 +269,14 @@ public void testInitialFile() throws Exception { @SuppressWarnings("unchecked") public void testStopWorksInMiddleOfProcessing() throws Exception { var spiedController = spy(controller); - var fsService = new FileSettingsService(clusterService, spiedController, env); - + var fsService = new FileSettingsService(clusterService, spiedController, env, nodeClient); FileSettingsService service = spy(fsService); + doAnswer(i -> clusterAdminClient).when(service).clusterAdminClient(); + 
CountDownLatch processFileLatch = new CountDownLatch(1); CountDownLatch deadThreadLatch = new CountDownLatch(1); - doAnswer((Answer) invocation -> { + doAnswer((Answer) invocation -> { processFileLatch.countDown(); new Thread(() -> { // Simulate a thread that never comes back and decrements the @@ -236,8 +287,8 @@ public void testStopWorksInMiddleOfProcessing() throws Exception { throw new RuntimeException(e); } }).start(); - return null; - }).when(spiedController).process(any(String.class), any(XContentParser.class), any(Consumer.class)); + return new ReservedStateChunk(Collections.emptyMap(), new ReservedStateVersion(1L, Version.CURRENT)); + }).when(spiedController).parse(any(String.class), any()); service.start(); assertTrue(service.watching()); @@ -249,7 +300,7 @@ public void testStopWorksInMiddleOfProcessing() throws Exception { // we need to wait a bit, on MacOS it may take up to 10 seconds for the Java watcher service to notice the file, // on Linux is instantaneous. Windows is instantaneous too. 
- processFileLatch.await(30, TimeUnit.SECONDS); + assertTrue(processFileLatch.await(30, TimeUnit.SECONDS)); // Stopping the service should interrupt the watcher thread, we should be able to stop service.stop(); @@ -262,13 +313,14 @@ public void testStopWorksInMiddleOfProcessing() throws Exception { @SuppressWarnings("unchecked") public void testStopWorksIfProcessingDidntReturnYet() throws Exception { var spiedController = spy(controller); - var fsService = new FileSettingsService(clusterService, spiedController, env); + var fsService = new FileSettingsService(clusterService, spiedController, env, nodeClient); FileSettingsService service = spy(fsService); + doAnswer(i -> clusterAdminClient).when(service).clusterAdminClient(); CountDownLatch processFileLatch = new CountDownLatch(1); CountDownLatch deadThreadLatch = new CountDownLatch(1); - doAnswer((Answer) invocation -> { + doAnswer((Answer) invocation -> { processFileLatch.countDown(); // allow the other thread to continue, but hold on a bit to avoid // setting the count-down latch in the main watcher loop. @@ -282,8 +334,8 @@ public void testStopWorksIfProcessingDidntReturnYet() throws Exception { throw new RuntimeException(e); } }).start(); - return null; - }).when(spiedController).process(any(String.class), any(XContentParser.class), any(Consumer.class)); + return new ReservedStateChunk(Collections.emptyMap(), new ReservedStateVersion(1L, Version.CURRENT)); + }).when(spiedController).parse(any(String.class), any()); service.start(); assertTrue(service.watching()); @@ -295,7 +347,7 @@ public void testStopWorksIfProcessingDidntReturnYet() throws Exception { // we need to wait a bit, on MacOS it may take up to 10 seconds for the Java watcher service to notice the file, // on Linux is instantaneous. Windows is instantaneous too. 
- processFileLatch.await(30, TimeUnit.SECONDS); + assertTrue(processFileLatch.await(30, TimeUnit.SECONDS)); // Stopping the service should interrupt the watcher thread, we should be able to stop service.stop(); @@ -304,4 +356,118 @@ public void testStopWorksIfProcessingDidntReturnYet() throws Exception { // let the deadlocked thread end, so we can cleanly exit the test deadThreadLatch.countDown(); } + + @SuppressWarnings("unchecked") + public void testNodeInfosRefresh() throws Exception { + var spiedController = spy(controller); + var csAdminClient = spy(clusterAdminClient); + var response = new NodesInfoResponse(new ClusterName("elasticsearch"), List.of(nodeInfo), List.of()); + + doAnswer(i -> { + ((ActionListener) i.getArgument(1)).onResponse(response); + return null; + }).when(csAdminClient).nodesInfo(any(), any()); + + var service = spy(new FileSettingsService(clusterService, spiedController, env, nodeClient)); + doAnswer(i -> csAdminClient).when(service).clusterAdminClient(); + + doAnswer( + (Answer) invocation -> new ReservedStateChunk( + Collections.emptyMap(), + new ReservedStateVersion(1L, Version.CURRENT) + ) + ).when(spiedController).parse(any(String.class), any()); + + Files.createDirectories(service.operatorSettingsDir()); + // Make some fake settings file to cause the file settings service to process it + Files.write(service.operatorSettingsFile(), "{}".getBytes(StandardCharsets.UTF_8)); + + clearInvocations(csAdminClient); + clearInvocations(spiedController); + + // we haven't fetched the node infos ever, since we haven't done any file processing + assertNull(service.nodeInfos()); + + // call the processing twice + service.processFileSettings(service.operatorSettingsFile(), (e) -> { + if (e != null) { + fail("shouldn't get an exception"); + } + }); + // after the first processing we should have node infos + assertEquals(1, service.nodeInfos().getNodes().size()); + + service.processFileSettings(service.operatorSettingsFile(), (e) -> { + if (e != null) 
{ + fail("shouldn't get an exception"); + } + }); + + // node infos should have been fetched only once + verify(csAdminClient, times(1)).nodesInfo(any(), any()); + verify(spiedController, times(2)).process(any(), any(ReservedStateChunk.class), any()); + + // pretend we added a new node + + final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT); + + NodeInfo localNodeInfo = new NodeInfo( + Version.CURRENT, + Build.CURRENT, + localNode, + Settings.EMPTY, + null, + null, + null, + null, + null, + null, + null, + new IngestInfo(Collections.singletonList(new ProcessorInfo("set"))), + null, + null + ); + var newResponse = new NodesInfoResponse(new ClusterName("elasticsearch"), List.of(nodeInfo, localNodeInfo), List.of()); + + final ClusterState prevState = clusterService.state(); + final ClusterState clusterState = ClusterState.builder(prevState) + .nodes( + DiscoveryNodes.builder(prevState.getNodes()).add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()) + ) + .build(); + + ClusterChangedEvent event = new ClusterChangedEvent("transport", clusterState, prevState); + assertTrue(event.nodesChanged()); + service.clusterChanged(event); + + doAnswer(i -> { + ((ActionListener) i.getArgument(1)).onResponse(newResponse); + return null; + }).when(csAdminClient).nodesInfo(any(), any()); + + // this wouldn't change yet, node fetch transport action is invoked on demand, when we need to process file changes, + // not every time we update the cluster state + assertEquals(1, service.nodeInfos().getNodes().size()); + + // call the processing twice + service.processFileSettings(service.operatorSettingsFile(), (e) -> { + if (e != null) { + fail("shouldn't get an exception"); + } + }); + + assertEquals(2, service.nodeInfos().getNodes().size()); + + service.processFileSettings(service.operatorSettingsFile(), (e) -> { + if (e != null) { + fail("shouldn't get an exception"); + } + }); + + assertEquals(2, 
service.nodeInfos().getNodes().size()); + + // node infos should have been fetched one more time + verify(csAdminClient, times(2)).nodesInfo(any(), any()); + verify(spiedController, times(4)).process(any(), any(ReservedStateChunk.class), any()); + } } From 55c489d83d1578ad4e00491fe75bbf4987819296 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 27 Sep 2022 12:44:06 -0700 Subject: [PATCH 24/26] Generate server resources on IDE import (#90420) --- build-tools-internal/src/main/groovy/elasticsearch.ide.gradle | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 91f6345d4c4ba..65d27fbe73ff6 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -110,7 +110,9 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { description = 'Builds artifacts needed as dependency for IDE modules' dependsOn ':client:rest-high-level:shadowJar', ':plugins:repository-hdfs:hadoop-client-api:shadowJar', - ':libs:elasticsearch-x-content:generateProviderImpl' + ':libs:elasticsearch-x-content:generateProviderImpl', + ':server:generateModulesList', + ':server:generatePluginsList' } idea { From e21be6ece781b4272eeb99d2da55642d7d5a86cf Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 27 Sep 2022 15:40:16 -0500 Subject: [PATCH 25/26] Preventing serialization errors in the nodes stats API (#90319) Preventing serialization errors in the nodes stats API, and adding logging to the ingest counter code so that we can find the root cause of the problem in the future. 
--- docs/changelog/90319.yaml | 6 + .../ingest/CompoundProcessor.java | 41 +++++-- .../ingest/ConditionalProcessor.java | 29 ++++- .../elasticsearch/ingest/IngestMetric.java | 22 +++- .../elasticsearch/ingest/IngestService.java | 113 ++++++++++-------- .../org/elasticsearch/ingest/Pipeline.java | 27 ++++- .../ingest/IngestMetricTests.java | 3 +- 7 files changed, 168 insertions(+), 73 deletions(-) create mode 100644 docs/changelog/90319.yaml diff --git a/docs/changelog/90319.yaml b/docs/changelog/90319.yaml new file mode 100644 index 0000000000000..0d94afcd1c12e --- /dev/null +++ b/docs/changelog/90319.yaml @@ -0,0 +1,6 @@ +pr: 90319 +summary: Preventing serialization errors in the nodes stats API +area: Ingest Node +type: bug +issues: + - 77973 diff --git a/server/src/main/java/org/elasticsearch/ingest/CompoundProcessor.java b/server/src/main/java/org/elasticsearch/ingest/CompoundProcessor.java index 3d5637c36f012..74700ddd6d4e5 100644 --- a/server/src/main/java/org/elasticsearch/ingest/CompoundProcessor.java +++ b/server/src/main/java/org/elasticsearch/ingest/CompoundProcessor.java @@ -8,6 +8,8 @@ package org.elasticsearch.ingest; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.core.Tuple; @@ -16,6 +18,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.LongSupplier; import java.util.stream.Collectors; @@ -30,6 +33,8 @@ public class CompoundProcessor implements Processor { public static final String ON_FAILURE_PROCESSOR_TAG_FIELD = "on_failure_processor_tag"; public static final String ON_FAILURE_PIPELINE_FIELD = "on_failure_pipeline"; + private static final Logger logger = LogManager.getLogger(CompoundProcessor.class); + private final boolean ignoreFailure; private final List processors; private final List 
onFailureProcessors; @@ -191,25 +196,43 @@ void innerExecute(int currentProcessor, IngestDocument ingestDocument, BiConsume final IngestMetric finalMetric = processorsWithMetrics.get(currentProcessor).v2(); final Processor finalProcessor = processorsWithMetrics.get(currentProcessor).v1(); final IngestDocument finalIngestDocument = ingestDocument; + /* + * Our assumption is that the listener passed to the processor is only ever called once. However, there is no way to enforce + * that in all processors and all of the code that they call. If the listener is called more than once it causes problems + * such as the metrics being wrong. The listenerHasBeenCalled variable is used to make sure that the code in the listener + * is only executed once. + */ + final AtomicBoolean listenerHasBeenCalled = new AtomicBoolean(false); finalMetric.preIngest(); + final AtomicBoolean postIngestHasBeenCalled = new AtomicBoolean(false); try { finalProcessor.execute(ingestDocument, (result, e) -> { - long ingestTimeInNanos = relativeTimeProvider.getAsLong() - finalStartTimeInNanos; - finalMetric.postIngest(ingestTimeInNanos); - - if (e != null) { - executeOnFailureOuter(finalCurrentProcessor, finalIngestDocument, handler, finalProcessor, finalMetric, e); + if (listenerHasBeenCalled.getAndSet(true)) { + logger.warn("A listener was unexpectedly called more than once", new RuntimeException()); + assert false : "A listener was unexpectedly called more than once"; } else { - if (result != null) { - innerExecute(nextProcessor, result, handler); + long ingestTimeInNanos = relativeTimeProvider.getAsLong() - finalStartTimeInNanos; + finalMetric.postIngest(ingestTimeInNanos); + postIngestHasBeenCalled.set(true); + if (e != null) { + executeOnFailureOuter(finalCurrentProcessor, finalIngestDocument, handler, finalProcessor, finalMetric, e); } else { - handler.accept(null, null); + if (result != null) { + innerExecute(nextProcessor, result, handler); + } else { + handler.accept(null, null); + } } } 
}); } catch (Exception e) { long ingestTimeInNanos = relativeTimeProvider.getAsLong() - startTimeInNanos; - finalMetric.postIngest(ingestTimeInNanos); + if (postIngestHasBeenCalled.get()) { + logger.warn("Preventing postIngest from being called more than once", new RuntimeException()); + assert false : "Attempt to call postIngest more than once"; + } else { + finalMetric.postIngest(ingestTimeInNanos); + } executeOnFailureOuter(currentProcessor, finalIngestDocument, handler, finalProcessor, finalMetric, e); } } diff --git a/server/src/main/java/org/elasticsearch/ingest/ConditionalProcessor.java b/server/src/main/java/org/elasticsearch/ingest/ConditionalProcessor.java index 528bb402a59e8..858fe5e675b36 100644 --- a/server/src/main/java/org/elasticsearch/ingest/ConditionalProcessor.java +++ b/server/src/main/java/org/elasticsearch/ingest/ConditionalProcessor.java @@ -8,6 +8,8 @@ package org.elasticsearch.ingest; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.script.DynamicMap; @@ -26,6 +28,7 @@ import java.util.ListIterator; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.LongSupplier; @@ -45,6 +48,8 @@ public class ConditionalProcessor extends AbstractProcessor implements WrappingP return value; }); + private static final Logger logger = LogManager.getLogger(ConditionalProcessor.class); + static final String TYPE = "conditional"; private final Script condition; @@ -120,15 +125,27 @@ public void execute(IngestDocument ingestDocument, BiConsumer { - long ingestTimeInNanos = relativeTimeProvider.getAsLong() - startTimeInNanos; - metric.postIngest(ingestTimeInNanos); - if (e != null) { - metric.ingestFailed(); - handler.accept(null, e); + if 
(listenerHasBeenCalled.getAndSet(true)) { + logger.warn("A listener was unexpectedly called more than once", new RuntimeException()); + assert false : "A listener was unexpectedly called more than once"; } else { - handler.accept(result, null); + long ingestTimeInNanos = relativeTimeProvider.getAsLong() - startTimeInNanos; + metric.postIngest(ingestTimeInNanos); + if (e != null) { + metric.ingestFailed(); + handler.accept(null, e); + } else { + handler.accept(result, null); + } } }); } else { diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestMetric.java b/server/src/main/java/org/elasticsearch/ingest/IngestMetric.java index de26acff1a024..13c57d9e06b28 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestMetric.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestMetric.java @@ -8,6 +8,8 @@ package org.elasticsearch.ingest; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.metrics.CounterMetric; import java.util.concurrent.TimeUnit; @@ -22,6 +24,8 @@ */ class IngestMetric { + private static final Logger logger = LogManager.getLogger(IngestMetric.class); + /** * The time it takes to complete the measured item. */ @@ -53,7 +57,19 @@ void preIngest() { */ void postIngest(long ingestTimeInNanos) { long current = ingestCurrent.decrementAndGet(); - assert current >= 0 : "ingest metric current count double-decremented"; + if (current < 0) { + /* + * This ought to never happen. However if it does, it's incredibly bad because ingestCurrent being negative causes a + * serialization error that prevents the nodes stats API from working. 
So we're doing 3 things here: + * (1) Log a stack trace at warn level so that the Elasticsearch engineering team can track down and fix the source of the + * bug if it still exists + * (2) Throw an AssertionError if assertions are enabled so that we are aware of the bug + * (3) Increment the counter back up so that we don't hit serialization failures + */ + logger.warn("Current ingest counter decremented below 0", new RuntimeException()); + assert false : "ingest metric current count double-decremented"; + ingestCurrent.incrementAndGet(); + } this.ingestTimeInNanos.inc(ingestTimeInNanos); ingestCount.inc(); } @@ -84,6 +100,8 @@ void add(IngestMetric metrics) { IngestStats.Stats createStats() { // we track ingestTime at nanosecond resolution, but IngestStats uses millisecond resolution for reporting long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(ingestTimeInNanos.count()); - return new IngestStats.Stats(ingestCount.count(), ingestTimeInMillis, ingestCurrent.get(), ingestFailed.count()); + // It is possible for the current count to briefly drop below 0, causing serialization problems. 
See #90319 + long currentCount = Math.max(0, ingestCurrent.get()); + return new IngestStats.Stats(ingestCount.count(), ingestTimeInMillis, currentCount, ingestFailed.count()); } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 387ea98aa4f97..5e50e94de2f33 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -72,6 +72,7 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -911,60 +912,72 @@ private void innerExecute( VersionType versionType = indexRequest.versionType(); Map sourceAsMap = indexRequest.sourceAsMap(); IngestDocument ingestDocument = new IngestDocument(index, id, version, routing, versionType, sourceAsMap); + /* + * Our assumption is that the listener passed to the processor is only ever called once. However, there is no way to enforce + * that in all processors and all of the code that they call. If the listener is called more than once it causes problems + * such as the metrics being wrong. The listenerHasBeenCalled variable is used to make sure that the code in the listener + * is only executed once. 
+ */ + final AtomicBoolean listenerHasBeenCalled = new AtomicBoolean(false); ingestDocument.executePipeline(pipeline, (result, e) -> { - long ingestTimeInNanos = System.nanoTime() - startTimeInNanos; - totalMetrics.postIngest(ingestTimeInNanos); - if (e != null) { - totalMetrics.ingestFailed(); - handler.accept(e); - } else if (result == null) { - itemDroppedHandler.accept(slot); - handler.accept(null); + if (listenerHasBeenCalled.getAndSet(true)) { + logger.warn("A listener was unexpectedly called more than once", new RuntimeException()); + assert false : "A listener was unexpectedly called more than once"; } else { - org.elasticsearch.script.Metadata metadata = ingestDocument.getMetadata(); - - // it's fine to set all metadata fields all the time, as ingest document holds their starting values - // before ingestion, which might also get modified during ingestion. - indexRequest.index(metadata.getIndex()); - indexRequest.id(metadata.getId()); - indexRequest.routing(metadata.getRouting()); - indexRequest.version(metadata.getVersion()); - if (metadata.getVersionType() != null) { - indexRequest.versionType(VersionType.fromString(metadata.getVersionType())); - } - Number number; - if ((number = metadata.getIfSeqNo()) != null) { - indexRequest.setIfSeqNo(number.longValue()); - } - if ((number = metadata.getIfPrimaryTerm()) != null) { - indexRequest.setIfPrimaryTerm(number.longValue()); - } - try { - boolean ensureNoSelfReferences = ingestDocument.doNoSelfReferencesCheck(); - indexRequest.source(ingestDocument.getSource(), indexRequest.getContentType(), ensureNoSelfReferences); - } catch (IllegalArgumentException ex) { - // An IllegalArgumentException can be thrown when an ingest - // processor creates a source map that is self-referencing. - // In that case, we catch and wrap the exception so we can - // include which pipeline failed. 
+ long ingestTimeInNanos = System.nanoTime() - startTimeInNanos; + totalMetrics.postIngest(ingestTimeInNanos); + if (e != null) { totalMetrics.ingestFailed(); - handler.accept( - new IllegalArgumentException( - "Failed to generate the source document for ingest pipeline [" + pipeline.getId() + "]", - ex - ) - ); - return; - } - Map map; - if ((map = metadata.getDynamicTemplates()) != null) { - Map mergedDynamicTemplates = new HashMap<>(indexRequest.getDynamicTemplates()); - mergedDynamicTemplates.putAll(map); - indexRequest.setDynamicTemplates(mergedDynamicTemplates); - } - postIngest(ingestDocument, indexRequest); + handler.accept(e); + } else if (result == null) { + itemDroppedHandler.accept(slot); + handler.accept(null); + } else { + org.elasticsearch.script.Metadata metadata = ingestDocument.getMetadata(); + + // it's fine to set all metadata fields all the time, as ingest document holds their starting values + // before ingestion, which might also get modified during ingestion. + indexRequest.index(metadata.getIndex()); + indexRequest.id(metadata.getId()); + indexRequest.routing(metadata.getRouting()); + indexRequest.version(metadata.getVersion()); + if (metadata.getVersionType() != null) { + indexRequest.versionType(VersionType.fromString(metadata.getVersionType())); + } + Number number; + if ((number = metadata.getIfSeqNo()) != null) { + indexRequest.setIfSeqNo(number.longValue()); + } + if ((number = metadata.getIfPrimaryTerm()) != null) { + indexRequest.setIfPrimaryTerm(number.longValue()); + } + try { + boolean ensureNoSelfReferences = ingestDocument.doNoSelfReferencesCheck(); + indexRequest.source(ingestDocument.getSource(), indexRequest.getContentType(), ensureNoSelfReferences); + } catch (IllegalArgumentException ex) { + // An IllegalArgumentException can be thrown when an ingest + // processor creates a source map that is self-referencing. + // In that case, we catch and wrap the exception so we can + // include which pipeline failed. 
+ totalMetrics.ingestFailed(); + handler.accept( + new IllegalArgumentException( + "Failed to generate the source document for ingest pipeline [" + pipeline.getId() + "]", + ex + ) + ); + return; + } + Map map; + if ((map = metadata.getDynamicTemplates()) != null) { + Map mergedDynamicTemplates = new HashMap<>(indexRequest.getDynamicTemplates()); + mergedDynamicTemplates.putAll(map); + indexRequest.setDynamicTemplates(mergedDynamicTemplates); + } + postIngest(ingestDocument, indexRequest); - handler.accept(null); + handler.accept(null); + } } }); } diff --git a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java index e9d78bd9f1003..8061e7a90aee9 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java +++ b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java @@ -8,6 +8,8 @@ package org.elasticsearch.ingest; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.core.Nullable; import org.elasticsearch.script.ScriptService; @@ -16,6 +18,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.LongSupplier; @@ -30,6 +33,8 @@ public final class Pipeline { public static final String ON_FAILURE_KEY = "on_failure"; public static final String META_KEY = "_meta"; + private static final Logger logger = LogManager.getLogger(Pipeline.class); + private final String id; @Nullable private final String description; @@ -113,14 +118,26 @@ public static Pipeline create( */ public void execute(IngestDocument ingestDocument, BiConsumer handler) { final long startTimeInNanos = relativeTimeProvider.getAsLong(); + /* + * Our assumption is that the listener passed to the processor is only ever called once. 
However, there is no way to enforce + * that in all processors and all of the code that they call. If the listener is called more than once it causes problems + * such as the metrics being wrong. The listenerHasBeenCalled variable is used to make sure that the code in the listener + * is only executed once. + */ + final AtomicBoolean listenerHasBeenCalled = new AtomicBoolean(false); metrics.preIngest(); compoundProcessor.execute(ingestDocument, (result, e) -> { - long ingestTimeInNanos = relativeTimeProvider.getAsLong() - startTimeInNanos; - metrics.postIngest(ingestTimeInNanos); - if (e != null) { - metrics.ingestFailed(); + if (listenerHasBeenCalled.getAndSet(true)) { + logger.warn("A listener was unexpectedly called more than once", new RuntimeException()); + assert false : "A listener was unexpectedly called more than once"; + } else { + long ingestTimeInNanos = relativeTimeProvider.getAsLong() - startTimeInNanos; + metrics.postIngest(ingestTimeInNanos); + if (e != null) { + metrics.ingestFailed(); + } + handler.accept(result, e); } - handler.accept(result, e); }); } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestMetricTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestMetricTests.java index d4fd0d6c0ef52..06fa24e493949 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestMetricTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestMetricTests.java @@ -42,7 +42,8 @@ public void testPostIngestDoubleDecrement() { // the second postIngest triggers an assertion error expectThrows(AssertionError.class, () -> metric.postIngest(500000L)); - assertThat(-1L, equalTo(metric.createStats().getIngestCurrent())); + // We never allow the reported ingestCurrent to be negative: + assertThat(metric.createStats().getIngestCurrent(), equalTo(0L)); } } From 6d8332ad045155b3aaa49f6363fa3a984a6cb16c Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 27 Sep 2022 14:48:38 -0700 Subject: [PATCH 26/26] [DOCS] Fix links to .NET 
and PHP clients (#90276) --- .../docs/en/security/ccs-clients-integrations/http.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc index e75753a6dc035..368fd8c1b7a0b 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc @@ -85,8 +85,8 @@ specific clients, refer to: * {java-api-client}/_basic_authentication.html[Java] * {jsclient-current}/auth-reference.html[JavaScript] -* https://www.elastic.co/guide/en/elasticsearch/client/net-api/master/configuration-options.html[.NET] +* {es-dotnet-client}/configuration.html[.NET] * https://metacpan.org/pod/Search::Elasticsearch::Cxn::HTTPTiny#CONFIGURATION[Perl] -* https://www.elastic.co/guide/en/elasticsearch/client/php-api/master/security.html[PHP] +* {es-php-client}/connecting.html[PHP] * https://elasticsearch-py.readthedocs.io/en/master/#ssl-and-authentication[Python] * https://github.com/elasticsearch/elasticsearch-ruby/tree/master/elasticsearch-transport#authentication[Ruby]