From 70d911cad98e843f4702673015abce508190d389 Mon Sep 17 00:00:00 2001
From: "opensearch-trigger-bot[bot]"
 <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
Date: Thu, 1 Sep 2022 21:26:36 -0700
Subject: [PATCH 01/11] [AUTO] [main] Added bwc version 2.2.2. (#4383)

* Added bwc version 2.2.2

* Add changelog

Signed-off-by: Kunal Kotwani

Signed-off-by: Kunal Kotwani
Co-authored-by: opensearch-ci-bot
Co-authored-by: Kunal Kotwani
---
 .ci/bwcVersions                                  | 1 +
 CHANGELOG.md                                     | 4 +++-
 server/src/main/java/org/opensearch/Version.java | 1 +
 3 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 1ba3ee562317a..914426eebe35e 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -49,4 +49,5 @@ BWC_VERSION:
   - "2.1.1"
   - "2.2.0"
   - "2.2.1"
+  - "2.2.2"
   - "2.3.0"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 376c8f37c8063..76f134f10c29e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344))
 - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
 - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847))
+- BWC version 2.2.2 ([#4383](https://github.com/opensearch-project/OpenSearch/pull/4383))
+
 ### Dependencies
 - Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0
 - Bumps `xmlbeans` from 5.1.0 to 5.1.1
@@ -58,4 +60,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 
 [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD
-[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
\ No newline at end of file
+[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java
index ba512d3fbcdd9..10e5f16419a7a 100644
--- a/server/src/main/java/org/opensearch/Version.java
+++ b/server/src/main/java/org/opensearch/Version.java
@@ -96,6 +96,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_1_1 = new Version(2010199, org.apache.lucene.util.Version.LUCENE_9_2_0);
     public static final Version V_2_2_0 = new Version(2020099, org.apache.lucene.util.Version.LUCENE_9_3_0);
     public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0);
+    public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0);
     public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0);
     public static final Version CURRENT = V_3_0_0;
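A note on the numeric ids above: each Version constant packs the version into a single integer. Judging from the ids visible in this hunk (2020299 for 2.2.2, 2030099 for 2.3.0, 3000099 for 3.0.0), the layout is major/minor/revision/build with two decimal digits per component; the line below is an inferred illustration of that arithmetic, not the authoritative definition.

    // Inferred encoding: id = major * 1_000_000 + minor * 10_000 + revision * 100 + build,
    // where a build of 99 appears to mark a release build.
    int v222 = 2 * 1_000_000 + 2 * 10_000 + 2 * 100 + 99; // 2020299, matching V_2_2_2 above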
From c885686b0fb12d9b0397d38c37518c65cbb466c5 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Fri, 2 Sep 2022 11:52:02 -0700
Subject: [PATCH 02/11] [Segment Replication] Bump segment infos counter before
 commit during replica promotion (#4365)

* [Segment Replication] Bump segment infos counter before commit during replica promotion

Signed-off-by: Suraj Singh

* Add changelog entry

Signed-off-by: Suraj Singh

Signed-off-by: Suraj Singh
---
 CHANGELOG.md                                          | 1 +
 .../opensearch/index/engine/NRTReplicationEngine.java | 9 +++++++++
 .../index/engine/NRTReplicationEngineTests.java       | 2 ++
 3 files changed, 12 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 76f134f10c29e..48d320dd5bce6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,6 +35,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314))
 - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331))
 - Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225))
+- [Segment Replication] Bump segment infos counter before commit during replica promotion ([#4365](https://github.com/opensearch-project/OpenSearch/pull/4365))
 - Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
 - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352))
 
diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
index 6f5b7030ed65f..cf753e3360c39 100644
--- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
+++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
@@ -54,6 +54,8 @@ public class NRTReplicationEngine extends Engine {
     private final LocalCheckpointTracker localCheckpointTracker;
     private final WriteOnlyTranslogManager translogManager;
 
+    private static final int SI_COUNTER_INCREMENT = 10;
+
     public NRTReplicationEngine(EngineConfig engineConfig) {
         super(engineConfig);
         store.incRef();
@@ -142,6 +144,13 @@ public synchronized void updateSegments(final SegmentInfos infos, long seqNo) th
     public void commitSegmentInfos() throws IOException {
         // TODO: This method should wait for replication events to finalize.
         final SegmentInfos latestSegmentInfos = getLatestSegmentInfos();
+        /*
+        This is a workaround solution which decreases the chances of conflict on replica nodes when the same file is copied
+        from two different primaries during failover. Increasing the counter helps avoid this conflict, as the counter is
+        used to generate new segment file names. The ideal solution is to identify the counter from the previous primary.
+        */
+        latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT;
+        latestSegmentInfos.changed();
         store.commitSegmentInfos(latestSegmentInfos, localCheckpointTracker.getMaxSeqNo(), localCheckpointTracker.getProcessedCheckpoint());
         translogManager.syncTranslog();
     }
diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
index 0008afcc901c7..540054782133a 100644
--- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
@@ -252,6 +252,8 @@ public void testCommitSegmentInfos() throws Exception {
         // ensure getLatestSegmentInfos returns an updated infos ref with correct userdata.
         final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos();
         assertEquals(previousInfos.getGeneration(), latestSegmentInfos.getLastGeneration());
+        assertEquals(previousInfos.getVersion(), latestSegmentInfos.getVersion());
+        assertEquals(previousInfos.counter, latestSegmentInfos.counter);
         Map<String, String> userData = latestSegmentInfos.getUserData();
         assertEquals(processedCheckpoint, localCheckpointTracker.getProcessedCheckpoint());
         assertEquals(maxSeqNo, Long.parseLong(userData.get(MAX_SEQ_NO)));
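Why bumping the counter works: Lucene derives the file name of every newly written segment from SegmentInfos.counter, so two diverged primaries holding the same counter value would produce identically named segment files. A rough sketch of the naming scheme, mirroring (but not quoting) Lucene's IndexWriter.newSegmentName():

    // Segment names are the counter rendered in base 36 with a leading underscore:
    // counter 4 -> "_4", counter 10 -> "_a". Colliding names from two different old
    // primaries are what the SI_COUNTER_INCREMENT bump above makes unlikely.
    static String nextSegmentName(org.apache.lucene.index.SegmentInfos infos) {
        return "_" + Long.toString(infos.counter++, Character.MAX_RADIX);
    }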
From b206e98acb69c9d839b7eef74edff8f904eb4b88 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Fri, 2 Sep 2022 11:52:35 -0700
Subject: [PATCH 03/11] [Segment Replication] Add check to cancel ongoing
 replication with old primary on onNewCheckpoint on replica (#4363)

* [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica

Signed-off-by: Suraj Singh

* Add changelog entry

Signed-off-by: Suraj Singh

* Address review comments

Signed-off-by: Suraj Singh

* Address review comments 2

Signed-off-by: Suraj Singh

* Test failures

Signed-off-by: Suraj Singh

Signed-off-by: Suraj Singh
---
 CHANGELOG.md                                   |  1 +
 .../replication/SegmentReplicationTarget.java  |  4 ++
 .../SegmentReplicationTargetService.java       | 29 +++++++----
 .../common/ReplicationCollection.java          | 16 ++++--
 .../SegmentReplicationTargetServiceTests.java  | 49 ++++++++++++++++++-
 .../recovery/ReplicationCollectionTests.java   | 18 +++++++
 6 files changed, 101 insertions(+), 16 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 48d320dd5bce6..182a6b36fca48 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - [Segment Replication] Bump segment infos counter before commit during replica promotion ([#4365](https://github.com/opensearch-project/OpenSearch/pull/4365))
 - Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
 - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352))
+- [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/OpenSearch/pull/4363))
 
 ### Security
 - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))
diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java
index d1d6104a416ca..7c28406036ddd 100644
--- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java
+++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java
@@ -56,6 +56,10 @@ public class SegmentReplicationTarget extends ReplicationTarget {
     private final SegmentReplicationState state;
     protected final MultiFileWriter multiFileWriter;
 
+    public ReplicationCheckpoint getCheckpoint() {
+        return this.checkpoint;
+    }
+
     public SegmentReplicationTarget(
         ReplicationCheckpoint checkpoint,
         IndexShard indexShard,
diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java
index 9e6b66dc4d7d6..8fc53ccd3bc08 100644
--- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java
+++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java
@@ -18,6 +18,7 @@
 import org.opensearch.common.Nullable;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.CancellableThreads;
+import org.opensearch.common.util.concurrent.ConcurrentCollections;
 import org.opensearch.index.shard.IndexEventListener;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.ShardId;
@@ -34,7 +35,6 @@
 import org.opensearch.transport.TransportRequestHandler;
 import org.opensearch.transport.TransportService;
 
-import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -54,7 +54,7 @@ public class SegmentReplicationTargetService implements IndexEventListener {
 
     private final SegmentReplicationSourceFactory sourceFactory;
 
-    private final Map<ShardId, ReplicationCheckpoint> latestReceivedCheckpoint = new HashMap<>();
+    private final Map<ShardId, ReplicationCheckpoint> latestReceivedCheckpoint = ConcurrentCollections.newConcurrentMap();
 
     // Empty Implementation, only required while Segment Replication is under feature flag.
     public static final SegmentReplicationTargetService NO_OP = new SegmentReplicationTargetService() {
@@ -151,14 +151,23 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe
         } else {
             latestReceivedCheckpoint.put(replicaShard.shardId(), receivedCheckpoint);
         }
-        if (onGoingReplications.isShardReplicating(replicaShard.shardId())) {
-            logger.trace(
-                () -> new ParameterizedMessage(
-                    "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}",
-                    replicaShard.getLatestReplicationCheckpoint()
-                )
-            );
-            return;
+        SegmentReplicationTarget ongoingReplicationTarget = onGoingReplications.getOngoingReplicationTarget(replicaShard.shardId());
+        if (ongoingReplicationTarget != null) {
+            if (ongoingReplicationTarget.getCheckpoint().getPrimaryTerm() < receivedCheckpoint.getPrimaryTerm()) {
+                logger.trace(
+                    "Cancelling ongoing replication from old primary with primary term {}",
+                    ongoingReplicationTarget.getCheckpoint().getPrimaryTerm()
+                );
+                onGoingReplications.cancel(ongoingReplicationTarget.getId(), "Cancelling stuck target after new primary");
+            } else {
+                logger.trace(
+                    () -> new ParameterizedMessage(
+                        "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}",
+                        replicaShard.getLatestReplicationCheckpoint()
+                    )
+                );
+                return;
+            }
         }
         final Thread thread = Thread.currentThread();
         if (replicaShard.shouldProcessCheckpoint(receivedCheckpoint)) {
diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java
index d648ca6041ff8..20600856c9444 100644
--- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java
+++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java
@@ -49,6 +49,7 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.ConcurrentMap;
+import java.util.stream.Collectors;
 
 /**
  * This class holds a collection of all on going replication events on the current node (i.e., the node is the target node
@@ -236,13 +237,18 @@ public boolean cancelForShard(ShardId shardId, String reason) {
     }
 
     /**
-     * check if a shard is currently replicating
+     * Get target for shard
      *
-     * @param shardId shardId for which to check if replicating
-     * @return true if shard is currently replicating
+     * @param shardId shardId
+     * @return ReplicationTarget for input shardId
      */
-    public boolean isShardReplicating(ShardId shardId) {
-        return onGoingTargetEvents.values().stream().anyMatch(t -> t.indexShard.shardId().equals(shardId));
+    public T getOngoingReplicationTarget(ShardId shardId) {
+        final List<T> replicationTargetList = onGoingTargetEvents.values()
+            .stream()
+            .filter(t -> t.indexShard.shardId().equals(shardId))
+            .collect(Collectors.toList());
+        assert replicationTargetList.size() <= 1 : "More than one on-going replication target";
+        return replicationTargetList.size() > 0 ? replicationTargetList.get(0) : null;
     }
 
     /**
diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
index 7d9b0f09f21cd..1d253b0a9a300 100644
--- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
@@ -49,6 +49,8 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase {
     private ReplicationCheckpoint initialCheckpoint;
     private ReplicationCheckpoint aheadCheckpoint;
 
+    private ReplicationCheckpoint newPrimaryCheckpoint;
+
     @Override
     public void setUp() throws Exception {
         super.setUp();
@@ -74,6 +76,13 @@ public void setUp() throws Exception {
             initialCheckpoint.getSeqNo(),
             initialCheckpoint.getSegmentInfosVersion() + 1
         );
+        newPrimaryCheckpoint = new ReplicationCheckpoint(
+            initialCheckpoint.getShardId(),
+            initialCheckpoint.getPrimaryTerm() + 1,
+            initialCheckpoint.getSegmentsGen(),
+            initialCheckpoint.getSeqNo(),
+            initialCheckpoint.getSegmentInfosVersion() + 1
+        );
     }
 
     @Override
@@ -160,7 +169,7 @@ public void testShardAlreadyReplicating() throws InterruptedException {
         // Create a spy of Target Service so that we can verify invocation of startReplication call with specific checkpoint on it.
         SegmentReplicationTargetService serviceSpy = spy(sut);
         final SegmentReplicationTarget target = new SegmentReplicationTarget(
-            checkpoint,
+            initialCheckpoint,
             replicaShard,
             replicationSource,
             mock(SegmentReplicationTargetService.SegmentReplicationListener.class)
@@ -185,9 +194,47 @@ public void testShardAlreadyReplicating() throws InterruptedException {
 
         // wait for the new checkpoint to arrive, before the listener completes.
         latch.await(30, TimeUnit.SECONDS);
+        verify(targetSpy, times(0)).cancel(any());
         verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(replicaShard), any());
     }
 
+    public void testOnNewCheckpointFromNewPrimaryCancelOngoingReplication() throws IOException, InterruptedException {
+        // Create a spy of Target Service so that we can verify invocation of startReplication call with specific checkpoint on it.
+        SegmentReplicationTargetService serviceSpy = spy(sut);
+        // Create a Mockito spy of target to stub response of few method calls.
+        final SegmentReplicationTarget targetSpy = spy(
+            new SegmentReplicationTarget(
+                initialCheckpoint,
+                replicaShard,
+                replicationSource,
+                mock(SegmentReplicationTargetService.SegmentReplicationListener.class)
+            )
+        );
+
+        CountDownLatch latch = new CountDownLatch(1);
+        // Mocking response when startReplication is called on targetSpy we send a new checkpoint to serviceSpy and later reduce countdown
+        // of latch.
+        doAnswer(invocation -> {
+            final ActionListener<Void> listener = invocation.getArgument(0);
+            // a new checkpoint arrives before we've completed.
+            serviceSpy.onNewCheckpoint(newPrimaryCheckpoint, replicaShard);
+            listener.onResponse(null);
+            latch.countDown();
+            return null;
+        }).when(targetSpy).startReplication(any());
+        doNothing().when(targetSpy).onDone();
+
+        // start replication. This adds the target to on-ongoing replication collection
+        serviceSpy.startReplication(targetSpy);
+
+        // wait for the new checkpoint to arrive, before the listener completes.
+        latch.await(5, TimeUnit.SECONDS);
+        doNothing().when(targetSpy).startReplication(any());
+        verify(targetSpy, times(1)).cancel("Cancelling stuck target after new primary");
+        verify(serviceSpy, times(1)).startReplication(eq(newPrimaryCheckpoint), eq(replicaShard), any());
+        closeShards(replicaShard);
+    }
+
     public void testNewCheckpointBehindCurrentCheckpoint() {
         SegmentReplicationTargetService spy = spy(sut);
         spy.onNewCheckpoint(checkpoint, replicaShard);
diff --git a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java
index 7587f48503625..1789dd3b2a288 100644
--- a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java
+++ b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java
@@ -105,7 +105,25 @@ public void onFailure(ReplicationState state, OpenSearchException e, boolean sen
                 collection.cancel(recoveryId, "meh");
             }
         }
+    }
 
+    public void testMultiReplicationsForSingleShard() throws Exception {
+        try (ReplicationGroup shards = createGroup(0)) {
+            final ReplicationCollection<RecoveryTarget> collection = new ReplicationCollection<>(logger, threadPool);
+            final IndexShard shard1 = shards.addReplica();
+            final IndexShard shard2 = shards.addReplica();
+            final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shard1);
+            final long recoveryId2 = startRecovery(collection, shards.getPrimaryNode(), shard2);
+            try {
+                collection.getOngoingReplicationTarget(shard1.shardId());
+            } catch (AssertionError e) {
+                assertEquals(e.getMessage(), "More than one on-going replication target");
+            } finally {
+                collection.cancel(recoveryId, "meh");
+                collection.cancel(recoveryId2, "meh");
+            }
+            closeShards(shard1, shard2);
+        }
     }
 
     public void testRecoveryCancellation() throws Exception {
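The heart of the new onNewCheckpoint() branch is an ordering decision on primary terms. Distilled into a standalone predicate for illustration (a sketch using the accessors seen in the diff above, not code from the repository):

    // A checkpoint carrying a strictly higher primary term means a new primary was
    // elected after failover; the in-flight copy from the old primary can never
    // complete, so it should be cancelled instead of ignored.
    static boolean shouldCancelOngoing(ReplicationCheckpoint ongoing, ReplicationCheckpoint received) {
        return received.getPrimaryTerm() > ongoing.getPrimaryTerm();
    }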
From 0c10674924d66547d009237f8dd333243aac6ac4 Mon Sep 17 00:00:00 2001
From: Kunal Kotwani
Date: Fri, 2 Sep 2022 13:44:38 -0700
Subject: [PATCH 04/11] =?UTF-8?q?Adding=20support=20for=20labels=20on=20ve?=
 =?UTF-8?q?rsion=20bump=20PRs,=20skip=20label=20support=20for=E2=80=A6=20(?=
 =?UTF-8?q?#4391)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Adding support for labels on version bump PRs, skip label support for changelog verifier

Signed-off-by: Kunal Kotwani

* Add changelog

Signed-off-by: Kunal Kotwani

Signed-off-by: Kunal Kotwani
---
 .github/workflows/changelog_verifier.yml | 2 ++
 .github/workflows/version.yml            | 8 +++++++-
 CHANGELOG.md                             | 1 +
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml
index cda5dde462068..96f99f17b016e 100644
--- a/.github/workflows/changelog_verifier.yml
+++ b/.github/workflows/changelog_verifier.yml
@@ -14,3 +14,5 @@ jobs:
           ref: ${{ github.event.pull_request.head.sha }}
 
       - uses: dangoslen/changelog-enforcer@v3
+        with:
+          skipLabels: "autocut"
diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
index 030689642677a..42c2d21d106ce 100644
--- a/.github/workflows/version.yml
+++ b/.github/workflows/version.yml
@@ -5,7 +5,7 @@ on:
     tags:
       - '*.*.*'
 
-jobs:
+jobs:
   build:
     runs-on: ubuntu-latest
     steps:
@@ -61,6 +61,8 @@ jobs:
           commit-message: Incremented version to ${{ env.NEXT_VERSION }}
          signoff: true
          delete-branch: true
+          labels: |
+            autocut
          title: '[AUTO] Incremented version to ${{ env.NEXT_VERSION }}.'
          body: |
            I've noticed that a new tag ${{ env.TAG }} was pushed, and incremented the version from ${{ env.CURRENT_VERSION }} to ${{ env.NEXT_VERSION }}.
@@ -86,6 +88,8 @@ jobs:
          commit-message: Added bwc version ${{ env.NEXT_VERSION }}
          signoff: true
          delete-branch: true
+          labels: |
+            autocut
          title: '[AUTO] [${{ env.BASE_X }}] Added bwc version ${{ env.NEXT_VERSION }}.'
          body: |
            I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}.
@@ -111,6 +115,8 @@ jobs:
          commit-message: Added bwc version ${{ env.NEXT_VERSION }}
          signoff: true
          delete-branch: true
+          labels: |
+            autocut
          title: '[AUTO] [main] Added bwc version ${{ env.NEXT_VERSION }}.'
          body: |
            I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 182a6b36fca48..dc6c290253dbe 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
 - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847))
 - BWC version 2.2.2 ([#4383](https://github.com/opensearch-project/OpenSearch/pull/4383))
+- Support for labels on version bump PRs, skip label support for changelog verifier ([#4391](https://github.com/opensearch-project/OpenSearch/pull/4391))
 
 ### Dependencies
 - Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0
From fab2a122cbb5ce535066d3c3b9c4ae9012d2e331 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Fri, 2 Sep 2022 18:57:39 -0700
Subject: [PATCH 05/11] [Segment Replication] Extend FileChunkWriter to allow
 cancel on transport client (#4386)

* [Segment Replication] Extend FileChunkWriter to allow cancel on retryable transport client

Signed-off-by: Suraj Singh

* Add changelog entry

Signed-off-by: Suraj Singh

* Address review comments

Signed-off-by: Suraj Singh

* Integration test

Signed-off-by: Suraj Singh

Signed-off-by: Suraj Singh
---
 CHANGELOG.md                                   |  1 +
 .../replication/SegmentReplicationIT.java      | 72 ++++++++++++++++++-
 .../indices/recovery/FileChunkWriter.java      |  2 +
 .../OngoingSegmentReplications.java            |  2 +-
 .../RemoteSegmentFileChunkWriter.java          |  5 ++
 .../SegmentReplicationSourceHandler.java       |  6 +-
 .../SegmentReplicationSourceHandlerTests.java  |  3 +
 7 files changed, 88 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dc6c290253dbe..0930923805d96 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -39,6 +39,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - [Segment Replication] Bump segment infos counter before commit during replica promotion ([#4365](https://github.com/opensearch-project/OpenSearch/pull/4365))
 - Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
 - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352))
+- [Segment Replication] Extend FileChunkWriter to allow cancel on transport client ([#4386](https://github.com/opensearch-project/OpenSearch/pull/4386))
 - [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/OpenSearch/pull/4363))
 
 ### Security
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
index a9b6787d87bdf..16e9d78b17826 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
@@ -33,17 +33,23 @@
 import org.opensearch.index.engine.Segment;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.indices.IndicesService;
+import org.opensearch.indices.recovery.FileChunkRequest;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.plugins.Plugin;
 import org.opensearch.test.BackgroundIndexer;
 import org.opensearch.test.InternalTestCluster;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.transport.MockTransportService;
+import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.Optional;
 import java.util.Set;
+import java.util.Optional;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -65,6 +71,11 @@ public static void assumeFeatureFlag() {
         assumeTrue("Segment replication Feature flag is enabled", Boolean.parseBoolean(System.getProperty(FeatureFlags.REPLICATION_TYPE)));
     }
 
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Arrays.asList(MockTransportService.TestPlugin.class);
+    }
+
     @Override
     public Settings indexSettings() {
         return Settings.builder()
@@ -318,6 +329,65 @@ public void testReplicationAfterForceMerge() throws Exception {
         }
     }
 
+    public void testCancellation() throws Exception {
+        final String primaryNode = internalCluster().startNode();
+        createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build());
+        ensureYellow(INDEX_NAME);
+
+        final String replicaNode = internalCluster().startNode();
+
+        final SegmentReplicationSourceService segmentReplicationSourceService = internalCluster().getInstance(
+            SegmentReplicationSourceService.class,
+            primaryNode
+        );
+        final IndexShard primaryShard = getIndexShard(primaryNode);
+
+        CountDownLatch latch = new CountDownLatch(1);
+
+        MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(
+            TransportService.class,
+            primaryNode
+        ));
+        mockTransportService.addSendBehavior(
+            internalCluster().getInstance(TransportService.class, replicaNode),
+            (connection, requestId, action, request, options) -> {
+                if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK)) {
+                    FileChunkRequest req = (FileChunkRequest) request;
+                    logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk());
+                    if (req.name().endsWith("cfs") && req.lastChunk()) {
+                        try {
+                            latch.await();
+                        } catch (InterruptedException e) {
+                            throw new RuntimeException(e);
+                        }
+                    }
+                }
+                connection.sendRequest(requestId, action, request, options);
+            }
+        );
+
+        final int docCount = scaledRandomIntBetween(0, 200);
+        try (
+            BackgroundIndexer indexer = new BackgroundIndexer(
+                INDEX_NAME,
+                "_doc",
+                client(),
+                -1,
+                RandomizedTest.scaledRandomIntBetween(2, 5),
+                false,
+                random()
+            )
+        ) {
+            indexer.start(docCount);
+            waitForDocs(docCount, indexer);
+
+            flush(INDEX_NAME);
+        }
+        segmentReplicationSourceService.beforeIndexShardClosed(primaryShard.shardId(), primaryShard, indexSettings());
+        latch.countDown();
+        assertDocCounts(docCount, primaryNode);
+    }
+
     public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception {
         final String primaryNode = internalCluster().startNode();
         createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build());
diff --git a/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java b/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java
index cb43af3b82e09..f1cc7b8dd1d89 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java
@@ -28,4 +28,6 @@ void writeFileChunk(
         int totalTranslogOps,
         ActionListener<Void> listener
     );
+
+    default void cancel() {}
 }
diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
index 828aa29192fe3..1a97d334df58f 100644
--- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
+++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
@@ -126,7 +126,7 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener
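The pattern this patch enables, distilled: a chunk writer backed by a retryable transport client gains a hook to abort pending retries when replication is cancelled. A minimal sketch, assuming a client wrapper that tracks its in-flight retryable requests (the class name and client API here are illustrative, not the actual RemoteSegmentFileChunkWriter internals):

    final class CancellableChunkWriter implements FileChunkWriter {
        private final RetryableTransportClient client; // assumed to expose cancel() for pending retries

        CancellableChunkWriter(RetryableTransportClient client) {
            this.client = client;
        }

        @Override
        public void writeFileChunk(StoreFileMetadata md, long position, BytesReference content,
                                   boolean lastChunk, int totalTranslogOps, ActionListener<Void> listener) {
            // send a FILE_CHUNK transport request through the retryable client (elided)
        }

        @Override
        public void cancel() {
            client.cancel(); // stop retrying so the source handler can shut down promptly
        }
    }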
Date: Sun, 4 Sep 2022 21:55:47 +0530
Subject: [PATCH 06/11] Added RestLayer Changes for PIT stats (#4217)

Signed-off-by: Ajay Kumar Movva
---
 CHANGELOG.md                                |  1 +
 .../test/cat.shards/10_basic.yml            |  3 +++
 .../rest/action/cat/RestIndicesAction.java  | 27 +++++++++++++++++++
 .../rest/action/cat/RestNodesAction.java    | 16 +++++++++++
 .../rest/action/cat/RestShardsAction.java   | 15 +++++++++++
 .../action/cat/RestShardsActionTests.java   |  4 +--
 6 files changed, 64 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0930923805d96..c5af055dca8a6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -49,6 +49,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 ### Added
 - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
 - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
+- Added RestLayer Changes for PIT stats ([#4217](https://github.com/opensearch-project/OpenSearch/pull/4217))
 
 ### Changed
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml
index aa4abc7a11eae..f07a06aba4388 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml
@@ -67,6 +67,9 @@
         search.scroll_current .+ \n
         search.scroll_time .+ \n
         search.scroll_total .+ \n
+        search.point_in_time_current .+ \n
+        search.point_in_time_time .+ \n
+        search.point_in_time_total .+ \n
         segments.count .+ \n
         segments.memory .+ \n
         segments.index_writer_memory .+ \n
diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java
index a8cdff5775478..f04d0ab712b39 100644
--- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java
@@ -597,6 +597,24 @@ protected Table getTableWithHeader(final RestRequest request) {
         );
         table.addCell("pri.search.scroll_total", "default:false;text-align:right;desc:completed scroll contexts");
 
+        table.addCell(
+            "search.point_in_time_current",
+            "sibling:pri;alias:scc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts"
+        );
+        table.addCell("pri.search.point_in_time_current", "default:false;text-align:right;desc:open point in time contexts");
+
+        table.addCell(
+            "search.point_in_time_time",
+            "sibling:pri;alias:scti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open"
+        );
+        table.addCell("pri.search.point_in_time_time", "default:false;text-align:right;desc:time point in time contexts held open");
+
+        table.addCell(
+            "search.point_in_time_total",
+            "sibling:pri;alias:scto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts"
+        );
+        table.addCell("pri.search.point_in_time_total", "default:false;text-align:right;desc:completed point in time contexts");
+
         table.addCell("segments.count", "sibling:pri;alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
         table.addCell("pri.segments.count", "default:false;text-align:right;desc:number of segments");
 
@@ -878,6 +896,15 @@ Table buildTable(
         table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollCount());
         table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getScrollCount());
 
+        table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitCurrent());
+        table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitCurrent());
+
+        table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitTime());
+        table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitTime());
+
+        table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitCount());
+        table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitCount());
+
         table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getCount());
         table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getCount());
diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java
index 8d3081bec48e9..6346e5d23cd34 100644
--- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java
@@ -310,6 +310,19 @@ protected Table getTableWithHeader(final RestRequest request) {
         );
         table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts");
 
+        table.addCell(
+            "search.point_in_time_current",
+            "alias:scc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts"
+        );
+        table.addCell(
+            "search.point_in_time_time",
+            "alias:scti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open"
+        );
+        table.addCell(
+            "search.point_in_time_total",
+            "alias:scto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts"
+        );
+
         table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
         table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments");
         table.addCell(
@@ -519,6 +532,9 @@ Table buildTable(
         table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent());
         table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime());
         table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount());
+        table.addCell(searchStats == null ? null : searchStats.getTotal().getPitCurrent());
+        table.addCell(searchStats == null ? null : searchStats.getTotal().getPitTime());
+        table.addCell(searchStats == null ? null : searchStats.getTotal().getPitCount());
 
         SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments();
         table.addCell(segmentsStats == null ? null : segmentsStats.getCount());
diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java
index 6bf24951fe6c9..5cb5a7876669e 100644
--- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java
@@ -225,6 +225,18 @@ protected Table getTableWithHeader(final RestRequest request) {
             "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open"
         );
         table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts");
+        table.addCell(
+            "search.point_in_time_current",
+            "alias:spc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts"
+        );
+        table.addCell(
+            "search.point_in_time_time",
+            "alias:spti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open"
+        );
+        table.addCell(
+            "search.point_in_time_total",
+            "alias:spto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts"
+        );
 
         table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
         table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments");
@@ -390,6 +402,9 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe
         table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCurrent()));
         table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollTime()));
         table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount()));
+        table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCurrent()));
+        table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitTime()));
+        table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCount()));
 
         table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount));
         table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory));
diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java
index ed3aa19afa146..a8679a087216d 100644
--- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java
+++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java
@@ -134,8 +134,8 @@ public void testBuildTable() {
             assertThat(row.get(3).value, equalTo(shardRouting.state()));
             assertThat(row.get(6).value, equalTo(localNode.getHostAddress()));
             assertThat(row.get(7).value, equalTo(localNode.getId()));
-            assertThat(row.get(69).value, equalTo(shardStats.getDataPath()));
-            assertThat(row.get(70).value, equalTo(shardStats.getStatePath()));
+            assertThat(row.get(72).value, equalTo(shardStats.getDataPath()));
+            assertThat(row.get(73).value, equalTo(shardStats.getStatePath()));
         }
     }
 }
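Like the other cat columns registered with default:false, the new point-in-time columns only appear when requested by header name or alias. An illustrative request against the shards endpoint using the headers added above:

    GET _cat/shards?v&h=index,shard,prirep,search.point_in_time_current,search.point_in_time_time,search.point_in_time_total

Note that RestShardsAction registers the aliases spc/spti/spto for these columns, while RestIndicesAction and RestNodesAction reuse scc/scti/scto.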
From 4ed09955e7013405f21a959a044f131d3bf224f1 Mon Sep 17 00:00:00 2001
From: Movva Ajaykumar
Date: Tue, 6 Sep 2022 05:56:39 +0530
Subject: [PATCH 07/11] Modified cat shards test for pit stats (#4408)

Signed-off-by: Ajay Kumar Movva
---
 CHANGELOG.md                     |  1 +
 .../test/cat.shards/10_basic.yml | 93 ++++++++++++++++++-
 2 files changed, 92 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c5af055dca8a6..3c7757c7bd070 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -41,6 +41,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352))
 - [Segment Replication] Extend FileChunkWriter to allow cancel on transport client ([#4386](https://github.com/opensearch-project/OpenSearch/pull/4386))
 - [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/OpenSearch/pull/4363))
+- Fixed the `_cat/shards/10_basic.yml` test cases.
 
 ### Security
 - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml
index f07a06aba4388..6ebe273d552cc 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml
@@ -1,11 +1,14 @@
 ---
 "Help":
   - skip:
-      version: " - 7.99.99"
-      reason: shard path stats were added in 8.0.0
+      version: " - 2.9.99"
+      reason: point in time stats were added in 3.0.0
+      features: node_selector
   - do:
       cat.shards:
         help: true
+      node_selector:
+        version: "3.0.0 - "
 
   - match:
       $body: |
@@ -85,6 +88,92 @@
         path.state .+ \n
         $/
 ---
+"Help before - 3.0.0":
+  - skip:
+      version: "3.0.0 - "
+      reason: point in time stats were added in 3.0.0
+      features: node_selector
+  - do:
+      cat.shards:
+        help: true
+      node_selector:
+        version: " - 2.9.99"
+
+  - match:
+      $body: |
+        /^  index .+ \n
+        shard .+ \n
+        prirep .+ \n
+        state .+ \n
+        docs .+ \n
+        store .+ \n
+        ip .+ \n
+        id .+ \n
+        node .+ \n
+        sync_id .+ \n
+        unassigned.reason .+ \n
+        unassigned.at .+ \n
+        unassigned.for .+ \n
+        unassigned.details .+ \n
+        recoverysource.type .+ \n
+        completion.size .+ \n
+        fielddata.memory_size .+ \n
+        fielddata.evictions .+ \n
+        query_cache.memory_size .+ \n
+        query_cache.evictions .+ \n
+        flush.total .+ \n
+        flush.total_time .+ \n
+        get.current .+ \n
+        get.time .+ \n
+        get.total .+ \n
+        get.exists_time .+ \n
+        get.exists_total .+ \n
+        get.missing_time .+ \n
+        get.missing_total .+ \n
+        indexing.delete_current .+ \n
+        indexing.delete_time .+ \n
+        indexing.delete_total .+ \n
+        indexing.index_current .+ \n
+        indexing.index_time .+ \n
+        indexing.index_total .+ \n
+        indexing.index_failed .+ \n
+        merges.current .+ \n
+        merges.current_docs .+ \n
+        merges.current_size .+ \n
+        merges.total .+ \n
+        merges.total_docs .+ \n
+        merges.total_size .+ \n
+        merges.total_time .+ \n
+        refresh.total .+ \n
+        refresh.time .+ \n
+        refresh.external_total .+ \n
+        refresh.external_time .+ \n
+        refresh.listeners .+ \n
+        search.fetch_current .+ \n
+        search.fetch_time .+ \n
+        search.fetch_total .+ \n
+        search.open_contexts .+ \n
+        search.query_current .+ \n
+        search.query_time .+ \n
+        search.query_total .+ \n
+        search.scroll_current .+ \n
+        search.scroll_time .+ \n
+        search.scroll_total .+ \n
+        segments.count .+ \n
+        segments.memory .+ \n
+        segments.index_writer_memory .+ \n
+        segments.version_map_memory .+ \n
+        segments.fixed_bitset_memory .+ \n
+        seq_no.max .+ \n
+        seq_no.local_checkpoint .+ \n
+        seq_no.global_checkpoint .+ \n
+        warmer.current .+ \n
+        warmer.total .+ \n
+        warmer.total_time .+ \n
+        path.data .+ \n
+        path.state .+ \n
+        $/
+---
 "Test cat shards output":
 
   - do:
From ff2e4bf86bc00ff62b45a0acc21b9790946b0dc7 Mon Sep 17 00:00:00 2001
From: Sachin Kale
Date: Tue, 6 Sep 2022 17:27:58 +0530
Subject: [PATCH 08/11] [Remote Store] Add index specific setting for remote
 repository (#4253)

* Add index specific setting for remote repository

* Fix for failover incremental uploads

Signed-off-by: Sachin Kale
---
 CHANGELOG.md                                  |  1 +
 .../cluster/metadata/IndexMetadata.java       | 58 +++++++++++++---
 .../common/settings/IndexScopedSettings.java  |  6 +-
 .../org/opensearch/index/IndexService.java    |  2 +-
 .../org/opensearch/index/IndexSettings.java   |  9 +++
 .../shard/RemoteStoreRefreshListener.java     |  7 ++
 .../opensearch/index/IndexSettingsTests.java  | 69 ++++++++++++++++++-
 7 files changed, 139 insertions(+), 13 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3c7757c7bd070..4e033e1ffb2bf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -22,6 +22,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240))
 - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156))
 - Update to Netty 4.1.80.Final ([#4359](https://github.com/opensearch-project/OpenSearch/pull/4359))
+- Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/OpenSearch/pull/4253))
 
 ### Deprecated
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
index 759891e88039b..cd1c92a8b109f 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
@@ -285,6 +285,8 @@ public Iterator<Setting<?>> settings() {
 
     public static final String SETTING_REMOTE_STORE_ENABLED = "index.remote_store.enabled";
 
+    public static final String SETTING_REMOTE_STORE_REPOSITORY = "index.remote_store.repository";
+
     public static final String SETTING_REMOTE_TRANSLOG_STORE_ENABLED = "index.remote_store.translog.enabled";
     /**
      * Used to specify if the index data should be persisted in the remote store.
@@ -322,6 +324,50 @@ public Iterator<Setting<?>> settings() {
         Property.Final
     );
 
+    /**
+     * Used to specify the remote store repository to use for this index.
+     */
+    public static final Setting<String> INDEX_REMOTE_STORE_REPOSITORY_SETTING = Setting.simpleString(
+        SETTING_REMOTE_STORE_REPOSITORY,
+        new Setting.Validator<>() {
+
+            @Override
+            public void validate(final String value) {}
+
+            @Override
+            public void validate(final String value, final Map<Setting<?>, Object> settings) {
+                if (value == null || value.isEmpty()) {
+                    throw new IllegalArgumentException(
+                        "Setting " + INDEX_REMOTE_STORE_REPOSITORY_SETTING.getKey() + " should be provided with non-empty repository ID"
+                    );
+                } else {
+                    validateRemoteStoreSettingEnabled(settings, INDEX_REMOTE_STORE_REPOSITORY_SETTING);
+                }
+            }
+
+            @Override
+            public Iterator<Setting<?>> settings() {
+                final List<Setting<?>> settings = Collections.singletonList(INDEX_REMOTE_STORE_ENABLED_SETTING);
+                return settings.iterator();
+            }
+        },
+        Property.IndexScope,
+        Property.Final
+    );
+
+    private static void validateRemoteStoreSettingEnabled(final Map<Setting<?>, Object> settings, Setting<?> setting) {
+        final Boolean isRemoteSegmentStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING);
+        if (isRemoteSegmentStoreEnabled == false) {
+            throw new IllegalArgumentException(
+                "Settings "
+                    + setting.getKey()
+                    + " can only be set/enabled when "
+                    + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey()
+                    + " is set to true"
+            );
+        }
+    }
+
     /**
      * Used to specify if the index translog operations should be persisted in the remote store.
      */
@@ -335,16 +381,8 @@ public void validate(final Boolean value) {}
 
             @Override
             public void validate(final Boolean value, final Map<Setting<?>, Object> settings) {
-                final Boolean isRemoteSegmentStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING);
-                if (isRemoteSegmentStoreEnabled == false && value == true) {
-                    throw new IllegalArgumentException(
-                        "Settings "
-                            + INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.getKey()
-                            + " cannot be enabled when "
-                            + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey()
-                            + " is set to "
-                            + settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING)
-                    );
+                if (value == true) {
+                    validateRemoteStoreSettingEnabled(settings, INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING);
                 }
             }
 
diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
index a3fa2c7ee3112..7be9adc786f24 100644
--- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
@@ -223,7 +223,11 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
         FeatureFlags.REPLICATION_TYPE,
         Collections.singletonList(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING),
         FeatureFlags.REMOTE_STORE,
-        Arrays.asList(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING)
+        Arrays.asList(
+            IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING,
+            IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING,
+            IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING
+        )
     );
 
     public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS);
diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java
index e1427df1c34ab..92f957633db84 100644
--- a/server/src/main/java/org/opensearch/index/IndexService.java
+++ b/server/src/main/java/org/opensearch/index/IndexService.java
@@ -511,7 +511,7 @@ public synchronized IndexShard createShard(
             Store remoteStore = null;
             if (this.indexSettings.isRemoteStoreEnabled()) {
                 Directory remoteDirectory = remoteDirectoryFactory.newDirectory(
-                    clusterService.state().metadata().clusterUUID(),
+                    this.indexSettings.getRemoteStoreRepository(),
                     this.indexSettings,
                     path
                 );
diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java
index 657cb1ee55cb9..9c7f4804755d4 100644
--- a/server/src/main/java/org/opensearch/index/IndexSettings.java
+++ b/server/src/main/java/org/opensearch/index/IndexSettings.java
@@ -560,6 +560,7 @@ public final class IndexSettings {
     private final ReplicationType replicationType;
     private final boolean isRemoteStoreEnabled;
     private final boolean isRemoteTranslogStoreEnabled;
+    private final String remoteStoreRepository;
     // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock
     private volatile Settings settings;
     private volatile IndexMetadata indexMetadata;
@@ -721,6 +722,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti
         replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE));
         isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false);
         isRemoteTranslogStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false);
+        remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY);
         this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings);
         this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings);
         this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings);
@@ -979,6 +981,13 @@ public boolean isRemoteTranslogStoreEnabled() {
         return isRemoteTranslogStoreEnabled;
     }
 
+    /**
+     * Returns the remote store repository configured for this index.
+     */
+    public String getRemoteStoreRepository() {
+        return remoteStoreRepository;
+    }
+
     /**
      * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the
      * index settings and the node settings where node settings are overwritten by index settings.
diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
index 0d32e8d56e4d2..a8ca9891d9743 100644
--- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
+++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
@@ -59,6 +59,13 @@ public RemoteStoreRefreshListener(IndexShard indexShard) {
             .getDelegate()).getDelegate();
         this.primaryTerm = indexShard.getOperationPrimaryTerm();
         localSegmentChecksumMap = new HashMap<>();
+        if (indexShard.shardRouting.primary()) {
+            try {
+                this.remoteDirectory.init();
+            } catch (IOException e) {
+                logger.error("Exception while initialising RemoteSegmentStoreDirectory", e);
+            }
+        }
     }
 
     @Override
diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
index e02eac85beafb..de5ef8851ae80 100644
--- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
+++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
@@ -851,7 +851,7 @@ public void testEnablingRemoteTranslogStoreFailsWhenRemoteSegmentDisabled() {
             () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.get(indexSettings)
         );
         assertEquals(
-            "Settings index.remote_store.translog.enabled cannot be enabled when index.remote_store.enabled is set to false",
+            "Settings index.remote_store.translog.enabled can only be set/enabled when index.remote_store.enabled is set to true",
             iae.getMessage()
         );
     }
@@ -876,4 +876,71 @@ public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDefault() {
         );
         assertEquals("To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT", iae.getMessage());
     }
+
+    public void testRemoteRepositoryDefaultSetting() {
+        IndexMetadata metadata = newIndexMeta(
+            "index",
+            Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build()
+        );
+        IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY);
+        assertNull(settings.getRemoteStoreRepository());
+    }
+
+    public void testRemoteRepositoryExplicitSetting() {
+        IndexMetadata metadata = newIndexMeta(
+            "index",
+            Settings.builder()
+                .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
+                .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
+                .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, "repo1")
+                .build()
+        );
+        IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY);
+        assertEquals("repo1", settings.getRemoteStoreRepository());
+    }
+
+    public void testUpdateRemoteRepositoryFails() {
+        Set<Setting<?>> remoteStoreSettingSet = new HashSet<>();
+        remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING);
+        IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet);
+        IllegalArgumentException error = expectThrows(
+            IllegalArgumentException.class,
+            () -> settings.updateSettings(
+                Settings.builder().put("index.remote_store.repository", randomUnicodeOfLength(10)).build(),
+                Settings.builder(),
+                Settings.builder(),
+                "index"
+            )
+        );
+        assertEquals(error.getMessage(), "final index setting [index.remote_store.repository], not updateable");
+    }
+
+    public void testSetRemoteRepositoryFailsWhenRemoteStoreIsNotEnabled() {
+        Settings indexSettings = Settings.builder()
+            .put("index.replication.type", ReplicationType.SEGMENT)
+            .put("index.remote_store.enabled", false)
+            .put("index.remote_store.repository", "repo1")
+            .build();
+        IllegalArgumentException iae = expectThrows(
+            IllegalArgumentException.class,
+            () -> IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING.get(indexSettings)
+        );
+        assertEquals(
+            "Settings index.remote_store.repository can only be set/enabled when index.remote_store.enabled is set to true",
+            iae.getMessage()
+        );
+    }
+
+    public void testSetRemoteRepositoryFailsWhenEmptyString() {
+        Settings indexSettings = Settings.builder()
+            .put("index.replication.type", ReplicationType.SEGMENT)
+            .put("index.remote_store.enabled", false)
+            .put("index.remote_store.repository", "")
+            .build();
+        IllegalArgumentException iae = expectThrows(
+            IllegalArgumentException.class,
+            () -> IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING.get(indexSettings)
+        );
+        assertEquals("Setting index.remote_store.repository should be provided with non-empty repository ID", iae.getMessage());
+    }
 }
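Put together, an index opting into a dedicated remote repository under these rules carries settings like the following (a sketch mirroring the new tests; "my-backup-repo" is an illustrative repository name):

    Settings indexSettings = Settings.builder()
        .put("index.replication.type", ReplicationType.SEGMENT)  // remote store requires segment replication
        .put("index.remote_store.enabled", true)                 // must be true before a repository may be named
        .put("index.remote_store.repository", "my-backup-repo")  // validated to be non-empty
        .build();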
- cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO); source.getCheckpointMetadata(getId(), checkpoint, checkpointInfoListener); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 1d253b0a9a300..f2eb635f24bbf 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -15,6 +15,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -29,6 +30,7 @@ import java.util.concurrent.TimeUnit; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.doAnswer; @@ -37,6 +39,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.eq; +import static org.opensearch.indices.replication.SegmentReplicationState.Stage.CANCELLED; public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { @@ -215,24 +218,25 @@ public void testOnNewCheckpointFromNewPrimaryCancelOngoingReplication() throws I // Mocking response when startReplication is called on targetSpy we send a new checkpoint to serviceSpy and later reduce countdown // of latch. doAnswer(invocation -> { - final ActionListener listener = invocation.getArgument(0); + // short circuit loop on new checkpoint request + doReturn(null).when(serviceSpy).startReplication(eq(newPrimaryCheckpoint), eq(replicaShard), any()); // a new checkpoint arrives before we've completed. serviceSpy.onNewCheckpoint(newPrimaryCheckpoint, replicaShard); - listener.onResponse(null); - latch.countDown(); + try { + invocation.callRealMethod(); + } catch (CancellableThreads.ExecutionCancelledException e) { + latch.countDown(); + } return null; }).when(targetSpy).startReplication(any()); - doNothing().when(targetSpy).onDone(); // start replication. This adds the target to on-ongoing replication collection serviceSpy.startReplication(targetSpy); - + latch.await(); // wait for the new checkpoint to arrive, before the listener completes. 
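+        // (the latch is counted down only when the real startReplication call aborts with an
+        // ExecutionCancelledException, so returning from await() proves the old copy was cancelled)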
- latch.await(5, TimeUnit.SECONDS); - doNothing().when(targetSpy).startReplication(any()); + assertEquals(CANCELLED, targetSpy.state().getStage()); verify(targetSpy, times(1)).cancel("Cancelling stuck target after new primary"); verify(serviceSpy, times(1)).startReplication(eq(newPrimaryCheckpoint), eq(replicaShard), any()); - closeShards(replicaShard); } public void testNewCheckpointBehindCurrentCheckpoint() { diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 1b40cb4f2dfa3..0838a1fe87aa4 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -1207,6 +1207,7 @@ public void getCheckpointMetadata( copyState.getPendingDeleteFiles() ) ); + copyState.decRef(); } catch (IOException e) { logger.error("Unexpected error computing CopyState", e); Assert.fail("Failed to compute copyState"); From f97cb4b4bcf8ba15ed251b9ff97053a2f4f4619d Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 6 Sep 2022 10:06:24 -0700 Subject: [PATCH 10/11] Segment Replication - Fix NoSuchFileException errors caused when computing metadata snapshot on primary shards. (#4366) * Segment Replication - Fix NoSuchFileException errors caused when computing metadata snapshot on primary shards. This change fixes the errors that occur when computing metadata snapshots on primary shards from the latest in-memory SegmentInfos. The error occurs when a segments_N file that is referenced by the in-memory infos is deleted as part of a concurrent commit. The segments themselves are incref'd by IndexWriter.incRefDeleter but the commit file (Segments_N) is not. This change resolves this by ignoring the segments_N file when computing metadata for CopyState and only sending incref'd segment files to replicas. Signed-off-by: Marc Handalian * Fix spotless. Signed-off-by: Marc Handalian * Update StoreTests.testCleanupAndPreserveLatestCommitPoint to assert additional segments are deleted. Signed-off-by: Marc Handalian * Rename snapshot to metadataMap in CheckpointInfoResponse. Signed-off-by: Marc Handalian * Refactor segmentReplicationDiff method to compute off two maps instead of MetadataSnapshots. Signed-off-by: Marc Handalian * Fix spotless. Signed-off-by: Marc Handalian * Revert catchall in SegmentReplicationSourceService. Signed-off-by: Marc Handalian * Revert log lvl change. Signed-off-by: Marc Handalian * Fix SegmentReplicationTargetTests Signed-off-by: Marc Handalian * Cleanup unused logger. 
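For illustration only, a minimal sketch (not part of this patch's diff) of the replica-side flow built on the two APIs introduced here, Store#getSegmentMetadataMap and the static Store#segmentReplicationDiff; primaryMap, replicaMap and requestFile are hypothetical names:

    // primaryMap/replicaMap are assumed to come from Store#getSegmentMetadataMap, computed
    // from the primary's in-memory SegmentInfos and the replica's own SegmentInfos. Neither
    // map ever contains a segments_N entry, so a commit file deleted by a concurrent commit
    // can no longer surface as a NoSuchFileException while the diff is computed or served.
    Store.RecoveryDiff diff = Store.segmentReplicationDiff(primaryMap, replicaMap);
    for (StoreFileMetadata file : diff.missing) {
        requestFile(file); // hypothetical transport call; only incRef'd segment files are fetched
    }

As in SegmentReplicationTarget#getFiles below, only diff.missing is requested from the source; identical files already on the replica are left untouched.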
Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian Co-authored-by: Suraj Singh --- CHANGELOG.md | 1 + .../replication/SegmentReplicationIT.java | 50 ++++++- .../org/opensearch/index/store/Store.java | 133 +++++++++--------- .../replication/CheckpointInfoResponse.java | 30 ++-- .../SegmentReplicationSourceService.java | 7 +- .../replication/SegmentReplicationTarget.java | 36 ++--- .../indices/replication/common/CopyState.java | 28 +--- .../SegmentReplicationIndexShardTests.java | 7 +- .../opensearch/index/store/StoreTests.java | 131 ++++++++++++++--- .../OngoingSegmentReplicationsTests.java | 28 ++-- .../SegmentReplicationSourceHandlerTests.java | 8 +- .../SegmentReplicationSourceServiceTests.java | 4 +- .../SegmentReplicationTargetServiceTests.java | 2 +- .../SegmentReplicationTargetTests.java | 48 ++----- .../replication/common/CopyStateTests.java | 10 +- .../index/shard/IndexShardTestCase.java | 8 +- 16 files changed, 301 insertions(+), 230 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cb48b3aedeea5..a2b6528783a39 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352)) - [Segment Replication] Extend FileChunkWriter to allow cancel on transport client ([#4386](https://github.com/opensearch-project/OpenSearch/pull/4386)) - [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/OpenSearch/pull/4363)) +- Fix NoSuchFileExceptions with segment replication when computing primary metadata snapshots ([#4366](https://github.com/opensearch-project/OpenSearch/pull/4366)) - [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test ([#4414](https://github.com/opensearch-project/OpenSearch/pull/4414)) - Fixed the `_cat/shards/10_basic.yml` test cases fix. diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 16e9d78b17826..9b2ab753832d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -9,7 +9,6 @@ package org.opensearch.indices.replication; import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.lucene.index.SegmentInfos; import org.junit.BeforeClass; import org.opensearch.action.admin.indices.segments.IndexShardSegments; import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; @@ -586,13 +585,56 @@ private void assertSegmentStats(int numberOfReplicas) throws IOException { ClusterState state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState(); final DiscoveryNode replicaNode = state.nodes().resolveNode(replicaShardRouting.currentNodeId()); IndexShard indexShard = getIndexShard(replicaNode.getName()); - final String lastCommitSegmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(indexShard.store().directory()); // calls to readCommit will fail if a valid commit point and all its segments are not in the store. 
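+            // (readLastCommittedSegmentsInfo performs the same readCommit validation without
+            // resolving the segments_N file name by hand first)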
- SegmentInfos.readCommit(indexShard.store().directory(), lastCommitSegmentsFileName); + indexShard.store().readLastCommittedSegmentsInfo(); } } } + public void testDropPrimaryDuringReplication() throws Exception { + final Settings settings = Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 6) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + final String primaryNode = internalCluster().startDataOnlyNode(Settings.EMPTY); + createIndex(INDEX_NAME, settings); + internalCluster().startDataOnlyNodes(6); + ensureGreen(INDEX_NAME); + + int initialDocCount = scaledRandomIntBetween(100, 200); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + // don't wait for replication to complete, stop the primary immediately. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellow(INDEX_NAME); + + // start another replica. + internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + // index another doc and refresh - without this the new replica won't catch up. + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").get(); + + flushAndRefresh(INDEX_NAME); + waitForReplicaUpdate(); + assertSegmentStats(6); + } + } + /** * Waits until the replica is caught up to the latest primary segments gen. * @throws Exception if assertion fails @@ -611,10 +653,12 @@ private void waitForReplicaUpdate() throws Exception { final List replicaShardSegments = segmentListMap.get(false); // if we don't have any segments yet, proceed. final ShardSegments primaryShardSegments = primaryShardSegmentsList.stream().findFirst().get(); + logger.debug("Primary Segments: {}", primaryShardSegments.getSegments()); if (primaryShardSegments.getSegments().isEmpty() == false) { final Map latestPrimarySegments = getLatestSegments(primaryShardSegments); final Long latestPrimaryGen = latestPrimarySegments.values().stream().findFirst().map(Segment::getGeneration).get(); for (ShardSegments shardSegments : replicaShardSegments) { + logger.debug("Replica {} Segments: {}", shardSegments.getShardRouting(), shardSegments.getSegments()); final boolean isReplicaCaughtUpToPrimary = shardSegments.getSegments() .stream() .anyMatch(segment -> segment.getGeneration() == latestPrimaryGen); diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 58598ab2d08f4..9122c950a6ab6 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -105,6 +105,7 @@ import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -122,6 +123,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; +import static org.opensearch.index.store.Store.MetadataSnapshot.loadMetadata; /** * A Store provides plain access to files written by an opensearch index shard. 
Each shard @@ -334,6 +336,51 @@ public MetadataSnapshot getMetadata(SegmentInfos segmentInfos) throws IOExceptio return new MetadataSnapshot(segmentInfos, directory, logger); } + /** + * Segment Replication method - Fetch a map of StoreFileMetadata for segments, ignoring Segment_N files. + * @param segmentInfos {@link SegmentInfos} from which to compute metadata. + * @return {@link Map} map file name to {@link StoreFileMetadata}. + */ + public Map getSegmentMetadataMap(SegmentInfos segmentInfos) throws IOException { + assert indexSettings.isSegRepEnabled(); + return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; + } + + /** + * Segment Replication method + * Returns a diff between the Maps of StoreFileMetadata that can be used for getting list of files to copy over to a replica for segment replication. The returned diff will hold a list of files that are: + *
<ul>
+     * <li>identical: they exist in both maps and they can be considered the same ie. they don't need to be recovered</li>
+     * <li>different: they exist in both maps but they are not identical</li>
+     * <li>missing: files that exist in the source but not in the target</li>
+     * </ul>
+ */ + public static RecoveryDiff segmentReplicationDiff(Map source, Map target) { + final List identical = new ArrayList<>(); + final List different = new ArrayList<>(); + final List missing = new ArrayList<>(); + for (StoreFileMetadata value : source.values()) { + if (value.name().startsWith(IndexFileNames.SEGMENTS)) { + continue; + } + if (target.containsKey(value.name()) == false) { + missing.add(value); + } else { + final StoreFileMetadata fileMetadata = target.get(value.name()); + if (fileMetadata.isSame(value)) { + identical.add(value); + } else { + different.add(value); + } + } + } + return new RecoveryDiff( + Collections.unmodifiableList(identical), + Collections.unmodifiableList(different), + Collections.unmodifiableList(missing) + ); + } + /** * Renames all the given files from the key of the map to the * value of the map. All successfully renamed files are removed from the map in-place. @@ -709,31 +756,34 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr } /** - * This method deletes every file in this store that is not contained in either the remote or local metadata snapshots. + * Segment Replication method - + * This method deletes every file in this store that is not referenced by the passed in SegmentInfos or + * part of the latest on-disk commit point. * This method is used for segment replication when the in memory SegmentInfos can be ahead of the on disk segment file. * In this case files from both snapshots must be preserved. Verification has been done that all files are present on disk. * @param reason the reason for this cleanup operation logged for each deleted file - * @param localSnapshot The local snapshot from in memory SegmentInfos. + * @param infos {@link SegmentInfos} Files from this infos will be preserved on disk if present. * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup. */ - public void cleanupAndPreserveLatestCommitPoint(String reason, MetadataSnapshot localSnapshot) throws IOException { + public void cleanupAndPreserveLatestCommitPoint(String reason, SegmentInfos infos) throws IOException { + assert indexSettings.isSegRepEnabled(); // fetch a snapshot from the latest on disk Segments_N file. This can be behind // the passed in local in memory snapshot, so we want to ensure files it references are not removed. 
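+        // (cleanupFiles below treats the latest commit point as the base snapshot and the
+        // passed-in infos.files(true) as additional protected files, so both sets survive)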
metadataLock.writeLock().lock(); try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { - cleanupFiles(reason, localSnapshot, getMetadata(readLastCommittedSegmentsInfo())); + cleanupFiles(reason, getMetadata(readLastCommittedSegmentsInfo()), infos.files(true)); } finally { metadataLock.writeLock().unlock(); } } - private void cleanupFiles(String reason, MetadataSnapshot localSnapshot, @Nullable MetadataSnapshot additionalSnapshot) + private void cleanupFiles(String reason, MetadataSnapshot localSnapshot, @Nullable Collection additionalFiles) throws IOException { assert metadataLock.isWriteLockedByCurrentThread(); for (String existingFile : directory.listAll()) { if (Store.isAutogenerated(existingFile) || localSnapshot.contains(existingFile) - || (additionalSnapshot != null && additionalSnapshot.contains(existingFile))) { + || (additionalFiles != null && additionalFiles.contains(existingFile))) { // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete // checksum) continue; @@ -825,17 +875,9 @@ public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, l userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); latestSegmentInfos.setUserData(userData, true); latestSegmentInfos.commit(directory()); - - // similar to TrimUnsafeCommits, create a commit with an appending IW, this will delete old commits and ensure all files - // associated with the SegmentInfos.commit are fsynced. - final List existingCommits = DirectoryReader.listCommits(directory); - assert existingCommits.isEmpty() == false : "Expected at least one commit but none found"; - final IndexCommit lastIndexCommit = existingCommits.get(existingCommits.size() - 1); - assert latestSegmentInfos.getSegmentsFileName().equals(lastIndexCommit.getSegmentsFileName()); - try (IndexWriter writer = newAppendingIndexWriter(directory, lastIndexCommit)) { - writer.setLiveCommitData(lastIndexCommit.getUserData().entrySet()); - writer.commit(); - } + directory.sync(latestSegmentInfos.files(true)); + directory.syncMetaData(); + cleanupAndPreserveLatestCommitPoint("After commit", latestSegmentInfos); } finally { metadataLock.writeLock().unlock(); } @@ -1033,6 +1075,11 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg } static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory directory, Logger logger) throws IOException { + return loadMetadata(segmentInfos, directory, logger, false); + } + + static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory directory, Logger logger, boolean ignoreSegmentsFile) + throws IOException { long numDocs = Lucene.getNumDocs(segmentInfos); Map commitUserDataBuilder = new HashMap<>(); commitUserDataBuilder.putAll(segmentInfos.getUserData()); @@ -1067,8 +1114,10 @@ static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory director if (maxVersion == null) { maxVersion = org.opensearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion; } - final String segmentsFile = segmentInfos.getSegmentsFileName(); - checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); + if (ignoreSegmentsFile == false) { + final String segmentsFile = segmentInfos.getSegmentsFileName(); + checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); + } return new LoadedMetadata(unmodifiableMap(builder), unmodifiableMap(commitUserDataBuilder), numDocs); } @@ -1148,7 +1197,6 @@ public Map asMap() { * Helper 
method used to group store files according to segment and commit. * * @see MetadataSnapshot#recoveryDiff(MetadataSnapshot) - * @see MetadataSnapshot#segmentReplicationDiff(MetadataSnapshot) */ private Iterable> getGroupedFilesIterable() { final Map> perSegment = new HashMap<>(); @@ -1241,51 +1289,6 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { return recoveryDiff; } - /** - * Segment Replication method - * Returns a diff between the two snapshots that can be used for getting list of files to copy over to a replica for segment replication. The given snapshot is treated as the - * target and this snapshot as the source. The returned diff will hold a list of files that are: - *
<ul>
-     * <li>identical: they exist in both snapshots and they can be considered the same ie. they don't need to be recovered</li>
-     * <li>different: they exist in both snapshots but their they are not identical</li>
-     * <li>missing: files that exist in the source but not in the target</li>
-     * </ul>
- */ - public RecoveryDiff segmentReplicationDiff(MetadataSnapshot recoveryTargetSnapshot) { - final List identical = new ArrayList<>(); - final List different = new ArrayList<>(); - final List missing = new ArrayList<>(); - final ArrayList identicalFiles = new ArrayList<>(); - for (List segmentFiles : getGroupedFilesIterable()) { - identicalFiles.clear(); - boolean consistent = true; - for (StoreFileMetadata meta : segmentFiles) { - StoreFileMetadata storeFileMetadata = recoveryTargetSnapshot.get(meta.name()); - if (storeFileMetadata == null) { - // Do not consider missing files as inconsistent in SegRep as replicas may lag while primary updates - // documents and generate new files specific to a segment - missing.add(meta); - } else if (storeFileMetadata.isSame(meta) == false) { - consistent = false; - different.add(meta); - } else { - identicalFiles.add(meta); - } - } - if (consistent) { - identical.addAll(identicalFiles); - } else { - different.addAll(identicalFiles); - } - } - RecoveryDiff recoveryDiff = new RecoveryDiff( - Collections.unmodifiableList(identical), - Collections.unmodifiableList(different), - Collections.unmodifiableList(missing) - ); - return recoveryDiff; - } - /** * Returns the number of files in this snapshot */ diff --git a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java index a73a3b54184da..48c2dfd30f589 100644 --- a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java +++ b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java @@ -10,13 +10,12 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.transport.TransportResponse; import java.io.IOException; -import java.util.Set; +import java.util.Map; /** * Response returned from a {@link SegmentReplicationSource} that includes the file metadata, and SegmentInfos @@ -28,52 +27,41 @@ public class CheckpointInfoResponse extends TransportResponse { private final ReplicationCheckpoint checkpoint; - private final Store.MetadataSnapshot snapshot; + private final Map metadataMap; private final byte[] infosBytes; - // pendingDeleteFiles are segments that have been merged away in the latest in memory SegmentInfos - // but are still referenced by the latest commit point (Segments_N). 
- private final Set pendingDeleteFiles; public CheckpointInfoResponse( final ReplicationCheckpoint checkpoint, - final Store.MetadataSnapshot snapshot, - final byte[] infosBytes, - final Set additionalFiles + final Map metadataMap, + final byte[] infosBytes ) { this.checkpoint = checkpoint; - this.snapshot = snapshot; + this.metadataMap = metadataMap; this.infosBytes = infosBytes; - this.pendingDeleteFiles = additionalFiles; } public CheckpointInfoResponse(StreamInput in) throws IOException { this.checkpoint = new ReplicationCheckpoint(in); - this.snapshot = new Store.MetadataSnapshot(in); + this.metadataMap = in.readMap(StreamInput::readString, StoreFileMetadata::new); this.infosBytes = in.readByteArray(); - this.pendingDeleteFiles = in.readSet(StoreFileMetadata::new); } @Override public void writeTo(StreamOutput out) throws IOException { checkpoint.writeTo(out); - snapshot.writeTo(out); + out.writeMap(metadataMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); out.writeByteArray(infosBytes); - out.writeCollection(pendingDeleteFiles); } public ReplicationCheckpoint getCheckpoint() { return checkpoint; } - public Store.MetadataSnapshot getSnapshot() { - return snapshot; + public Map getMetadataMap() { + return metadataMap; } public byte[] getInfosBytes() { return infosBytes; } - - public Set getPendingDeleteFiles() { - return pendingDeleteFiles; - } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index db3f87201b774..91b8243440ac5 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -133,12 +133,7 @@ public void messageReceived(CheckpointInfoRequest request, TransportChannel chan ); final CopyState copyState = ongoingSegmentReplications.prepareForReplication(request, segmentSegmentFileChunkWriter); channel.sendResponse( - new CheckpointInfoResponse( - copyState.getCheckpoint(), - copyState.getMetadataSnapshot(), - copyState.getInfosBytes(), - copyState.getPendingDeleteFiles() - ) + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) ); timer.stop(); logger.trace( diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 6a9406aca13b9..26bec2203c599 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -23,6 +23,7 @@ import org.opensearch.action.StepListener; import org.opensearch.common.UUIDs; import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.shard.IndexShard; @@ -37,12 +38,9 @@ import java.io.IOException; import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; +import java.util.Collections; +import java.util.Map; /** * Represents the target of a replication event. 
@@ -178,9 +176,7 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener filesToFetch = new ArrayList(diff.missing); - Set storeFiles = new HashSet<>(Arrays.asList(store.directory().listAll())); - final Set pendingDeleteFiles = checkpointInfo.getPendingDeleteFiles() - .stream() - .filter(f -> storeFiles.contains(f.name()) == false) - .collect(Collectors.toSet()); - - filesToFetch.addAll(pendingDeleteFiles); - logger.trace("Files to fetch {}", filesToFetch); - - for (StoreFileMetadata file : filesToFetch) { + for (StoreFileMetadata file : diff.missing) { state.getIndex().addFileDetail(file.name(), file.length(), false); } // always send a req even if not fetching files so the primary can clear the copyState for this shard. state.setStage(SegmentReplicationState.Stage.GET_FILES); cancellableThreads.checkForCancel(); - source.getSegmentFiles(getId(), checkpointInfo.getCheckpoint(), filesToFetch, store, getFilesListener); + source.getSegmentFiles(getId(), checkpointInfo.getCheckpoint(), diff.missing, store, getFilesListener); } private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse, ActionListener listener) { @@ -231,7 +217,7 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse, responseCheckpoint.getSegmentsGen() ); indexShard.finalizeReplication(infos, responseCheckpoint.getSeqNo()); - store.cleanupAndPreserveLatestCommitPoint("finalize - clean with in memory infos", store.getMetadata(infos)); + store.cleanupAndPreserveLatestCommitPoint("finalize - clean with in memory infos", infos); } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage. // this means we transferred files from the remote that have not be checksummed and they are @@ -280,11 +266,13 @@ private ChecksumIndexInput toIndexInput(byte[] input) { ); } - Store.MetadataSnapshot getMetadataSnapshot() throws IOException { + Map getMetadataMap() throws IOException { if (indexShard.getSegmentInfosSnapshot() == null) { - return Store.MetadataSnapshot.EMPTY; + return Collections.emptyMap(); + } + try (final GatedCloseable snapshot = indexShard.getSegmentInfosSnapshot()) { + return store.getSegmentMetadataMap(snapshot.get()); } - return store.getMetadata(indexShard.getSegmentInfosSnapshot().get()); } @Override diff --git a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java index c0e0b4dee2b3f..1dd0886fd2f36 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java @@ -15,14 +15,12 @@ import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.io.IOException; import java.io.UncheckedIOException; -import java.util.HashSet; -import java.util.Set; +import java.util.Map; /** * An Opensearch-specific version of Lucene's CopyState class that @@ -37,8 +35,7 @@ public class CopyState extends AbstractRefCounted { private final ReplicationCheckpoint requestedReplicationCheckpoint; /** Actual ReplicationCheckpoint returned by the shard */ private final ReplicationCheckpoint 
replicationCheckpoint; - private final Store.MetadataSnapshot metadataSnapshot; - private final HashSet pendingDeleteFiles; + private final Map metadataMap; private final byte[] infosBytes; private GatedCloseable commitRef; private final IndexShard shard; @@ -49,7 +46,7 @@ public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShar this.shard = shard; this.segmentInfosRef = shard.getSegmentInfosSnapshot(); SegmentInfos segmentInfos = this.segmentInfosRef.get(); - this.metadataSnapshot = shard.store().getMetadata(segmentInfos); + this.metadataMap = shard.store().getSegmentMetadataMap(segmentInfos); this.replicationCheckpoint = new ReplicationCheckpoint( shard.shardId(), shard.getOperationPrimaryTerm(), @@ -57,18 +54,7 @@ public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShar shard.getProcessedLocalCheckpoint(), segmentInfos.getVersion() ); - - // Send files that are merged away in the latest SegmentInfos but not in the latest on disk Segments_N. - // This ensures that the store on replicas is in sync with the store on primaries. this.commitRef = shard.acquireLastIndexCommit(false); - Store.MetadataSnapshot metadata = shard.store().getMetadata(this.commitRef.get()); - final Store.RecoveryDiff diff = metadata.recoveryDiff(this.metadataSnapshot); - this.pendingDeleteFiles = new HashSet<>(diff.missing); - if (this.pendingDeleteFiles.isEmpty()) { - // If there are no additional files we can release the last commit immediately. - this.commitRef.close(); - this.commitRef = null; - } ByteBuffersDataOutput buffer = new ByteBuffersDataOutput(); // resource description and name are not used, but resource description cannot be null @@ -95,18 +81,14 @@ public ReplicationCheckpoint getCheckpoint() { return replicationCheckpoint; } - public Store.MetadataSnapshot getMetadataSnapshot() { - return metadataSnapshot; + public Map getMetadataMap() { + return metadataMap; } public byte[] getInfosBytes() { return infosBytes; } - public Set getPendingDeleteFiles() { - return pendingDeleteFiles; - } - public IndexShard getShard() { return shard; } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 88a3bdad53d0c..3af882a8087ec 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -489,12 +489,7 @@ private void resolveCheckpointInfoResponseListener(ActionListener additionalSegments = new ArrayList<>(); + for (String file : store.directory().listAll()) { + if (commitMetadata.contains(file) == false) { + additionalSegments.add(file); + } + } + assertFalse(additionalSegments.isEmpty()); + + // clean up everything not in the latest commit point. 
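+        // (readLastCommittedSegmentsInfo() reflects only the last commit, so every file
+        // collected in additionalSegments above is expected to be deleted by this call)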
+ store.cleanupAndPreserveLatestCommitPoint("test", store.readLastCommittedSegmentsInfo()); + + // we want to ensure commitMetadata files are preserved after calling cleanup + for (String existingFile : store.directory().listAll()) { + assertTrue(commitMetadata.contains(existingFile)); + assertFalse(additionalSegments.contains(existingFile)); + } + deleteContent(store.directory()); + IOUtils.close(store); + } + + public void testGetSegmentMetadataMap() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store( + shardId, + SEGMENT_REPLICATION_INDEX_SETTINGS, + new NIOFSDirectory(createTempDir()), + new DummyShardLock(shardId) + ); + store.createEmpty(Version.LATEST); + final Map metadataSnapshot = store.getSegmentMetadataMap(store.readLastCommittedSegmentsInfo()); + // no docs indexed only _N file exists. + assertTrue(metadataSnapshot.isEmpty()); + + // commit some docs to create a commit point. + commitRandomDocs(store); + + final Map snapshotAfterCommit = store.getSegmentMetadataMap(store.readLastCommittedSegmentsInfo()); + assertFalse(snapshotAfterCommit.isEmpty()); + assertFalse(snapshotAfterCommit.keySet().stream().anyMatch((name) -> name.startsWith(IndexFileNames.SEGMENTS))); + store.close(); + } + + public void testSegmentReplicationDiff() { + final String segmentName = "_0.si"; + final StoreFileMetadata SEGMENT_FILE = new StoreFileMetadata(segmentName, 1L, "0", Version.LATEST); + // source has file target is missing. + Store.RecoveryDiff diff = Store.segmentReplicationDiff(Map.of(segmentName, SEGMENT_FILE), Collections.emptyMap()); + assertEquals(List.of(SEGMENT_FILE), diff.missing); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.identical.isEmpty()); + + // target has file not on source. + diff = Store.segmentReplicationDiff(Collections.emptyMap(), Map.of(segmentName, SEGMENT_FILE)); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.identical.isEmpty()); + + // source and target have identical file. + diff = Store.segmentReplicationDiff(Map.of(segmentName, SEGMENT_FILE), Map.of(segmentName, SEGMENT_FILE)); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + assertEquals(List.of(SEGMENT_FILE), diff.identical); + + // source has diff copy of same file as target. + StoreFileMetadata SOURCE_DIFF_FILE = new StoreFileMetadata(segmentName, 1L, "abc", Version.LATEST); + diff = Store.segmentReplicationDiff(Map.of(segmentName, SOURCE_DIFF_FILE), Map.of(segmentName, SEGMENT_FILE)); + assertTrue(diff.missing.isEmpty()); + assertEquals(List.of(SOURCE_DIFF_FILE), diff.different); + assertTrue(diff.identical.isEmpty()); + + // ignore _N files if included in source map. 
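+        // (segmentReplicationDiff skips any file name starting with IndexFileNames.SEGMENTS,
+        // so a source-only segments_2 entry yields an empty diff instead of a missing file)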
+ final String segmentsFile = IndexFileNames.SEGMENTS.concat("_2"); + StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(segmentsFile, 1L, "abc", Version.LATEST); + diff = Store.segmentReplicationDiff(Map.of(segmentsFile, SEGMENTS_FILE), Collections.emptyMap()); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.identical.isEmpty()); + } + + private void commitRandomDocs(Store store) throws IOException { + IndexWriter writer = indexRandomDocs(store); + writer.commit(); + writer.close(); + } + + private IndexWriter indexRandomDocs(Store store) throws IOException { IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec( TestUtil.getDefaultCodec() ); + indexWriterConfig.setCommitOnClose(false); indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig); int docs = 1 + random().nextInt(100); @@ -1171,21 +1281,6 @@ public void testcleanupAndPreserveLatestCommitPoint() throws IOException { ); doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); writer.addDocument(doc); - writer.commit(); - writer.close(); - - Store.MetadataSnapshot commitMetadata = store.getMetadata(); - - Store.MetadataSnapshot refreshMetadata = Store.MetadataSnapshot.EMPTY; - - store.cleanupAndPreserveLatestCommitPoint("test", refreshMetadata); - - // we want to ensure commitMetadata files are preserved after calling cleanup - for (String existingFile : store.directory().listAll()) { - assert (commitMetadata.contains(existingFile) == true); - } - - deleteContent(store.directory()); - IOUtils.close(store); + return writer; } } diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index f49ee0471b5e8..bd3106454f49b 100644 --- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -11,28 +11,30 @@ import org.junit.Assert; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexService; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.shard.ShardId; -import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; -import java.util.List; +import java.util.concurrent.CountDownLatch; +import 
java.util.concurrent.TimeUnit; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -55,15 +57,18 @@ public class OngoingSegmentReplicationsTests extends IndexShardTestCase { private GetSegmentFilesRequest getSegmentFilesRequest; - final Settings settings = Settings.builder().put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()).build(); + final Settings settings = Settings.builder() + .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); @Override public void setUp() throws Exception { super.setUp(); - primary = newStartedShard(true); - replica = newShard(primary.shardId(), false); + primary = newStartedShard(true, settings); + replica = newShard(false, settings, new NRTReplicationEngineFactory()); recoverReplica(replica, primary, true); replicaDiscoveryNode = replica.recoveryState().getTargetNode(); primaryDiscoveryNode = replica.recoveryState().getSourceNode(); @@ -93,6 +98,8 @@ public void tearDown() throws Exception { } public void testPrepareAndSendSegments() throws IOException { + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); OngoingSegmentReplications replications = spy(new OngoingSegmentReplications(mockIndicesService, recoverySettings)); final CheckpointInfoRequest request = new CheckpointInfoRequest( 1L, @@ -112,17 +119,14 @@ public void testPrepareAndSendSegments() throws IOException { 1L, replica.routingEntry().allocationId().getId(), replicaDiscoveryNode, - new ArrayList<>(copyState.getMetadataSnapshot().asMap().values()), + new ArrayList<>(copyState.getMetadataMap().values()), testCheckpoint ); - final Collection expectedFiles = List.copyOf(primary.store().getMetadata().asMap().values()); replications.startSegmentCopy(getSegmentFilesRequest, new ActionListener<>() { @Override public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { - assertEquals(1, getSegmentFilesResponse.files.size()); - assertEquals(1, expectedFiles.size()); - assertTrue(expectedFiles.stream().findFirst().get().isSame(getSegmentFilesResponse.files.get(0))); + assertEquals(copyState.getMetadataMap().size(), getSegmentFilesResponse.files.size()); assertEquals(0, copyState.refCount()); assertFalse(replications.isInCopyStateMap(request.getCheckpoint())); assertEquals(0, replications.size()); @@ -181,7 +185,7 @@ public void testCancelReplication_AfterSendFilesStarts() throws IOException, Int 1L, replica.routingEntry().allocationId().getId(), replicaDiscoveryNode, - new ArrayList<>(copyState.getMetadataSnapshot().asMap().values()), + new ArrayList<>(copyState.getMetadataMap().values()), testCheckpoint ); replications.startSegmentCopy(getSegmentFilesRequest, new ActionListener<>() { diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index 5f6ec7e505805..cde5cd980a91d 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -19,6 +19,7 @@ import 
org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.StoreFileMetadata; @@ -76,7 +77,7 @@ public void testSendFiles() throws IOException { 1 ); - final List expectedFiles = List.copyOf(copyState.getMetadataSnapshot().asMap().values()); + final List expectedFiles = List.copyOf(copyState.getMetadataMap().values()); final GetSegmentFilesRequest getSegmentFilesRequest = new GetSegmentFilesRequest( 1L, @@ -137,6 +138,9 @@ public void onFailure(Exception e) { } public void testSendFileFails() throws IOException { + // index some docs on the primary so a segment is created. + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); chunkWriter = (fileMetadata, position, content, lastChunk, totalTranslogOps, listener) -> listener.onFailure( new OpenSearchException("Test") ); @@ -153,7 +157,7 @@ public void testSendFileFails() throws IOException { 1 ); - final List expectedFiles = List.copyOf(copyState.getMetadataSnapshot().asMap().values()); + final List expectedFiles = List.copyOf(copyState.getMetadataMap().values()); final GetSegmentFilesRequest getSegmentFilesRequest = new GetSegmentFilesRequest( 1L, diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java index 4bfdd81d50a1e..6183f1e5d9dfb 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -121,9 +121,7 @@ public void testCheckpointInfo() { public void onResponse(CheckpointInfoResponse response) { assertEquals(testCheckpoint, response.getCheckpoint()); assertNotNull(response.getInfosBytes()); - // CopyStateTests sets up one pending delete file and one committed segments file - assertEquals(1, response.getPendingDeleteFiles().size()); - assertEquals(1, response.getSnapshot().size()); + assertEquals(1, response.getMetadataMap().size()); } @Override diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index f2eb635f24bbf..7437cb22e44d1 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -62,7 +62,7 @@ public void setUp() throws Exception { .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - primaryShard = newStartedShard(true); + primaryShard = newStartedShard(true, settings); replicaShard = newShard(false, settings, new NRTReplicationEngineFactory()); recoverReplica(replicaShard, primaryShard, true); checkpoint = new ReplicationCheckpoint(replicaShard.shardId(), 0L, 0L, 0L, 0L); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java 
b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 11217a46b3c69..f8341573770a6 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -18,7 +18,6 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexFormatTooNewException; -import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.store.ByteBuffersDataOutput; import org.apache.lucene.store.ByteBuffersIndexOutput; import org.apache.lucene.store.Directory; @@ -51,7 +50,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.Random; import java.util.Arrays; @@ -71,26 +69,13 @@ public class SegmentReplicationTargetTests extends IndexShardTestCase { private ReplicationCheckpoint repCheckpoint; private ByteBuffersDataOutput buffer; - private static final StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(IndexFileNames.SEGMENTS, 1L, "0", Version.LATEST); - private static final StoreFileMetadata SEGMENTS_FILE_DIFF = new StoreFileMetadata( - IndexFileNames.SEGMENTS, - 5L, - "different", - Version.LATEST - ); - private static final StoreFileMetadata PENDING_DELETE_FILE = new StoreFileMetadata("pendingDelete.del", 1L, "1", Version.LATEST); + private static final String SEGMENT_NAME = "_0.si"; + private static final StoreFileMetadata SEGMENT_FILE = new StoreFileMetadata(SEGMENT_NAME, 1L, "0", Version.LATEST); + private static final StoreFileMetadata SEGMENT_FILE_DIFF = new StoreFileMetadata(SEGMENT_NAME, 5L, "different", Version.LATEST); - private static final Store.MetadataSnapshot SI_SNAPSHOT = new Store.MetadataSnapshot( - Map.of(SEGMENTS_FILE.name(), SEGMENTS_FILE), - null, - 0 - ); + private static final Map SI_SNAPSHOT = Map.of(SEGMENT_FILE.name(), SEGMENT_FILE); - private static final Store.MetadataSnapshot SI_SNAPSHOT_DIFFERENT = new Store.MetadataSnapshot( - Map.of(SEGMENTS_FILE_DIFF.name(), SEGMENTS_FILE_DIFF), - null, - 0 - ); + private static final Map SI_SNAPSHOT_DIFFERENT = Map.of(SEGMENT_FILE_DIFF.name(), SEGMENT_FILE_DIFF); private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings( "index", @@ -135,7 +120,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -146,9 +131,8 @@ public void getSegmentFiles( Store store, ActionListener listener ) { - assertEquals(filesToFetch.size(), 2); - assert (filesToFetch.contains(SEGMENTS_FILE)); - assert (filesToFetch.contains(PENDING_DELETE_FILE)); + assertEquals(1, filesToFetch.size()); + assert (filesToFetch.contains(SEGMENT_FILE)); listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; @@ -230,7 +214,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -273,7 +257,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, 
ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -318,7 +302,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -362,7 +346,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -380,7 +364,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener.class ); segrepTarget = spy(new SegmentReplicationTarget(repCheckpoint, indexShard, segrepSource, segRepListener)); - when(segrepTarget.getMetadataSnapshot()).thenReturn(SI_SNAPSHOT_DIFFERENT); + when(segrepTarget.getMetadataMap()).thenReturn(SI_SNAPSHOT_DIFFERENT); segrepTarget.startReplication(new ActionListener() { @Override public void onResponse(Void replicationResponse) { @@ -413,9 +397,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse( - new CheckpointInfoResponse(checkpoint, storeMetadataSnapshots.get(1), buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE)) - ); + listener.onResponse(new CheckpointInfoResponse(checkpoint, storeMetadataSnapshots.get(1).asMap(), buffer.toArrayCopy())); } @Override @@ -434,7 +416,7 @@ public void getSegmentFiles( ); segrepTarget = spy(new SegmentReplicationTarget(repCheckpoint, indexShard, segrepSource, segRepListener)); - when(segrepTarget.getMetadataSnapshot()).thenReturn(storeMetadataSnapshots.get(0)); + when(segrepTarget.getMetadataMap()).thenReturn(storeMetadataSnapshots.get(0).asMap()); segrepTarget.startReplication(new ActionListener() { @Override public void onResponse(Void replicationResponse) { diff --git a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java index a6f0cf7e98411..77a4a6d22039e 100644 --- a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java @@ -22,7 +22,6 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.io.IOException; -import java.util.Set; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -32,6 +31,7 @@ public class CopyStateTests extends IndexShardTestCase { private static final long EXPECTED_LONG_VALUE = 1L; private static final ShardId TEST_SHARD_ID = new ShardId("testIndex", "testUUID", 0); private static final StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(IndexFileNames.SEGMENTS, 1L, "0", Version.LATEST); + private static final StoreFileMetadata SEGMENT_FILE = new StoreFileMetadata("_0.si", 1L, "0", Version.LATEST); private static final StoreFileMetadata PENDING_DELETE_FILE = new StoreFileMetadata("pendingDelete.del", 1L, "1", Version.LATEST); private static final 
Store.MetadataSnapshot COMMIT_SNAPSHOT = new Store.MetadataSnapshot( @@ -41,7 +41,7 @@ public class CopyStateTests extends IndexShardTestCase { ); private static final Store.MetadataSnapshot SI_SNAPSHOT = new Store.MetadataSnapshot( - Map.of(SEGMENTS_FILE.name(), SEGMENTS_FILE), + Map.of(SEGMENT_FILE.name(), SEGMENT_FILE), null, 0 ); @@ -61,10 +61,6 @@ public void testCopyStateCreation() throws IOException { // version was never set so this should be zero assertEquals(0, checkpoint.getSegmentInfosVersion()); assertEquals(EXPECTED_LONG_VALUE, checkpoint.getPrimaryTerm()); - - Set pendingDeleteFiles = copyState.getPendingDeleteFiles(); - assertEquals(1, pendingDeleteFiles.size()); - assertTrue(pendingDeleteFiles.contains(PENDING_DELETE_FILE)); } public static IndexShard createMockIndexShard() throws IOException { @@ -78,7 +74,7 @@ public static IndexShard createMockIndexShard() throws IOException { SegmentInfos testSegmentInfos = new SegmentInfos(Version.LATEST.major); when(mockShard.getSegmentInfosSnapshot()).thenReturn(new GatedCloseable<>(testSegmentInfos, () -> {})); - when(mockStore.getMetadata(testSegmentInfos)).thenReturn(SI_SNAPSHOT); + when(mockStore.getSegmentMetadataMap(testSegmentInfos)).thenReturn(SI_SNAPSHOT.asMap()); IndexCommit mockIndexCommit = mock(IndexCommit.class); when(mockShard.acquireLastIndexCommit(false)).thenReturn(new GatedCloseable<>(mockIndexCommit, () -> {})); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 0838a1fe87aa4..073dc4b84472e 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -134,6 +134,7 @@ import java.io.IOException; import java.util.ArrayList; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -1200,12 +1201,7 @@ public void getCheckpointMetadata( try { final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); listener.onResponse( - new CheckpointInfoResponse( - copyState.getCheckpoint(), - copyState.getMetadataSnapshot(), - copyState.getInfosBytes(), - copyState.getPendingDeleteFiles() - ) + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) ); copyState.decRef(); } catch (IOException e) { From 1889d966542355aea0a3839931cf4525e429208d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Sep 2022 13:37:51 -0400 Subject: [PATCH 11/11] Bump org.gradle.test-retry from 1.4.0 to 1.4.1 (#4411) * Bump org.gradle.test-retry from 1.4.0 to 1.4.1 Bumps org.gradle.test-retry from 1.4.0 to 1.4.1. --- updated-dependencies: - dependency-name: org.gradle.test-retry dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 4 +++- build.gradle | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a2b6528783a39..d04b754531b0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) - BWC version 2.2.2 ([#4383](https://github.com/opensearch-project/OpenSearch/pull/4383)) - Support for labels on version bump PRs, skip label support for changelog verifier ([#4391](https://github.com/opensearch-project/OpenSearch/pull/4391)) +### Dependencies +- Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1 ### Dependencies - Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0 @@ -69,4 +71,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/build.gradle b/build.gradle index a1f4f2d04883a..56c5610124958 100644 --- a/build.gradle +++ b/build.gradle @@ -56,7 +56,7 @@ plugins { id 'opensearch.docker-support' id 'opensearch.global-build-info' id "com.diffplug.spotless" version "6.10.0" apply false - id "org.gradle.test-retry" version "1.4.0" apply false + id "org.gradle.test-retry" version "1.4.1" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' }