diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index cbaa7fa10fbb6..892d04936b743 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -7,6 +7,10 @@ on: - 'dependabot/**' pull_request_target: types: [opened, synchronize, reopened] + workflow_run: + workflows: ["Gradle Precommit"] + types: + - completed jobs: gradle-check: diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index 9860be4159b83..e264d65cdf191 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -1,9 +1,12 @@ name: Gradle Precommit on: [pull_request] - + jobs: precommit: - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v2 - name: Set up JDK 11 @@ -13,4 +16,4 @@ jobs: distribution: adopt - name: Run Gradle run: | - ./gradlew precommit --parallel + ./gradlew javadoc precommit --parallel diff --git a/CHANGELOG.md b/CHANGELOG.md index 33aea5ed569f7..d74668074e150 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,11 @@ # CHANGELOG + Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] + ### Added + - Add support for s390x architecture ([#4001](https://github.com/opensearch-project/OpenSearch/pull/4001)) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) @@ -21,6 +24,11 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Segment Replication] Add snapshot and restore tests for segment replication feature ([#3993](https://github.com/opensearch-project/OpenSearch/pull/3993)) - Added missing javadocs for `:example-plugins` modules ([#4540](https://github.com/opensearch-project/OpenSearch/pull/4540)) - Add missing Javadoc tag descriptions in unit tests ([#4629](https://github.com/opensearch-project/OpenSearch/pull/4629)) +- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) +- Added precommit support for windows ([#4676](https://github.com/opensearch-project/OpenSearch/pull/4676)) +- Added release notes for 1.3.6 ([#4681](https://github.com/opensearch-project/OpenSearch/pull/4681)) +- Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) +- Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) - Add a new node role which can provide search capability for remote shard ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) ### Dependencies @@ -43,8 +51,10 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `reactor-core` from 3.4.18 to 3.4.23 ([#4548](https://github.com/opensearch-project/OpenSearch/pull/4548)) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) +- Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) ### Changed + - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use 
RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) @@ -59,8 +69,11 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Further simplification of the ZIP publication implementation ([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() ([#4582](https://github.com/opensearch-project/OpenSearch/pull/4582)) +- Include Windows OS in Bootstrap initializeNatives() check for definitelyRunningAsRoot() ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) +- Improve Gradle pre-commit checks to pre-empt Jenkins build ([#4660](https://github.com/opensearch-project/OpenSearch/pull/4660)) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) +- Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) ### Deprecated @@ -69,6 +82,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) ### Fixed + - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305)) @@ -93,6 +107,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixed the ignore_malformed setting to also ignore objects ([#4494](https://github.com/opensearch-project/OpenSearch/pull/4494)) - [Segment Replication] Ignore lock file when testing cleanupAndPreserveLatestCommitPoint ([#4544](https://github.com/opensearch-project/OpenSearch/pull/4544)) - Updated jackson to 2.13.4 and snakeyml to 1.32 ([#4556](https://github.com/opensearch-project/OpenSearch/pull/4556)) +- Fixing PIT flaky tests ([#4632](https://github.com/opensearch-project/OpenSearch/pull/4632)) - Fixed day of year defaulting for round up parser ([#4627](https://github.com/opensearch-project/OpenSearch/pull/4627)) - Fixed the SnapshotsInProgress error during index deletion ([#4570](https://github.com/opensearch-project/OpenSearch/pull/4570)) - [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) @@ -102,12 +117,17 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) - 
[Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) - [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) +- Attempt to fix Github workflow for Gradle Check job ([#4679](https://github.com/opensearch-project/OpenSearch/pull/4679)) +- Fix flaky DecommissionControllerTests.testTimesOut ([#4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) ### Security + - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) ## [2.x] + ### Added + - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) - Added RestLayer Changes for PIT stats ([#4217](https://github.com/opensearch-project/OpenSearch/pull/4217)) @@ -123,11 +143,11 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Removed ### Fixed + - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) ### Security - [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD [2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 3ef3c6f9faf49..0b23631816fe9 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -110,7 +110,7 @@ dependencies { api 'com.netflix.nebula:gradle-info-plugin:11.3.3' api 'org.apache.rat:apache-rat:0.13' api 'commons-io:commons-io:2.7' - api "net.java.dev.jna:jna:5.11.0" + api "net.java.dev.jna:jna:5.12.1" api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2' api 'org.jdom:jdom2:2.0.6.1' api 'org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.7.10' diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a9545f5738efa..bf72245c63918 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -19,7 +19,7 @@ slf4j = 1.7.36 asm = 9.3 # when updating the JNA version, also update the version in buildSrc/build.gradle -jna = 5.5.0 +jna = 5.12.1 netty = 4.1.79.Final joda = 2.10.13 diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index cbb4db10cd519..be9b614a8720f 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -71,7 +72,7 @@ public void testCreateAndDeletePit() throws IOException { assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(createPitResponse.getId())); } - public void testDeleteAllAndListAllPits() throws IOException { + public void testDeleteAllAndListAllPits() throws IOException, InterruptedException { CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); 
CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); @@ -90,9 +91,11 @@ public void testDeleteAllAndListAllPits() throws IOException { List pits = getAllPitResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); assertTrue(pits.contains(pitResponse.getId())); assertTrue(pits.contains(pitResponse1.getId())); + CountDownLatch countDownLatch = new CountDownLatch(1); ActionListener deletePitListener = new ActionListener<>() { @Override public void onResponse(DeletePitResponse response) { + countDownLatch.countDown(); for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { assertTrue(deletePitInfo.isSuccessful()); } @@ -100,6 +103,7 @@ public void onResponse(DeletePitResponse response) { @Override public void onFailure(Exception e) { + countDownLatch.countDown(); if (!(e instanceof OpenSearchStatusException)) { throw new AssertionError("Delete all failed"); } @@ -123,6 +127,7 @@ public void onFailure(Exception e) { }; highLevelClient().getAllPitsAsync(RequestOptions.DEFAULT, getPitsListener); highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); // validate no pits case getAllPitResponse = highLevelClient().getAllPits(RequestOptions.DEFAULT); assertTrue(getAllPitResponse.getPitInfos().size() == 0); diff --git a/release-notes/opensearch.release-notes-1.3.6.md b/release-notes/opensearch.release-notes-1.3.6.md new file mode 100644 index 0000000000000..63db5390ae5e2 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.6.md @@ -0,0 +1,10 @@ +## 2022-10-04 Version 1.3.6 Release Notes + +### Upgrades +* Update to Netty 4.1.80.Final ([#4379](https://github.com/opensearch-project/OpenSearch/pull/4379)) +* Revert to Netty 4.1.79.Final ([#4433](https://github.com/opensearch-project/OpenSearch/pull/4433)) +* Bumps jackson and snakeyaml dependencies ([#4600](https://github.com/opensearch-project/OpenSearch/pull/4600)) +* Bump commons-configuration2 from 2.7 to 2.8.0 in /plugins/repository-hdfs ([#4646](https://github.com/opensearch-project/OpenSearch/pull/4646)) + +### Bug Fixes +* Set analyzer to regex query string search ([#4220](https://github.com/opensearch-project/OpenSearch/pull/4220)) diff --git a/server/licenses/jna-5.12.1.jar.sha1 b/server/licenses/jna-5.12.1.jar.sha1 new file mode 100644 index 0000000000000..0d42f248c1afd --- /dev/null +++ b/server/licenses/jna-5.12.1.jar.sha1 @@ -0,0 +1 @@ +b1e93a735caea94f503e95e6fe79bf9cdc1e985d \ No newline at end of file diff --git a/server/licenses/jna-5.5.0.jar.sha1 b/server/licenses/jna-5.5.0.jar.sha1 deleted file mode 100644 index 5621dfc743dd0..0000000000000 --- a/server/licenses/jna-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e0845217c4907822403912ad6828d8e0b256208 diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 9c53a0f449a40..1bffe9ec98ec5 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -99,7 +99,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_2_4_0 
= new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version CURRENT = V_3_0_0; diff --git a/server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java b/server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java new file mode 100644 index 0000000000000..09b92c00684f4 --- /dev/null +++ b/server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.bootstrap; + +import com.sun.jna.Native; +import com.sun.jna.Pointer; +import com.sun.jna.ptr.IntByReference; +import com.sun.jna.ptr.PointerByReference; +import com.sun.jna.Structure; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; + +import java.util.List; + +/** + * Library for Windows/Advapi32 + * + * @opensearch.internal + */ +final class JNAAdvapi32Library { + + private static final Logger logger = LogManager.getLogger(JNAAdvapi32Library.class); + + private static final class Holder { + private static final JNAAdvapi32Library instance = new JNAAdvapi32Library(); + } + + private JNAAdvapi32Library() { + if (Constants.WINDOWS) { + try { + Native.register("advapi32"); + logger.debug("windows/Advapi32 library loaded"); + } catch (NoClassDefFoundError e) { + logger.warn("JNA not found. native methods and handlers will be disabled."); + } catch (UnsatisfiedLinkError e) { + logger.warn("unable to link Windows/Advapi32 library. native methods and handlers will be disabled."); + } + } + } + + static JNAAdvapi32Library getInstance() { + return Holder.instance; + } + + /** + * Access right required to query an access token. + * Used by {@link #OpenProcessToken(Pointer, int, PointerByReference)}. + * + * https://learn.microsoft.com/en-us/windows/win32/secauthz/access-rights-for-access-token-objects + */ + public static final int TOKEN_QUERY = 0x0008; + + /** + * TOKEN_INFORMATION_CLASS enumeration value that specifies the type of information being assigned to or retrieved from an access token. + * Used by {@link #GetTokenInformation(Pointer, int, Structure, int, IntByReference)}. + * + * https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-token_information_class + */ + public static final int TOKEN_ELEVATION = 0x14; + + /** + * Native call to the Advapi32 API to open the access token associated with a process. + * + * @param processHandle Handle to the process whose access token is opened. + * The process must have the PROCESS_QUERY_INFORMATION access permission. + * @param desiredAccess Specifies an access mask that specifies the requested types of access to the access token. + * These requested access types are compared with the discretionary access control list (DACL) of the token to determine which accesses are granted or denied. + * @param tokenHandle Pointer to a handle that identifies the newly opened access token when the function returns. + * @return If the function succeeds, the return value is true. + * If the function fails, the return value is false. + * To get extended error information, call GetLastError. 
+ */ + native boolean OpenProcessToken(Pointer processHandle, int desiredAccess, PointerByReference tokenHandle); + + /** + * Retrieves a specified type of information about an access token. + * The calling process must have appropriate access rights to obtain the information. + * + * @param tokenHandle Handle to an access token from which information is retrieved. + * If TokenInformationClass specifies TokenSource, the handle must have TOKEN_QUERY_SOURCE access. + * For all other TokenInformationClass values, the handle must have TOKEN_QUERY access. + * @param tokenInformationClass Specifies a value from the TOKEN_INFORMATION_CLASS enumerated type to identify the type of information the function retrieves. + * @param tokenInformation Pointer to a buffer the function fills with the requested information. + * The structure put into this buffer depends upon the type of information specified by the TokenInformationClass parameter. + * @param tokenInformationLength Specifies the size, in bytes, of the buffer pointed to by the TokenInformation parameter. + * If TokenInformation is NULL, this parameter must be zero. + * @param returnLength Pointer to a variable that receives the number of bytes needed for the buffer pointed to by the TokenInformation parameter. + * If this value is larger than the value specified in the TokenInformationLength parameter, the function fails and stores no data in the buffer. + * @return If the function succeeds, the return value is true. + * If the function fails, the return value is zero. + * To get extended error information, call GetLastError. + */ + native boolean GetTokenInformation( + Pointer tokenHandle, + int tokenInformationClass, + Structure tokenInformation, + int tokenInformationLength, + IntByReference returnLength + ); + + /** + * The TOKEN_ELEVATION structure indicates whether a token has elevated privileges. + * + * https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-token_elevation + */ + public static class TokenElevation extends Structure { + /** + * A nonzero value if the token has elevated privileges; otherwise, a zero value. 
+ */ + public int TokenIsElevated; + + @Override + protected List<String> getFieldOrder() { + return List.of("TokenIsElevated"); + } + } +} diff --git a/server/src/main/java/org/opensearch/bootstrap/JNANatives.java b/server/src/main/java/org/opensearch/bootstrap/JNANatives.java index 033596033b0fd..12f65f8e4a216 100644 --- a/server/src/main/java/org/opensearch/bootstrap/JNANatives.java +++ b/server/src/main/java/org/opensearch/bootstrap/JNANatives.java @@ -35,14 +35,19 @@ import com.sun.jna.Native; import com.sun.jna.Pointer; import com.sun.jna.WString; +import com.sun.jna.ptr.IntByReference; +import com.sun.jna.ptr.PointerByReference; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; +import org.opensearch.bootstrap.JNAAdvapi32Library.TokenElevation; import org.opensearch.monitor.jvm.JvmInfo; import java.nio.file.Path; +import static org.opensearch.bootstrap.JNAAdvapi32Library.TOKEN_ELEVATION; +import static org.opensearch.bootstrap.JNAAdvapi32Library.TOKEN_QUERY; import static org.opensearch.bootstrap.JNAKernel32Library.SizeT; /** @@ -185,13 +190,44 @@ static String rlimitToString(long value) { /** Returns true if user is root, false if not, or if we don't know */ static boolean definitelyRunningAsRoot() { - if (Constants.WINDOWS) { - return false; // don't know - } try { + if (Constants.WINDOWS) { + JNAKernel32Library kernel32 = JNAKernel32Library.getInstance(); + JNAAdvapi32Library advapi32 = JNAAdvapi32Library.getInstance(); + + // Fetch a pseudo handle for the current process. + // The pseudo handle need not be closed when it is no longer needed (calling CloseHandle is a no-op). + Pointer process = kernel32.GetCurrentProcess(); + PointerByReference hToken = new PointerByReference(); + // Fetch the process token for the current process, for which we know we have the access rights + if (!advapi32.OpenProcessToken(process, TOKEN_QUERY, hToken)) { + logger.warn( + "Unable to open the Process Token for the current process [" + JNACLibrary.strerror(Native.getLastError()) + "]" + ); + return false; + } + // We have successfully opened the token. Ensure it gets closed after we use it. 
+ try { + TokenElevation elevation = new TokenElevation(); + IntByReference returnLength = new IntByReference(); + if (!advapi32.GetTokenInformation(hToken.getValue(), TOKEN_ELEVATION, elevation, elevation.size(), returnLength)) { + logger.warn( + "Unable to get TokenElevation information for the current process [" + + JNACLibrary.strerror(Native.getLastError()) + + "]" + ); + return false; + } + // Nonzero value means elevated privileges + return elevation.TokenIsElevated > 0; + } finally { + kernel32.CloseHandle(hToken.getValue()); + } + } + // For unix-based systems, check effective user ID of process return JNACLibrary.geteuid() == 0; } catch (UnsatisfiedLinkError e) { - // this will have already been logged by Kernel32Library, no need to repeat it + // this will have already been logged by the native library, no need to repeat it return false; } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index c31ea53a5fd16..fcab411f073ba 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -14,6 +14,7 @@ import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -481,4 +482,53 @@ public void onFailure(Exception e) { } }; } + + public void startRecommissionAction(final ActionListener<AcknowledgedResponse> listener) { + /* + * For abandoned requests, we might not really know whether the exclusion list was actually restored, + * and can end up in cases where exclusions are still set even after recommission (which is unexpected). + * By definition, OpenSearch clusters should have no voting configuration exclusions in normal operation: + * once the excluded nodes have stopped, the voting configuration exclusions are cleared with DELETE /_cluster/voting_config_exclusions. + * Hence it is safe to remove the exclusions, if any. Users should make a conscious choice before decommissioning an awareness attribute. 
+ */ + decommissionController.clearVotingConfigExclusion(new ActionListener<Void>() { + @Override + public void onResponse(Void unused) { + logger.info("successfully cleared voting config exclusion for deleting the decommission."); + deleteDecommissionState(listener); + } + + @Override + public void onFailure(Exception e) { + logger.error("Failure in clearing voting config during delete_decommission request.", e); + listener.onFailure(e); + } + }, false); + } + + void deleteDecommissionState(ActionListener<AcknowledgedResponse> listener) { + clusterService.submitStateUpdateTask("delete_decommission_state", new ClusterStateUpdateTask(Priority.URGENT) { + @Override + public ClusterState execute(ClusterState currentState) { + logger.info("Deleting the decommission attribute from the cluster state"); + Metadata metadata = currentState.metadata(); + Metadata.Builder mdBuilder = Metadata.builder(metadata); + mdBuilder.removeCustom(DecommissionAttributeMetadata.TYPE); + return ClusterState.builder(currentState).metadata(mdBuilder).build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error(() -> new ParameterizedMessage("Failed to clear decommission attribute. [{}]", source), e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + // Cluster state processed for deleting the decommission attribute. + assert newState.metadata().decommissionAttributeMetadata() == null; + listener.onResponse(new AcknowledgedResponse(true)); + } + }); + } } diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index 29d34aee42382..f98669210140d 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -130,6 +130,13 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeBoolean(ignoreUnmapped); } + /** + * Returns the path of the nested query. + */ + public String path() { + return path; + } + /** + * Returns the nested query to execute. 
*/ diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java index 8b5343184dabd..5b2dce277189c 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.decommission; +import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.Before; import org.opensearch.OpenSearchTimeoutException; @@ -45,6 +46,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -53,6 +55,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; import static org.opensearch.cluster.ClusterState.builder; import static org.opensearch.cluster.OpenSearchAllocationTestCase.createAllocationService; @@ -213,24 +216,25 @@ public void testTimesOut() throws InterruptedException { nodesToBeRemoved.add(clusterService.state().nodes().get("node13")); nodesToBeRemoved.add(clusterService.state().nodes().get("node14")); nodesToBeRemoved.add(clusterService.state().nodes().get("node15")); + final AtomicReference<Exception> exceptionReference = new AtomicReference<>(); decommissionController.removeDecommissionedNodes( nodesToBeRemoved, "unit-test-timeout", - TimeValue.timeValueMillis(2), - new ActionListener<Void>() { + TimeValue.timeValueMillis(0), + new ActionListener<>() { @Override - public void onResponse(Void unused) { - fail("response shouldn't have been called"); - } + public void onResponse(Void unused) {} @Override public void onFailure(Exception e) { - assertThat(e, instanceOf(OpenSearchTimeoutException.class)); - assertThat(e.getMessage(), containsString("waiting for removal of decommissioned nodes")); + exceptionReference.set(e); countDownLatch.countDown(); } } ); + MatcherAssert.assertThat("Expected onFailure to be called", exceptionReference.get(), notNullValue()); + MatcherAssert.assertThat(exceptionReference.get(), instanceOf(OpenSearchTimeoutException.class)); + MatcherAssert.assertThat(exceptionReference.get().getMessage(), containsString("waiting for removal of decommissioned nodes")); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 61a6662ef0cf3..840ce1634a68e 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -11,9 +11,13 @@ import org.hamcrest.Matchers; import org.junit.After; import org.junit.Before; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; +import 
org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -30,6 +34,7 @@ import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; import java.util.Collections; @@ -201,6 +206,86 @@ public void onFailure(Exception e) { assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } + public void testClearClusterDecommissionState() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-2"); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.SUCCESSFUL + ); + ClusterState state = ClusterState.builder(new ClusterName("test")) + .metadata(Metadata.builder().putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata).build()) + .build(); + + ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() { + @Override + public void onResponse(AcknowledgedResponse decommissionResponse) { + DecommissionAttributeMetadata metadata = clusterService.state().metadata().custom(DecommissionAttributeMetadata.TYPE); + assertNull(metadata); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("on failure shouldn't have been called"); + countDownLatch.countDown(); + } + }; + + this.decommissionService.deleteDecommissionState(listener); + + // Decommission Attribute should be removed. 
+ assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + + public void testDeleteDecommissionAttributeClearVotingExclusion() { + TransportService mockTransportService = Mockito.mock(TransportService.class); + Mockito.when(mockTransportService.getLocalNode()).thenReturn(Mockito.mock(DiscoveryNode.class)); + DecommissionService decommissionService = new DecommissionService( + Settings.EMPTY, + clusterSettings, + clusterService, + mockTransportService, + threadPool, + allocationService + ); + decommissionService.startRecommissionAction(Mockito.mock(ActionListener.class)); + + ArgumentCaptor<ClearVotingConfigExclusionsRequest> clearVotingConfigExclusionsRequestArgumentCaptor = ArgumentCaptor.forClass( + ClearVotingConfigExclusionsRequest.class + ); + Mockito.verify(mockTransportService) + .sendRequest( + Mockito.any(DiscoveryNode.class), + Mockito.anyString(), + clearVotingConfigExclusionsRequestArgumentCaptor.capture(), + Mockito.any(TransportResponseHandler.class) + ); + + ClearVotingConfigExclusionsRequest request = clearVotingConfigExclusionsRequestArgumentCaptor.getValue(); + assertFalse(request.getWaitForRemoval()); + } + + public void testClusterUpdateTaskForDeletingDecommission() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener<AcknowledgedResponse> listener = new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + assertTrue(response.isAcknowledged()); + assertNull(clusterService.state().metadata().decommissionAttributeMetadata()); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("On Failure shouldn't have been called"); + countDownLatch.countDown(); + } + }; + decommissionService.deleteDecommissionState(listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + + private ClusterState addDataNodes(ClusterState clusterState, String zone, String... 
nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newDataNode(nodeId, singletonMap("zone", zone)))); diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index abc3e0bb8c4c3..91cfc90a9cebb 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -147,6 +147,10 @@ public void testSerializationBWC() throws IOException { } } + public void testPath() { + assertEquals("nested1", createTestQueryBuilder().path()); + } + public void testValidate() { QueryBuilder innerQuery = RandomQueryBuilder.createQuery(random()); IllegalArgumentException e = expectThrows( diff --git a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java index a23e4141a78e4..b11a80b9d8726 100644 --- a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java @@ -98,7 +98,6 @@ public Settings onNodeStopped(String nodeName) throws Exception { ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); ExecutionException ex = expectThrows(ExecutionException.class, execute::get); assertTrue(ex.getMessage().contains("Failed to execute phase [create_pit]")); - assertTrue(ex.getMessage().contains("Partial shards failure")); validatePitStats("index", 0, 0); return super.onNodeStopped(nodeName); }
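Both test fixes in this patch (PitIT.testDeleteAllAndListAllPits and DecommissionControllerTests.testTimesOut) apply the same de-flaking pattern: the async callback records its outcome and counts down a CountDownLatch, and the test thread awaits the latch with a timeout before asserting. The sketch below is a minimal, self-contained illustration of that pattern using only the JDK; the class name and the Thread stand-in for the async call are assumptions for the example, not code from this patch.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class LatchBasedAsyncAssertionSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        AtomicReference<Exception> failure = new AtomicReference<>();

        // Stand-in for an async API such as deleteAllPitsAsync(...) or removeDecommissionedNodes(...):
        // the callback captures its result instead of asserting on the callback thread.
        new Thread(() -> {
            try {
                // ... asynchronous work happens here ...
            } catch (Exception e) {
                failure.set(e);    // record the failure for the test thread to inspect
            } finally {
                latch.countDown(); // always release the waiting test thread
            }
        }).start();

        // Await with a timeout so a callback that never fires fails the test instead of hanging it.
        if (!latch.await(10, TimeUnit.SECONDS)) {
            throw new AssertionError("callback was never invoked");
        }
        if (failure.get() != null) {
            throw new AssertionError("async operation failed", failure.get());
        }
    }
}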