From ab6849f317d343bbbaba9f076b5a50fa9a95eb96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Vl=C4=8Dek?= Date: Tue, 27 Sep 2022 22:28:55 +0200 Subject: [PATCH 001/122] Further simplification of the ZIP publication implementation (#4360) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A follow-up of PR #4156 that brings in further simplification of the ZIP publication implementation and new tests. It is possible to remove some of the initialization code because the work is already handled by other library under the hood (most likely by NebulaPublishPlugin). For instance the condition to test the `groupId` presence: `if (groupId == null)` was pointless. It was never `true`. If the `project.group` is not defined the publication task fails with an exception, if there is a custom `groupId` value setup in publication config then it overrides the `project.group` as well. Tests are provided to cover these use cases. As for the new tests they cover the following cases: - verify that the task "publishToMavenLocal" gets expected results. The inspiration for this comes from https://github.com/opensearch-project/opensearch-plugin-template-java/pull/35 - verify that applying only the 'opensearch.pluginzip' is enough, because both the NebulaPublish and MavenPublishPlugin plugins are added explicitly and tasks are chained correctly - verify that if the plugin is applied but no publication is defined then a message is printed for the user Signed-off-by: Lukáš Vlček Signed-off-by: Lukáš Vlček --- CHANGELOG.md | 1 + .../opensearch/gradle/pluginzip/Publish.java | 83 +++---- .../gradle/pluginzip/PublishTests.java | 235 +++++++++++++++++- .../pluginzip/customizedGroupValue.gradle | 1 - .../customizedInvalidGroupValue.gradle | 1 - .../pluginzip/missingGroupValue.gradle | 1 - .../pluginzip/missingPOMEntity.gradle | 1 - .../pluginzip/missingPublications.gradle | 21 ++ .../pluginzip/publishToMavenLocal.gradle | 47 ++++ ...onValue.gradle => useDefaultValues.gradle} | 5 +- 10 files changed, 330 insertions(+), 66 deletions(-) create mode 100644 buildSrc/src/test/resources/pluginzip/missingPublications.gradle create mode 100644 buildSrc/src/test/resources/pluginzip/publishToMavenLocal.gradle rename buildSrc/src/test/resources/pluginzip/{groupAndVersionValue.gradle => useDefaultValues.gradle} (90%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b15a28c55c43..42f86d6d89a5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - PUT api for weighted shard routing ([#4272](https://github.com/opensearch-project/OpenSearch/pull/4272)) - Unmute test RelocationIT.testRelocationWhileIndexingRandom ([#4580](https://github.com/opensearch-project/OpenSearch/pull/4580)) - Add DecommissionService and helper to execute awareness attribute decommissioning ([#4084](https://github.com/opensearch-project/OpenSearch/pull/4084)) +- Further simplification of the ZIP publication implementation ([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) ### Deprecated diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index 70c3737ba3674..6dc7d660922b2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -9,30 +9,31 @@ import org.gradle.api.Plugin; import org.gradle.api.Project; -import 
org.gradle.api.logging.Logger; -import org.gradle.api.logging.Logging; import org.gradle.api.publish.PublishingExtension; import org.gradle.api.publish.maven.MavenPublication; -import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; import java.nio.file.Path; import org.gradle.api.Task; +import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; public class Publish implements Plugin { - private static final Logger LOGGER = Logging.getLogger(Publish.class); - - public final static String EXTENSION_NAME = "zipmavensettings"; + // public final static String PLUGIN_ZIP_PUBLISH_POM_TASK = "generatePomFileForPluginZipPublication"; public final static String PUBLICATION_NAME = "pluginZip"; public final static String STAGING_REPO = "zipStaging"; - public final static String PLUGIN_ZIP_PUBLISH_POM_TASK = "generatePomFileForPluginZipPublication"; - public final static String LOCALMAVEN = "publishToMavenLocal"; public final static String LOCAL_STAGING_REPO_PATH = "/build/local-staging-repo"; - public String zipDistributionLocation = "/build/distributions/"; + // TODO: Does the path ^^ need to use platform dependant file separators ? + + private boolean isZipPublicationPresent(Project project) { + PublishingExtension pe = project.getExtensions().findByType(PublishingExtension.class); + if (pe == null) { + return false; + } + return pe.getPublications().findByName(PUBLICATION_NAME) != null; + } - public static void configMaven(Project project) { + private void addLocalMavenRepo(Project project) { final Path buildDirectory = project.getRootDir().toPath(); - project.getPluginManager().apply(MavenPublishPlugin.class); project.getExtensions().configure(PublishingExtension.class, publishing -> { publishing.repositories(repositories -> { repositories.maven(maven -> { @@ -40,52 +41,40 @@ public static void configMaven(Project project) { maven.setUrl(buildDirectory.toString() + LOCAL_STAGING_REPO_PATH); }); }); + }); + } + + private void addZipArtifact(Project project) { + project.getExtensions().configure(PublishingExtension.class, publishing -> { publishing.publications(publications -> { MavenPublication mavenZip = (MavenPublication) publications.findByName(PUBLICATION_NAME); - - if (mavenZip == null) { - mavenZip = publications.create(PUBLICATION_NAME, MavenPublication.class); + if (mavenZip != null) { + mavenZip.artifact(project.getTasks().named("bundlePlugin")); } - - String groupId = mavenZip.getGroupId(); - if (groupId == null) { - // The groupId is not customized thus we get the value from "project.group". 
- // See https://docs.gradle.org/current/userguide/publishing_maven.html#sec:identity_values_in_the_generated_pom - groupId = getProperty("group", project); - } - - String artifactId = project.getName(); - String pluginVersion = getProperty("version", project); - mavenZip.artifact(project.getTasks().named("bundlePlugin")); - mavenZip.setGroupId(groupId); - mavenZip.setArtifactId(artifactId); - mavenZip.setVersion(pluginVersion); }); }); } - static String getProperty(String name, Project project) { - if (project.hasProperty(name)) { - Object property = project.property(name); - if (property != null) { - return property.toString(); - } - } - return null; - } - @Override public void apply(Project project) { + project.getPluginManager().apply("nebula.maven-base-publish"); + project.getPluginManager().apply(MavenPublishPlugin.class); project.afterEvaluate(evaluatedProject -> { - configMaven(project); - Task validatePluginZipPom = project.getTasks().findByName("validatePluginZipPom"); - if (validatePluginZipPom != null) { - project.getTasks().getByName("validatePluginZipPom").dependsOn("generatePomFileForNebulaPublication"); - } - Task publishPluginZipPublicationToZipStagingRepository = project.getTasks() - .findByName("publishPluginZipPublicationToZipStagingRepository"); - if (publishPluginZipPublicationToZipStagingRepository != null) { - publishPluginZipPublicationToZipStagingRepository.dependsOn("generatePomFileForNebulaPublication"); + if (isZipPublicationPresent(project)) { + addLocalMavenRepo(project); + addZipArtifact(project); + Task validatePluginZipPom = project.getTasks().findByName("validatePluginZipPom"); + if (validatePluginZipPom != null) { + validatePluginZipPom.dependsOn("generatePomFileForNebulaPublication"); + } + Task publishPluginZipPublicationToZipStagingRepository = project.getTasks() + .findByName("publishPluginZipPublicationToZipStagingRepository"); + if (publishPluginZipPublicationToZipStagingRepository != null) { + publishPluginZipPublicationToZipStagingRepository.dependsOn("generatePomFileForNebulaPublication"); + } + } else { + project.getLogger() + .warn(String.format("Plugin 'opensearch.pluginzip' is applied but no '%s' publication is defined.", PUBLICATION_NAME)); } }); } diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java index 06632e2dfa476..2ca0e507acb44 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -8,6 +8,8 @@ package org.opensearch.gradle.pluginzip; +import org.gradle.api.Project; +import org.gradle.testfixtures.ProjectBuilder; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; import org.gradle.testkit.runner.UnexpectedBuildFailure; @@ -54,20 +56,152 @@ public void tearDown() { projectDir.delete(); } + /** + * This test is used to verify that adding the 'opensearch.pluginzip' to the project + * adds some other transitive plugins and tasks under the hood. This is basically + * a behavioral test of the {@link Publish#apply(Project)} method. + * + * This is equivalent of having a build.gradle script with just the following section: + *
+     * <pre>
+     *     plugins {
+     *       id 'opensearch.pluginzip'
+     *     }
+     * </pre>
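+     *
+     * For contrast, a rough sketch of the publishing block that would make the pluginZip publication
+     * (and its publish task) appear, essentially the commented-out section kept in
+     * missingPublications.gradle, is:
+     * <pre>
+     *     publishing {
+     *       publications {
+     *         pluginZip(MavenPublication) {
+     *         }
+     *       }
+     *     }
+     * </pre>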
+ */ + @Test + public void applyZipPublicationPluginNoConfig() { + // All we do here is creating an empty project and applying the Publish plugin. + Project project = ProjectBuilder.builder().build(); + project.getPluginManager().apply(Publish.class); + + // WARNING: ===================================================================== + // All the following tests will work only before the gradle project is evaluated. + // There are some methods that will cause the project to be evaluated, such as: + // project.getTasksByName() + // After the project is evaluated there are more tasks found in the project, like + // the [assemble, build, ...] and other standard tasks. + // This can potentially break in future gradle versions (?) + // =============================================================================== + + assertEquals( + "The Publish plugin is applied which adds total of five tasks from Nebula and MavenPublishing plugins.", + 5, + project.getTasks().size() + ); + + // Tasks applied from "nebula.maven-base-publish" + assertTrue( + project.getTasks() + .findByName("generateMetadataFileForNebulaPublication") instanceof org.gradle.api.publish.tasks.GenerateModuleMetadata + ); + assertTrue( + project.getTasks() + .findByName("generatePomFileForNebulaPublication") instanceof org.gradle.api.publish.maven.tasks.GenerateMavenPom + ); + assertTrue( + project.getTasks() + .findByName("publishNebulaPublicationToMavenLocal") instanceof org.gradle.api.publish.maven.tasks.PublishToMavenLocal + ); + + // Tasks applied from MavenPublishPlugin + assertTrue(project.getTasks().findByName("publishToMavenLocal") instanceof org.gradle.api.DefaultTask); + assertTrue(project.getTasks().findByName("publish") instanceof org.gradle.api.DefaultTask); + + // And we miss the pluginzip publication task (because no publishing was defined for it) + assertNull(project.getTasks().findByName(ZIP_PUBLISH_TASK)); + + // We have the following publishing plugins + assertEquals(4, project.getPlugins().size()); + // ... of the following types: + assertNotNull( + "Project is expected to have OpenSearch pluginzip Publish plugin", + project.getPlugins().findPlugin(org.opensearch.gradle.pluginzip.Publish.class) + ); + assertNotNull( + "Project is expected to have MavenPublishPlugin (applied from OpenSearch pluginzip plugin)", + project.getPlugins().findPlugin(org.gradle.api.publish.maven.plugins.MavenPublishPlugin.class) + ); + assertNotNull( + "Project is expected to have Publishing plugin (applied from MavenPublishPublish plugin)", + project.getPlugins().findPlugin(org.gradle.api.publish.plugins.PublishingPlugin.class) + ); + assertNotNull( + "Project is expected to have nebula MavenBasePublishPlugin plugin (applied from OpenSearch pluginzip plugin)", + project.getPlugins().findPlugin(nebula.plugin.publishing.maven.MavenBasePublishPlugin.class) + ); + } + + /** + * Verify that if the zip publication is configured then relevant tasks are chained correctly. + * This test that the dependsOn() is applied correctly. + */ + @Test + public void applyZipPublicationPluginWithConfig() throws IOException, URISyntaxException, InterruptedException { + + /* ------------------------------- + // The ideal approach would be to create a project (via ProjectBuilder) with publishzip plugin, + // have it evaluated (API call) and then check if there are tasks that the plugin uses to hookup into + // and how these tasks are chained. The problem is that there is a known gradle issue (#20301) that does + // not allow for it ATM. 
If, however, it is fixed in the future the following is the code that can + // be used... + + Project project = ProjectBuilder.builder().build(); + project.getPluginManager().apply(Publish.class); + // add publications via API + + // evaluate the project + ((DefaultProject)project).evaluate(); + + // - Check that "validatePluginZipPom" and/or "publishPluginZipPublicationToZipStagingRepository" + // tasks have dependencies on "generatePomFileForNebulaPublication". + // - Check that there is the staging repository added. + + // However, due to known issue(1): https://github.com/gradle/gradle/issues/20301 + // it is impossible to reach to individual tasks and work with them. + // (1): https://docs.gradle.org/7.4/release-notes.html#known-issues + + // I.e.: The following code throws exception, basically any access to individual tasks fails. + project.getTasks().getByName("validatePluginZipPom"); + ------------------------------- */ + + // Instead, we run the gradle project via GradleRunner (this way we get fully evaluated project) + // and using the minimal possible configuration (missingPOMEntity) we test that as soon as the zip publication + // configuration is specified then all the necessary tasks are hooked up and executed correctly. + // However, this does not test execution order of the tasks. + GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle", ZIP_PUBLISH_TASK/*, "-m"*/); + BuildResult result = runner.build(); + + assertEquals(SUCCESS, result.task(":" + "bundlePlugin").getOutcome()); + assertEquals(SUCCESS, result.task(":" + "generatePomFileForNebulaPublication").getOutcome()); + assertEquals(SUCCESS, result.task(":" + "generatePomFileForPluginZipPublication").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + } + + /** + * If the plugin is used but relevant publication is not defined then a message is printed. + */ + @Test + public void missingPublications() throws IOException, URISyntaxException { + GradleRunner runner = prepareGradleRunnerFromTemplate("missingPublications.gradle", "build", "-m"); + BuildResult result = runner.build(); + + assertTrue(result.getOutput().contains("Plugin 'opensearch.pluginzip' is applied but no 'pluginZip' publication is defined.")); + } + @Test public void missingGroupValue() throws IOException, URISyntaxException, XmlPullParserException { - GradleRunner runner = prepareGradleRunnerFromTemplate("missingGroupValue.gradle"); + GradleRunner runner = prepareGradleRunnerFromTemplate("missingGroupValue.gradle", "build", ZIP_PUBLISH_TASK); Exception e = assertThrows(UnexpectedBuildFailure.class, runner::build); assertTrue(e.getMessage().contains("Invalid publication 'pluginZip': groupId cannot be empty.")); } /** - * This would be the most common use case where user declares Maven publication entity with basic info - * and the resulting POM file will use groupId and version values from the Gradle project object. + * This would be the most common use case where user declares Maven publication entity with minimal info + * and the resulting POM file will use artifactId, groupId and version values based on the Gradle project object. 
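+     * A trimmed-down sketch of such a build script (based on the useDefaultValues.gradle template used by
+     * this test, with the POM details omitted) is:
+     * <pre>
+     *     plugins {
+     *       id 'java-gradle-plugin'
+     *       id 'opensearch.pluginzip'
+     *     }
+     *     group = "org.custom.group"
+     *     version = '2.0.0.0'
+     *     publishing {
+     *       publications {
+     *         pluginZip(MavenPublication) {
+     *         }
+     *       }
+     *     }
+     * </pre>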
*/ @Test - public void groupAndVersionValue() throws IOException, URISyntaxException, XmlPullParserException { - GradleRunner runner = prepareGradleRunnerFromTemplate("groupAndVersionValue.gradle"); + public void useDefaultValues() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("useDefaultValues.gradle", "build", ZIP_PUBLISH_TASK); BuildResult result = runner.build(); /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ @@ -108,7 +242,7 @@ public void groupAndVersionValue() throws IOException, URISyntaxException, XmlPu ).exists() ); - // Parse the maven file and validate the groupID + // Parse the maven file and validate default values MavenXpp3Reader reader = new MavenXpp3Reader(); Model model = reader.read( new FileReader( @@ -130,6 +264,10 @@ public void groupAndVersionValue() throws IOException, URISyntaxException, XmlPu ); assertEquals(model.getVersion(), "2.0.0.0"); assertEquals(model.getGroupId(), "org.custom.group"); + assertEquals(model.getArtifactId(), PROJECT_NAME); + assertNull(model.getName()); + assertNull(model.getDescription()); + assertEquals(model.getUrl(), "https://github.com/doe/sample-plugin"); } @@ -139,7 +277,7 @@ public void groupAndVersionValue() throws IOException, URISyntaxException, XmlPu */ @Test public void missingPOMEntity() throws IOException, URISyntaxException, XmlPullParserException { - GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle"); + GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle", "build", ZIP_PUBLISH_TASK); BuildResult result = runner.build(); /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ @@ -186,7 +324,7 @@ public void missingPOMEntity() throws IOException, URISyntaxException, XmlPullPa */ @Test public void customizedGroupValue() throws IOException, URISyntaxException, XmlPullParserException { - GradleRunner runner = prepareGradleRunnerFromTemplate("customizedGroupValue.gradle"); + GradleRunner runner = prepareGradleRunnerFromTemplate("customizedGroupValue.gradle", "build", ZIP_PUBLISH_TASK); BuildResult result = runner.build(); /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ @@ -223,21 +361,94 @@ public void customizedGroupValue() throws IOException, URISyntaxException, XmlPu */ @Test public void customizedInvalidGroupValue() throws IOException, URISyntaxException { - GradleRunner runner = prepareGradleRunnerFromTemplate("customizedInvalidGroupValue.gradle"); + GradleRunner runner = prepareGradleRunnerFromTemplate("customizedInvalidGroupValue.gradle", "build", ZIP_PUBLISH_TASK); Exception e = assertThrows(UnexpectedBuildFailure.class, runner::build); assertTrue( e.getMessage().contains("Invalid publication 'pluginZip': groupId ( ) is not a valid Maven identifier ([A-Za-z0-9_\\-.]+).") ); } - private GradleRunner prepareGradleRunnerFromTemplate(String templateName) throws IOException, URISyntaxException { + /** + * This test verifies that use of the pluginZip does not clash with other maven publication plugins. + * It covers the case when user calls the "publishToMavenLocal" task. + */ + @Test + public void publishToMavenLocal() throws IOException, URISyntaxException, XmlPullParserException { + // By default, the "publishToMavenLocal" publishes artifacts to a local m2 repo, typically + // found in `~/.m2/repository`. But this is not practical for this unit test at all. 
We need to point + // the 'maven-publish' plugin to a custom m2 repo located in temporary directory associated with this + // test case instead. + // + // According to Gradle documentation this should be possible by proper configuration of the publishing + // task (https://docs.gradle.org/current/userguide/publishing_maven.html#publishing_maven:install). + // But for some reason this never worked as expected and artifacts created during this test case + // were always pushed into the default local m2 repository (ie: `~/.m2/repository`). + // The only workaround that seems to work is to pass "-Dmaven.repo.local" property via runner argument. + // (Kudos to: https://stackoverflow.com/questions/72265294/gradle-publishtomavenlocal-specify-custom-directory) + // + // The temporary directory that is used as the local m2 repository is created via in task "prepareLocalMVNRepo". + GradleRunner runner = prepareGradleRunnerFromTemplate( + "publishToMavenLocal.gradle", + String.join(File.separator, "-Dmaven.repo.local=" + projectDir.getRoot(), "build", "local-staging-repo"), + "build", + "prepareLocalMVNRepo", + "publishToMavenLocal" + ); + BuildResult result = runner.build(); + + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + "publishToMavenLocal").getOutcome()); + + // Parse the maven file and validate it + MavenXpp3Reader reader = new MavenXpp3Reader(); + Model model = reader.read( + new FileReader( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) + ) + ); + + // The "publishToMavenLocal" task will run ALL maven publications, hence we can expect the ZIP publication + // present as well: https://docs.gradle.org/current/userguide/publishing_maven.html#publishing_maven:tasks + assertEquals(model.getArtifactId(), PROJECT_NAME); + assertEquals(model.getGroupId(), "org.custom.group"); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getPackaging(), "zip"); + + // We have two publications in the build.gradle file, both are "MavenPublication" based. + // Both the mavenJava and pluginZip publications publish to the same location (coordinates) and + // artifacts (the POM file) overwrite each other. However, we can verify that the Zip plugin is + // the last one and "wins" over the mavenJava. + assertEquals(model.getDescription(), "pluginZip publication"); + } + + /** + * A helper method for use cases + * + * @param templateName The name of the file (from "pluginzip" folder) to use as a build.gradle for the test + * @param gradleArguments Optional CLI parameters to pass into Gradle runner + */ + private GradleRunner prepareGradleRunnerFromTemplate(String templateName, String... 
gradleArguments) throws IOException, + URISyntaxException { useTemplateFile(projectDir.newFile("build.gradle"), templateName); prepareGradleFilesAndSources(); GradleRunner runner = GradleRunner.create() .forwardOutput() .withPluginClasspath() - .withArguments("build", ZIP_PUBLISH_TASK) + .withArguments(gradleArguments) .withProjectDir(projectDir.getRoot()); return runner; @@ -246,7 +457,7 @@ private GradleRunner prepareGradleRunnerFromTemplate(String templateName) throws private void prepareGradleFilesAndSources() throws IOException { // A dummy "source" file that is processed with bundlePlugin and put into a ZIP artifact file File bundleFile = new File(projectDir.getRoot(), PROJECT_NAME + "-source.txt"); - Path zipFile = Files.createFile(bundleFile.toPath()); + Files.createFile(bundleFile.toPath()); // Setting a project name via settings.gradle file writeString(projectDir.newFile("settings.gradle"), "rootProject.name = '" + PROJECT_NAME + "'"); } diff --git a/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle index 1bde3edda2d91..94f03132faa80 100644 --- a/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle +++ b/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle @@ -1,6 +1,5 @@ plugins { id 'java-gradle-plugin' - id 'nebula.maven-base-publish' id 'opensearch.pluginzip' } diff --git a/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle index b6deeeb12ca6a..6f2abbdacd6d4 100644 --- a/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle +++ b/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle @@ -1,6 +1,5 @@ plugins { id 'java-gradle-plugin' - id 'nebula.maven-base-publish' id 'opensearch.pluginzip' } diff --git a/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle index 602c178ea1a5b..8fcd1d6600b5a 100644 --- a/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle +++ b/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle @@ -1,6 +1,5 @@ plugins { id 'java-gradle-plugin' - id 'nebula.maven-base-publish' id 'opensearch.pluginzip' } diff --git a/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle b/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle index 2cc67c2e98954..394bc53622769 100644 --- a/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle +++ b/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle @@ -1,6 +1,5 @@ plugins { id 'java-gradle-plugin' - id 'nebula.maven-base-publish' id 'opensearch.pluginzip' } diff --git a/buildSrc/src/test/resources/pluginzip/missingPublications.gradle b/buildSrc/src/test/resources/pluginzip/missingPublications.gradle new file mode 100644 index 0000000000000..ba6b33ad86463 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/missingPublications.gradle @@ -0,0 +1,21 @@ +plugins { + id 'java-gradle-plugin' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +//publishing { +// publications { +// pluginZip(MavenPublication) { +// } +// } +//} diff --git 
a/buildSrc/src/test/resources/pluginzip/publishToMavenLocal.gradle b/buildSrc/src/test/resources/pluginzip/publishToMavenLocal.gradle new file mode 100644 index 0000000000000..8d248dbe08a42 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/publishToMavenLocal.gradle @@ -0,0 +1,47 @@ +plugins { + // The java-gradle-plugin adds a new publication called 'pluginMaven' that causes some warnings because it + // clashes a bit with other publications defined in this file. If you are running at the --info level then you can + // expect some warning like the following: + // "Multiple publications with coordinates 'org.custom.group:sample-plugin:2.0.0.0' are published to repository 'mavenLocal'." + id 'java-gradle-plugin' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +// A task to prepare directory for a temporary maven local repository +tasks.register('prepareLocalMVNRepo') { + dependsOn ':bundlePlugin' + doFirst { + File localMVNRepo = new File (layout.buildDirectory.get().getAsFile().getPath(), 'local-staging-repo') + System.out.println('Creating temporary folder for mavenLocal repo: '+ localMVNRepo.toString()) + System.out.println("Success: " + localMVNRepo.mkdir()) + } +} + +publishing { + publications { + // Plugin zip publication + pluginZip(MavenPublication) { + pom { + url = 'http://www.example.com/library' + description = 'pluginZip publication' + } + } + // Standard maven publication + mavenJava(MavenPublication) { + pom { + url = 'http://www.example.com/library' + description = 'mavenJava publication' + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle b/buildSrc/src/test/resources/pluginzip/useDefaultValues.gradle similarity index 90% rename from buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle rename to buildSrc/src/test/resources/pluginzip/useDefaultValues.gradle index bdab385f6082c..52f1c042fd47c 100644 --- a/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle +++ b/buildSrc/src/test/resources/pluginzip/useDefaultValues.gradle @@ -1,6 +1,5 @@ plugins { id 'java-gradle-plugin' - id 'nebula.maven-base-publish' id 'opensearch.pluginzip' } @@ -18,8 +17,8 @@ publishing { publications { pluginZip(MavenPublication) { pom { - name = "sample-plugin" - description = "pluginDescription" +// name = "plugin name" +// description = "plugin description" licenses { license { name = "The Apache License, Version 2.0" From ebbddd3db38b12e9f05a8488c3e257a163b75582 Mon Sep 17 00:00:00 2001 From: msfroh Date: Wed, 28 Sep 2022 19:33:51 -0700 Subject: [PATCH 002/122] Add missing Javadoc tag descriptions in unit tests (#4629) * Add missing Javadoc tag descriptions in unit tests Using JDK14 (downloaded from https://jdk.java.net/archive/), I tried running ./gradlew check, and received errors like: server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java:440: warning: no description for @throws The build then failed with "warnings found and -Werror specified". I tried again with JDK17 and the build passed. According to DEVELOPER_GUIDE.md, we should be able to build with JDK14, I added the missing Javadoc tag descriptions. 
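As an illustration, the change below turns a bare tag such as

    * @throws Exception

into one that carries a description:

    * @throws Exception if an error occurs accessing the index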
Signed-off-by: Michael Froh * Update CHANGELOG.md Signed-off-by: Michael Froh Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../metrics/GeoBoundsGeoShapeAggregatorTests.java | 4 ++-- .../replication/SegmentReplicationTargetTests.java | 8 ++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42f86d6d89a5b..64a964e89c35c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add BWC version 2.3.1 ([#4513](https://github.com/opensearch-project/OpenSearch/pull/4513)) - [Segment Replication] Add snapshot and restore tests for segment replication feature ([#3993](https://github.com/opensearch-project/OpenSearch/pull/3993)) - Added missing javadocs for `:example-plugins` modules ([#4540](https://github.com/opensearch-project/OpenSearch/pull/4540)) +- Add missing Javadoc tag descriptions in unit tests ([#4629](https://github.com/opensearch-project/OpenSearch/pull/4629)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsGeoShapeAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsGeoShapeAggregatorTests.java index 68d9434631364..d449d72f0b148 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsGeoShapeAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsGeoShapeAggregatorTests.java @@ -68,7 +68,7 @@ protected List getSearchPlugins() { /** * Testing Empty aggregator results. * - * @throws Exception + * @throws Exception if an error occurs accessing the index */ public void testEmpty() throws Exception { try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { @@ -93,7 +93,7 @@ public void testEmpty() throws Exception { /** * Testing GeoBoundAggregator for random shapes which are indexed. * - * @throws Exception + * @throws Exception if an error occurs accessing the index */ public void testRandom() throws Exception { final int numDocs = randomIntBetween(50, 100); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index f8341573770a6..34ca94bba02f3 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -381,7 +381,7 @@ public void onFailure(Exception e) { /** * This tests ensures that new files generated on primary (due to delete operation) are not considered missing on replica - * @throws IOException + * @throws IOException if an indexing operation fails or segment replication fails */ public void test_MissingFiles_NotCausingFailure() throws IOException { int docCount = 1 + random().nextInt(10); @@ -435,9 +435,9 @@ public void onFailure(Exception e) { /** * Generates a list of Store.MetadataSnapshot with two elements where second snapshot has extra files due to delete * operation. A list of snapshots is returned so that identical files have same checksum. 
- * @param docCount - * @return - * @throws IOException + * @param docCount the number of documents to index in the first snapshot + * @return a list of Store.MetadataSnapshot with two elements where second snapshot has extra files due to delete + * @throws IOException if one of the indexing operations fails */ private List generateStoreMetadataSnapshot(int docCount) throws IOException { List docList = new ArrayList<>(); From cb5e16d794fefc88cf2d67d99d3b765bb44434fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Sep 2022 10:52:42 -0400 Subject: [PATCH 003/122] Bump reactor-netty-http from 1.0.18 to 1.0.23 in /plugins/repository-azure (#4594) * Bump reactor-netty-http in /plugins/repository-azure Bumps [reactor-netty-http](https://github.com/reactor/reactor-netty) from 1.0.18 to 1.0.23. - [Release notes](https://github.com/reactor/reactor-netty/releases) - [Commits](https://github.com/reactor/reactor-netty/compare/v1.0.18...v1.0.23) --- updated-dependencies: - dependency-name: io.projectreactor.netty:reactor-netty-http dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- plugins/repository-azure/build.gradle | 2 +- .../licenses/reactor-netty-http-1.0.18.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.0.23.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 64a964e89c35c..ed13d3f1108fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add missing Javadoc tag descriptions in unit tests ([#4629](https://github.com/opensearch-project/OpenSearch/pull/4629)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 +- Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 ### Dependencies @@ -109,4 +110,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 0f0c0183a69dd..17d93a7e24e61 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -59,7 +59,7 @@ dependencies { api 'io.projectreactor:reactor-core:3.4.23' api 'io.projectreactor.netty:reactor-netty:1.0.18' api 'io.projectreactor.netty:reactor-netty-core:1.0.22' - api 'io.projectreactor.netty:reactor-netty-http:1.0.18' + api 'io.projectreactor.netty:reactor-netty-http:1.0.23' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 
deleted file mode 100644 index 43599c0b6c691..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a34930cbd46b53ffdb19d2089605f39589eb2b99 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 new file mode 100644 index 0000000000000..0b26b80fe3915 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 @@ -0,0 +1 @@ +63932f2b675f451135986b3723a12d45e818b170 \ No newline at end of file From 71577245ad992194d166d689fed59d905778f9fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Sep 2022 11:32:14 -0400 Subject: [PATCH 004/122] Bump jettison from 1.5.0 to 1.5.1 in /plugins/discovery-azure-classic (#4592) * Bump jettison from 1.5.0 to 1.5.1 in /plugins/discovery-azure-classic Bumps [jettison](https://github.com/jettison-json/jettison) from 1.5.0 to 1.5.1. - [Release notes](https://github.com/jettison-json/jettison/releases) - [Commits](https://github.com/jettison-json/jettison/compare/jettison-1.5.0...jettison-1.5.1) --- updated-dependencies: - dependency-name: org.codehaus.jettison:jettison dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/discovery-azure-classic/build.gradle | 2 +- .../discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 | 1 - .../discovery-azure-classic/licenses/jettison-1.5.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 create mode 100644 plugins/discovery-azure-classic/licenses/jettison-1.5.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index ed13d3f1108fd..7e1ca05fee06a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 +- Bumps `jettison` from 1.5.0 to 1.5.1 ### Dependencies diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 5755ff55bfff9..8ca9491f834a6 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -59,7 +59,7 @@ dependencies { api "com.sun.jersey:jersey-client:${versions.jersey}" api "com.sun.jersey:jersey-core:${versions.jersey}" api "com.sun.jersey:jersey-json:${versions.jersey}" - api 'org.codehaus.jettison:jettison:1.5.0' + api 'org.codehaus.jettison:jettison:1.5.1' api 'com.sun.xml.bind:jaxb-impl:2.2.3-1' // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, diff --git a/plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 deleted file mode 100644 index ec93f83474541..0000000000000 --- a/plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -933c7df7a4b78c9a9322f431014ea699b1fc0cc0 \ No newline at end of file diff --git 
a/plugins/discovery-azure-classic/licenses/jettison-1.5.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.5.1.jar.sha1 new file mode 100644 index 0000000000000..29227ed427953 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/jettison-1.5.1.jar.sha1 @@ -0,0 +1 @@ +d8918f348f234f5046bd39ea1ed9fc91deac402f \ No newline at end of file From 2c0277be599d1683e189349fd80be38fabb519b9 Mon Sep 17 00:00:00 2001 From: Heemin Kim Date: Thu, 29 Sep 2022 08:44:35 -0700 Subject: [PATCH 005/122] Fix invalid search location of JDK for arm64 (#4613) Related commit: 0e9f74e35f1255cb9ec45be3d8960aad195a9f6e Signed-off-by: Heemin Kim Signed-off-by: Heemin Kim --- CHANGELOG.md | 3 ++- buildSrc/src/main/java/org/opensearch/gradle/Jdk.java | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e1ca05fee06a..a1f9e411674e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -76,13 +76,14 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test ([#4414](https://github.com/opensearch-project/OpenSearch/pull/4414)) - Fixed the `_cat/shards/10_basic.yml` test cases fix. - [Segment Replication] Fix timeout issue by calculating time needed to process getSegmentFiles ([#4426](https://github.com/opensearch-project/OpenSearch/pull/4426)) -- [Bug]: gradle check failing with java heap OutOfMemoryError (([#4328](https://github.com/opensearch-project/OpenSearch/ +- [Bug]: gradle check failing with java heap OutOfMemoryError ([#4328](https://github.com/opensearch-project/OpenSearch/)) - `opensearch.bat` fails to execute when install path includes spaces ([#4362](https://github.com/opensearch-project/OpenSearch/pull/4362)) - Getting security exception due to access denied 'java.lang.RuntimePermission' 'accessDeclaredMembers' when trying to get snapshot with S3 IRSA ([#4469](https://github.com/opensearch-project/OpenSearch/pull/4469)) - Fixed flaky test `ResourceAwareTasksTests.testTaskIdPersistsInThreadContext` ([#4484](https://github.com/opensearch-project/OpenSearch/pull/4484)) - Fixed the ignore_malformed setting to also ignore objects ([#4494](https://github.com/opensearch-project/OpenSearch/pull/4494)) - [Segment Replication] Ignore lock file when testing cleanupAndPreserveLatestCommitPoint ([#4544](https://github.com/opensearch-project/OpenSearch/pull/4544)) - Updated jackson to 2.13.4 and snakeyml to 1.32 ([#4556](https://github.com/opensearch-project/OpenSearch/pull/4556)) +- [Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java b/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java index 4b289de3f0619..1073ba01dafab 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java @@ -128,7 +128,7 @@ public void setArchitecture(final String architecture) { "unknown architecture [" + jdkArchitecture + "] for jdk [" + name + "], must be one of " + ALLOWED_ARCHITECTURES ); } - this.architecture.set(architecture); + this.architecture.set(jdkArchitecture); } public String getBaseVersion() { From 2fe6f9a28fc1c79d06757692b301ab7bd612083e Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Thu, 
29 Sep 2022 12:02:37 -0700 Subject: [PATCH 006/122] Remove unused object and import from TransportClusterAllocationExplainAction (#4639) Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- CHANGELOG.md | 3 ++- .../allocation/TransportClusterAllocationExplainAction.java | 4 ---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1f9e411674e4..d241a94a0dcb3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) +- Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) @@ -112,4 +113,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 7060ac43af7f9..a50c3b2d6f100 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -50,7 +50,6 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.RoutingAllocation.DebugMode; import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; -import org.opensearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -77,7 +76,6 @@ public class TransportClusterAllocationExplainAction extends TransportClusterMan private final ClusterInfoService clusterInfoService; private final SnapshotsInfoService snapshotsInfoService; private final AllocationDeciders allocationDeciders; - private final ShardsAllocator shardAllocator; private final AllocationService allocationService; @Inject @@ -90,7 +88,6 @@ public TransportClusterAllocationExplainAction( ClusterInfoService clusterInfoService, SnapshotsInfoService snapshotsInfoService, AllocationDeciders allocationDeciders, - ShardsAllocator shardAllocator, AllocationService allocationService ) { super( @@ -105,7 +102,6 @@ public TransportClusterAllocationExplainAction( this.clusterInfoService = clusterInfoService; this.snapshotsInfoService = snapshotsInfoService; this.allocationDeciders = allocationDeciders; - this.shardAllocator = shardAllocator; this.allocationService = allocationService; } From 3b49c0eece3ba4c115f3302c3d4ede52ecef66ca Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 29 Sep 2022 20:54:27 -0400 Subject: [PATCH 007/122] Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make 
it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance (#4638) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../opensearch/http/netty4/Netty4HttpServerTransport.java | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d241a94a0dcb3..562f03514942c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Unmute test RelocationIT.testRelocationWhileIndexingRandom ([#4580](https://github.com/opensearch-project/OpenSearch/pull/4580)) - Add DecommissionService and helper to execute awareness attribute decommissioning ([#4084](https://github.com/opensearch-project/OpenSearch/pull/4084)) - Further simplification of the ZIP publication implementation ([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) +- Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) ### Deprecated diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 1e0a4d89f2fd5..e3fde75e5b551 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -332,8 +332,10 @@ public ChannelHandler configureServerChannelHandler() { return new HttpChannelHandler(this, handlingSettings); } - static final AttributeKey HTTP_CHANNEL_KEY = AttributeKey.newInstance("opensearch-http-channel"); - static final AttributeKey HTTP_SERVER_CHANNEL_KEY = AttributeKey.newInstance("opensearch-http-server-channel"); + protected static final AttributeKey HTTP_CHANNEL_KEY = AttributeKey.newInstance("opensearch-http-channel"); + protected static final AttributeKey HTTP_SERVER_CHANNEL_KEY = AttributeKey.newInstance( + "opensearch-http-server-channel" + ); protected static class HttpChannelHandler extends ChannelInitializer { From 551c2d84a259d967daaa60e21303c544f658de82 Mon Sep 17 00:00:00 2001 From: Sarat Vemulapalli Date: Fri, 30 Sep 2022 00:24:47 -0700 Subject: [PATCH 008/122] Bumping hadoop-hdfs version to 3.3.4 (#4644) * Updating hadoop-hdfs version Signed-off-by: Sarat Vemulapalli * Adding changelog Signed-off-by: Sarat Vemulapalli Signed-off-by: Sarat Vemulapalli --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- .../repository-hdfs/licenses/hadoop-client-api-3.3.3.jar.sha1 | 1 - .../repository-hdfs/licenses/hadoop-client-api-3.3.4.jar.sha1 | 1 + .../licenses/hadoop-client-runtime-3.3.3.jar.sha1 | 1 - .../licenses/hadoop-client-runtime-3.3.4.jar.sha1 | 1 + plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 | 1 - plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.4.jar.sha1 | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 9 files changed, 6 insertions(+), 5 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/hadoop-client-api-3.3.3.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/hadoop-client-api-3.3.4.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 create mode 100644 
plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.4.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.4.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 562f03514942c..22db239ac36f8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `com.diffplug.spotless` from 6.10.0 to 6.11.0 ([#4547](https://github.com/opensearch-project/OpenSearch/pull/4547)) - Bumps `reactor-core` from 3.4.18 to 3.4.23 ([#4548](https://github.com/opensearch-project/OpenSearch/pull/4548)) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) +- Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 50784c809e657..6fd91f78a63e6 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -48,7 +48,7 @@ opensearchplugin { } versions << [ - 'hadoop3': '3.3.3' + 'hadoop3': '3.3.4' ] testFixtures.useFixture ":test:fixtures:krb5kdc-fixture", "hdfs" diff --git a/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.3.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.3.jar.sha1 deleted file mode 100644 index 8df133d0bd106..0000000000000 --- a/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d0593aed2d4df9bcee507550913d29d589ebd84a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.4.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.4.jar.sha1 new file mode 100644 index 0000000000000..dd79b8a10cebc --- /dev/null +++ b/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.4.jar.sha1 @@ -0,0 +1 @@ +6339a8f7279310c8b1f7ef314b592d8c71ca72ef \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 deleted file mode 100644 index f980eebc7a46c..0000000000000 --- a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52619ecfb0225d7ae67b15264521064824ac57ca \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.4.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.4.jar.sha1 new file mode 100644 index 0000000000000..32d58d1dc501a --- /dev/null +++ b/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.4.jar.sha1 @@ -0,0 +1 @@ +21f7a9a2da446f1e5b3e5af16ebf956d3ee43ee0 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 deleted file mode 100644 index 463b7415e4c4b..0000000000000 --- a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4d199760c11d47f90e12fe3882e2b24c77e4eb5 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.4.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.4.jar.sha1 new file mode 100644 index 0000000000000..532d25a44531f --- /dev/null +++ 
b/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.4.jar.sha1 @@ -0,0 +1 @@ +036ef2f86dc44410d2bb5d54ce40435d2484d9a5 \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 73aca2a6ca02b..40e3a1dc0587d 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.java' group = 'hdfs' dependencies { - api("org.apache.hadoop:hadoop-minicluster:3.3.3") { + api("org.apache.hadoop:hadoop-minicluster:3.3.4") { exclude module: 'websocket-client' } api "org.apache.commons:commons-compress:1.21" From 54f8fdd98e8cc15a6fbed95bbe751ad5819b4f87 Mon Sep 17 00:00:00 2001 From: Stevan Buzejic <30922513+stevanbz@users.noreply.github.com> Date: Fri, 30 Sep 2022 18:04:45 +0200 Subject: [PATCH 009/122] Bugfix/496 rollover alias with filters (#4499) * 496: Added filters, indexRouting and searchRouting when doing rollover on alias Signed-off-by: Stevan Buzejic * 496: Removed unused map property in metada alias rollover tests Signed-off-by: Stevan Buzejic Signed-off-by: Stevan Buzejic Co-authored-by: Stevan Buzejic --- CHANGELOG.md | 1 + .../rollover/MetadataRolloverService.java | 36 ++++- .../cluster/metadata/AliasAction.java | 12 ++ .../MetadataRolloverServiceTests.java | 147 +++++++++++++++++- 4 files changed, 187 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 22db239ac36f8..4f4124a24cf01 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Segment Replication] Ignore lock file when testing cleanupAndPreserveLatestCommitPoint ([#4544](https://github.com/opensearch-project/OpenSearch/pull/4544)) - Updated jackson to 2.13.4 and snakeyml to 1.32 ([#4556](https://github.com/opensearch-project/OpenSearch/pull/4556)) - [Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) +- [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java index a40ac35091082..c3862bb115b21 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -188,7 +188,7 @@ private RolloverResult rolloverAlias( ClusterState newState = createIndexService.applyCreateIndexRequest(currentState, createIndexClusterStateRequest, silent); newState = indexAliasesService.applyAliasActions( newState, - rolloverAliasToNewIndex(sourceIndexName, rolloverIndexName, explicitWriteIndex, aliasMetadata.isHidden(), aliasName) + rolloverAliasToNewIndex(sourceIndexName, rolloverIndexName, explicitWriteIndex, aliasMetadata, aliasName) ); RolloverInfo rolloverInfo = new RolloverInfo(aliasName, metConditions, threadPool.absoluteTimeInMillis()); @@ -309,20 +309,46 @@ static List rolloverAliasToNewIndex( String oldIndex, String newIndex, boolean explicitWriteIndex, - @Nullable Boolean isHidden, + AliasMetadata aliasMetadata, String alias ) { + String filterAsString = 
aliasMetadata.getFilter() != null ? aliasMetadata.getFilter().string() : null; + if (explicitWriteIndex) { return Collections.unmodifiableList( Arrays.asList( - new AliasAction.Add(newIndex, alias, null, null, null, true, isHidden), - new AliasAction.Add(oldIndex, alias, null, null, null, false, isHidden) + new AliasAction.Add( + newIndex, + alias, + filterAsString, + aliasMetadata.getIndexRouting(), + aliasMetadata.getSearchRouting(), + true, + aliasMetadata.isHidden() + ), + new AliasAction.Add( + oldIndex, + alias, + filterAsString, + aliasMetadata.getIndexRouting(), + aliasMetadata.getSearchRouting(), + false, + aliasMetadata.isHidden() + ) ) ); } else { return Collections.unmodifiableList( Arrays.asList( - new AliasAction.Add(newIndex, alias, null, null, null, null, isHidden), + new AliasAction.Add( + newIndex, + alias, + filterAsString, + aliasMetadata.getIndexRouting(), + aliasMetadata.getSearchRouting(), + null, + aliasMetadata.isHidden() + ), new AliasAction.Remove(oldIndex, alias, null) ) ); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java index a53f8411b2549..46702a0d78caf 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java @@ -138,6 +138,18 @@ public String getAlias() { return alias; } + public String getFilter() { + return filter; + } + + public String getSearchRouting() { + return searchRouting; + } + + public String getIndexRouting() { + return indexRouting; + } + public Boolean writeIndex() { return writeIndex; } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index d54c17041e96f..8521673743d23 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -126,7 +126,13 @@ public void testRolloverAliasActions() { String sourceIndex = randomAlphaOfLength(10); String targetIndex = randomAlphaOfLength(10); - List actions = MetadataRolloverService.rolloverAliasToNewIndex(sourceIndex, targetIndex, false, null, sourceAlias); + List actions = MetadataRolloverService.rolloverAliasToNewIndex( + sourceIndex, + targetIndex, + false, + createDefaultAliasMetadata(sourceAlias, null), + sourceAlias + ); assertThat(actions, hasSize(2)); boolean foundAdd = false; boolean foundRemove = false; @@ -149,7 +155,13 @@ public void testRolloverAliasActionsWithExplicitWriteIndex() { String sourceAlias = randomAlphaOfLength(10); String sourceIndex = randomAlphaOfLength(10); String targetIndex = randomAlphaOfLength(10); - List actions = MetadataRolloverService.rolloverAliasToNewIndex(sourceIndex, targetIndex, true, null, sourceAlias); + List actions = MetadataRolloverService.rolloverAliasToNewIndex( + sourceIndex, + targetIndex, + true, + createDefaultAliasMetadata(sourceAlias, null), + sourceAlias + ); assertThat(actions, hasSize(2)); boolean foundAddWrite = false; @@ -172,11 +184,64 @@ public void testRolloverAliasActionsWithExplicitWriteIndex() { assertTrue(foundRemoveWrite); } + public void testRolloverAliasActionsWithFilterAndExplicitWriteIndex() { + String sourceAlias = randomAlphaOfLength(10); + String sourceIndex = randomAlphaOfLength(10); + String targetIndex = 
randomAlphaOfLength(10); + String indexRouting = randomAlphaOfLength(10); + String sourceRouting = randomAlphaOfLength(10); + AliasMetadata aliasMetadata = createAliasMetadata( + sourceAlias, + Collections.singletonMap(randomAlphaOfLength(2), randomAlphaOfLength(2)), + indexRouting, + sourceRouting, + true + ); + + List actions = MetadataRolloverService.rolloverAliasToNewIndex( + sourceIndex, + targetIndex, + true, + aliasMetadata, + sourceAlias + ); + + assertThat(actions, hasSize(2)); + boolean foundAddWrite = false; + boolean foundRemoveWrite = false; + for (AliasAction action : actions) { + AliasAction.Add addAction = (AliasAction.Add) action; + if (action.getIndex().equals(targetIndex)) { + assertEquals(sourceAlias, addAction.getAlias()); + assertEquals(aliasMetadata.filter().string(), addAction.getFilter()); + assertEquals(indexRouting, addAction.getIndexRouting()); + assertEquals(sourceRouting, addAction.getSearchRouting()); + + assertTrue(addAction.writeIndex()); + foundAddWrite = true; + } else if (action.getIndex().equals(sourceIndex)) { + assertEquals(sourceAlias, addAction.getAlias()); + assertFalse(addAction.writeIndex()); + foundRemoveWrite = true; + } else { + throw new AssertionError("Unknown index [" + action.getIndex() + "]"); + } + } + assertTrue(foundAddWrite); + assertTrue(foundRemoveWrite); + } + public void testRolloverAliasActionsWithHiddenAliasAndExplicitWriteIndex() { String sourceAlias = randomAlphaOfLength(10); String sourceIndex = randomAlphaOfLength(10); String targetIndex = randomAlphaOfLength(10); - List actions = MetadataRolloverService.rolloverAliasToNewIndex(sourceIndex, targetIndex, true, true, sourceAlias); + List actions = MetadataRolloverService.rolloverAliasToNewIndex( + sourceIndex, + targetIndex, + true, + createDefaultAliasMetadata(sourceAlias, true), + sourceAlias + ); assertThat(actions, hasSize(2)); boolean foundAddWrite = false; @@ -202,11 +267,66 @@ public void testRolloverAliasActionsWithHiddenAliasAndExplicitWriteIndex() { assertTrue(foundRemoveWrite); } + public void testRolloverAliasActionsWithFilterAndHiddenAliasAndImplicitWriteIndex() { + String sourceAlias = randomAlphaOfLength(10); + String sourceIndex = randomAlphaOfLength(10); + String targetIndex = randomAlphaOfLength(10); + String indexRouting = randomAlphaOfLength(10); + String sourceRouting = randomAlphaOfLength(10); + AliasMetadata aliasMetadata = createAliasMetadata( + sourceAlias, + Collections.singletonMap(randomAlphaOfLength(2), randomAlphaOfLength(2)), + indexRouting, + sourceRouting, + true + ); + + List actions = MetadataRolloverService.rolloverAliasToNewIndex( + sourceIndex, + targetIndex, + false, + aliasMetadata, + sourceAlias + ); + + assertThat(actions, hasSize(2)); + boolean foundAddWrite = false; + boolean foundRemoveWrite = false; + for (AliasAction action : actions) { + if (action.getIndex().equals(targetIndex)) { + assertThat(action, instanceOf(AliasAction.Add.class)); + AliasAction.Add addAction = (AliasAction.Add) action; + assertEquals(sourceAlias, addAction.getAlias()); + assertThat(addAction.writeIndex(), nullValue()); + assertTrue(addAction.isHidden()); + assertEquals(aliasMetadata.filter().string(), addAction.getFilter()); + assertEquals(indexRouting, addAction.getIndexRouting()); + assertEquals(sourceRouting, addAction.getSearchRouting()); + foundAddWrite = true; + } else if (action.getIndex().equals(sourceIndex)) { + assertThat(action, instanceOf(AliasAction.Remove.class)); + AliasAction.Remove removeAction = (AliasAction.Remove) action; + 
assertEquals(sourceAlias, removeAction.getAlias()); + foundRemoveWrite = true; + } else { + throw new AssertionError("Unknown index [" + action.getIndex() + "]"); + } + } + assertTrue(foundAddWrite); + assertTrue(foundRemoveWrite); + } + public void testRolloverAliasActionsWithHiddenAliasAndImplicitWriteIndex() { String sourceAlias = randomAlphaOfLength(10); String sourceIndex = randomAlphaOfLength(10); String targetIndex = randomAlphaOfLength(10); - List actions = MetadataRolloverService.rolloverAliasToNewIndex(sourceIndex, targetIndex, false, true, sourceAlias); + List actions = MetadataRolloverService.rolloverAliasToNewIndex( + sourceIndex, + targetIndex, + false, + createDefaultAliasMetadata(sourceAlias, true), + sourceAlias + ); assertThat(actions, hasSize(2)); boolean foundAddWrite = false; @@ -1010,4 +1130,23 @@ private static IndexMetadata createMetadata(String indexName) { .settings(settings) .build(); } + + private static AliasMetadata createDefaultAliasMetadata(String alias, Boolean isHidden) { + return AliasMetadata.builder(alias).isHidden(isHidden).build(); + } + + private static AliasMetadata createAliasMetadata( + String alias, + Map filter, + String indexRouting, + String searchRouting, + Boolean isHidden + ) { + return AliasMetadata.builder(alias) + .isHidden(isHidden) + .filter(filter) + .indexRouting(indexRouting) + .searchRouting(searchRouting) + .build(); + } } From ff2d5be004509adae62ca39ff6f0efcdf47d950e Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Fri, 30 Sep 2022 09:59:49 -0700 Subject: [PATCH 010/122] Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() (#4582) Signed-off-by: Tianli Feng --- CHANGELOG.md | 1 + .../opensearch/cluster/node/DiscoveryNode.java | 17 ++++++++++++----- .../src/main/java/org/opensearch/node/Node.java | 2 ++ .../node/DiscoveryNodeRoleSettingTests.java | 3 +-- .../cluster/node/DiscoveryNodeTests.java | 6 +++--- .../opensearch/node/NodeRoleSettingsTests.java | 6 ++---- 6 files changed, 21 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f4124a24cf01..305d00262e821 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add DecommissionService and helper to execute awareness attribute decommissioning ([#4084](https://github.com/opensearch-project/OpenSearch/pull/4084)) - Further simplification of the ZIP publication implementation ([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) +- Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() ([#4582](https://github.com/opensearch-project/OpenSearch/pull/4582)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index beb69e7f1cfaa..199d79070c050 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -616,11 +616,18 @@ public static void setAdditionalRoles(final Set additionalRol + "], roles by name abbreviation [" + roleNameAbbreviationToPossibleRoles + "]"; - // TODO: Remove the Map 'roleNameToPossibleRolesWithMaster' and let 'roleMap = 
roleNameToPossibleRoles', after removing MASTER_ROLE. - // It's used to allow CLUSTER_MANAGER_ROLE that introduced in 2.0, having the same abbreviation name with MASTER_ROLE. - final Map roleNameToPossibleRolesWithMaster = new HashMap<>(roleNameToPossibleRoles); - roleNameToPossibleRolesWithMaster.put(DiscoveryNodeRole.MASTER_ROLE.roleName(), DiscoveryNodeRole.MASTER_ROLE); - roleMap = Collections.unmodifiableMap(roleNameToPossibleRolesWithMaster); + roleMap = roleNameToPossibleRoles; + } + + /** + * Load the deprecated {@link DiscoveryNodeRole#MASTER_ROLE}. + * Master role is not added into BUILT_IN_ROLES, because {@link #setAdditionalRoles(Set)} check role name abbreviation duplication, + * and CLUSTER_MANAGER_ROLE has the same abbreviation name with MASTER_ROLE. + */ + public static void setDeprecatedMasterRole() { + final Map modifiableRoleMap = new HashMap<>(roleMap); + modifiableRoleMap.put(DiscoveryNodeRole.MASTER_ROLE.roleName(), DiscoveryNodeRole.MASTER_ROLE); + roleMap = Collections.unmodifiableMap(modifiableRoleMap); } public static Set getPossibleRoleNames() { diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 92e9815313fa0..24300c884d194 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -429,6 +429,8 @@ protected Node( .collect(Collectors.toSet()); DiscoveryNode.setAdditionalRoles(additionalRoles); + DiscoveryNode.setDeprecatedMasterRole(); + /* * Create the environment based on the finalized view of the settings. This is to ensure that components get the same setting * values, no matter they ask for them from. diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleSettingTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleSettingTests.java index 630902f94d335..efcb80f8e3429 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleSettingTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleSettingTests.java @@ -56,8 +56,7 @@ public void testIsIngestNode() { } public void testIsMasterNode() { - // It's used to add MASTER_ROLE into 'roleMap', because MASTER_ROLE is removed from DiscoveryNodeRole.BUILT_IN_ROLES in 2.0. - DiscoveryNode.setAdditionalRoles(Collections.emptySet()); + DiscoveryNode.setDeprecatedMasterRole(); runRoleTest(DiscoveryNode::isClusterManagerNode, DiscoveryNodeRole.MASTER_ROLE); } diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index abd1cae1ed97d..70a64fc60bdb4 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -176,11 +176,11 @@ public void testDiscoveryNodeIsRemoteClusterClientUnset() { } // Added in 2.0 temporarily, validate the MASTER_ROLE is in the list of known roles. - // MASTER_ROLE was removed from BUILT_IN_ROLES and is imported by setAdditionalRoles(), + // MASTER_ROLE was removed from BUILT_IN_ROLES and is imported by setDeprecatedMasterRole(), // as a workaround for making the new CLUSTER_MANAGER_ROLE has got the same abbreviation 'm'. // The test validate this behavior. 
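// A minimal illustrative sketch of the intended call order after this change: the deprecated
// "master" role is no longer wired up through setAdditionalRoles() and has to be registered
// explicitly via setDeprecatedMasterRole(), mirroring the Node startup sequence above. The
// wrapper class below is illustrative only; the DiscoveryNode/DiscoveryNodeRole calls are the
// ones introduced or kept by this patch.
import java.util.Collections;

import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodeRole;

class DeprecatedMasterRoleSketch {
    static void registerRoles() {
        DiscoveryNode.setAdditionalRoles(Collections.emptySet()); // plugin-provided roles only
        DiscoveryNode.setDeprecatedMasterRole();                  // adds MASTER_ROLE ('m') back into roleMap
        assert DiscoveryNode.getPossibleRoleNames().contains(DiscoveryNodeRole.MASTER_ROLE.roleName());
    }
}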
- public void testSetAdditionalRolesCanAddDeprecatedMasterRole() { - DiscoveryNode.setAdditionalRoles(Collections.emptySet()); + public void testSetDeprecatedMasterRoleCanAddMasterRole() { + DiscoveryNode.setDeprecatedMasterRole(); assertTrue(DiscoveryNode.getPossibleRoleNames().contains(DiscoveryNodeRole.MASTER_ROLE.roleName())); } diff --git a/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java b/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java index 3248b97b8b71f..0a3af34bc12f4 100644 --- a/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java +++ b/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java @@ -26,8 +26,7 @@ public class NodeRoleSettingsTests extends OpenSearchTestCase { * Remove the test after removing MASTER_ROLE. */ public void testClusterManagerAndMasterRoleCanNotCoexist() { - // It's used to add MASTER_ROLE into 'roleMap', because MASTER_ROLE is removed from DiscoveryNodeRole.BUILT_IN_ROLES in 2.0. - DiscoveryNode.setAdditionalRoles(Collections.emptySet()); + DiscoveryNode.setDeprecatedMasterRole(); Settings roleSettings = Settings.builder().put(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), "cluster_manager, master").build(); Exception exception = expectThrows(IllegalArgumentException.class, () -> NodeRoleSettings.NODE_ROLES_SETTING.get(roleSettings)); assertThat(exception.getMessage(), containsString("[master, cluster_manager] can not be assigned together to a node")); @@ -49,8 +48,7 @@ public void testClusterManagerAndDataNodeRoles() { * Remove the test after removing MASTER_ROLE. */ public void testMasterRoleDeprecationMessage() { - // It's used to add MASTER_ROLE into 'roleMap', because MASTER_ROLE is removed from DiscoveryNodeRole.BUILT_IN_ROLES in 2.0. - DiscoveryNode.setAdditionalRoles(Collections.emptySet()); + DiscoveryNode.setDeprecatedMasterRole(); Settings roleSettings = Settings.builder().put(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), "master").build(); assertEquals(Collections.singletonList(DiscoveryNodeRole.MASTER_ROLE), NodeRoleSettings.NODE_ROLES_SETTING.get(roleSettings)); assertWarnings(DiscoveryNodeRole.MASTER_ROLE_DEPRECATION_MESSAGE); From 4b08d386e7e567071365029bd1d71b3bb0388cc7 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Sun, 2 Oct 2022 20:09:39 +0530 Subject: [PATCH 011/122] Add APIs (GET/PUT) to decommission awareness attribute (#4261) * Add APIs (GET/PUT) to decommission awareness attribute Signed-off-by: Rishab Nahata --- CHANGELOG.md | 1 + .../client/RestHighLevelClientTests.java | 4 +- .../cluster.get_decommission_awareness.json | 19 ++ .../cluster.put_decommission_awareness.json | 29 +++ .../org/opensearch/action/ActionModule.java | 12 ++ .../get/GetDecommissionStateAction.java | 26 +++ .../get/GetDecommissionStateRequest.java | 40 +++++ .../GetDecommissionStateRequestBuilder.java | 30 ++++ .../get/GetDecommissionStateResponse.java | 166 ++++++++++++++++++ .../TransportGetDecommissionStateAction.java | 88 ++++++++++ .../awareness/get/package-info.java | 10 ++ .../decommission/awareness/package-info.java | 10 ++ .../awareness/put/DecommissionAction.java | 25 +++ .../awareness/put/DecommissionRequest.java | 84 +++++++++ .../put/DecommissionRequestBuilder.java | 38 ++++ .../awareness/put/DecommissionResponse.java | 37 ++++ .../put/TransportDecommissionAction.java | 81 +++++++++ .../awareness/put/package-info.java | 10 ++ .../opensearch/client/ClusterAdminClient.java | 36 ++++ .../java/org/opensearch/client/Requests.java | 20 +++ .../client/support/AbstractClient.java | 
39 ++++ .../coordination/JoinTaskExecutor.java | 2 +- .../decommission/DecommissionService.java | 11 +- .../admin/cluster/RestDecommissionAction.java | 54 ++++++ .../RestGetDecommissionStateAction.java | 46 +++++ .../GetDecommissionStateResponseTests.java | 37 ++++ .../put/DecommissionRequestTests.java | 61 +++++++ .../put/DecommissionResponseTests.java | 21 +++ .../coordination/JoinTaskExecutorTests.java | 12 +- .../DecommissionServiceTests.java | 14 +- .../cluster/RestDecommissionActionTests.java | 50 ++++++ 31 files changed, 1093 insertions(+), 20 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/package-info.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponseTests.java create mode 100644 server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 305d00262e821..5ad53f4fdc803 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Further simplification of the ZIP publication implementation 
([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() ([#4582](https://github.com/opensearch-project/OpenSearch/pull/4582)) +- Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) ### Deprecated diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index c0eb344a64dba..07bf98a8ff0a3 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -889,7 +889,9 @@ public void testApiNamingConventions() throws Exception { "nodes.reload_secure_settings", "search_shards", "remote_store.restore", - "cluster.put_weighted_routing", }; + "cluster.put_weighted_routing", + "cluster.put_decommission_awareness", + "cluster.get_decommission_awareness", }; List booleanReturnMethods = Arrays.asList("security.enable_user", "security.disable_user", "security.change_password"); Set deprecatedMethods = new HashSet<>(); deprecatedMethods.add("indices.force_merge"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json new file mode 100644 index 0000000000000..430f96921fbc2 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json @@ -0,0 +1,19 @@ +{ + "cluster.get_decommission_awareness": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "description": "Get details and status of decommissioned attribute" + }, + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_cluster/decommission/awareness/_status", + "methods": [ + "GET" + ] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json new file mode 100644 index 0000000000000..bf4ffd454d9df --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json @@ -0,0 +1,29 @@ +{ + "cluster.put_decommission_awareness": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "description": "Decommissions an awareness attribute" + }, + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_cluster/decommission/awareness/{awareness_attribute_name}/{awareness_attribute_value}", + "methods": [ + "PUT" + ], + "parts": { + "awareness_attribute_name": { + "type": "string", + "description": "Awareness attribute name" + }, + "awareness_attribute_value": { + "type": "string", + "description": "Awareness attribute value" + } + } + } + ] + } + } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index e745e505d9fe5..ac9fd8e5fea3e 100644 --- 
a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -40,6 +40,10 @@ import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.TransportGetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; +import org.opensearch.action.admin.cluster.decommission.awareness.put.TransportDecommissionAction; import org.opensearch.action.admin.cluster.health.ClusterHealthAction; import org.opensearch.action.admin.cluster.health.TransportClusterHealthAction; import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction; @@ -306,6 +310,7 @@ import org.opensearch.rest.action.admin.cluster.RestDeleteRepositoryAction; import org.opensearch.rest.action.admin.cluster.RestDeleteSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestDeleteStoredScriptAction; +import org.opensearch.rest.action.admin.cluster.RestGetDecommissionStateAction; import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.opensearch.rest.action.admin.cluster.RestGetScriptContextAction; import org.opensearch.rest.action.admin.cluster.RestGetScriptLanguageAction; @@ -318,6 +323,7 @@ import org.opensearch.rest.action.admin.cluster.RestNodesStatsAction; import org.opensearch.rest.action.admin.cluster.RestNodesUsageAction; import org.opensearch.rest.action.admin.cluster.RestPendingClusterTasksAction; +import org.opensearch.rest.action.admin.cluster.RestDecommissionAction; import org.opensearch.rest.action.admin.cluster.RestPutRepositoryAction; import org.opensearch.rest.action.admin.cluster.RestPutStoredScriptAction; import org.opensearch.rest.action.admin.cluster.RestReloadSecureSettingsAction; @@ -686,6 +692,10 @@ public void reg // Remote Store actions.register(RestoreRemoteStoreAction.INSTANCE, TransportRestoreRemoteStoreAction.class); + // Decommission actions + actions.register(DecommissionAction.INSTANCE, TransportDecommissionAction.class); + actions.register(GetDecommissionStateAction.INSTANCE, TransportGetDecommissionStateAction.class); + return unmodifiableMap(actions.getRegistry()); } @@ -879,6 +889,8 @@ public void initRestHandlers(Supplier nodesInCluster) { } } registerHandler.accept(new RestCatAction(catActions)); + registerHandler.accept(new RestDecommissionAction()); + registerHandler.accept(new RestGetDecommissionStateAction()); // Remote Store APIs if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateAction.java new file mode 100644 index 0000000000000..72fd1a26cb860 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateAction.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible 
open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.action.ActionType; + +/** + * Get decommission action + * + * @opensearch.internal + */ +public class GetDecommissionStateAction extends ActionType { + + public static final GetDecommissionStateAction INSTANCE = new GetDecommissionStateAction(); + public static final String NAME = "cluster:admin/decommission/awareness/get"; + + private GetDecommissionStateAction() { + super(NAME, GetDecommissionStateResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java new file mode 100644 index 0000000000000..90150c71bf3f2 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Get Decommissioned attribute request + * + * @opensearch.internal + */ +public class GetDecommissionStateRequest extends ClusterManagerNodeReadRequest { + + public GetDecommissionStateRequest() {} + + public GetDecommissionStateRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java new file mode 100644 index 0000000000000..2b8616d0511cd --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; +import org.opensearch.client.OpenSearchClient; + +/** + * Get decommission request builder + * + * @opensearch.internal + */ +public class GetDecommissionStateRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< + GetDecommissionStateRequest, + GetDecommissionStateResponse, + GetDecommissionStateRequestBuilder> { + + /** + * Creates new get decommissioned attributes request builder + */ + public GetDecommissionStateRequestBuilder(OpenSearchClient client, GetDecommissionStateAction action) { + super(client, action, new GetDecommissionStateRequest()); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java new file mode 100644 index 0000000000000..2034cdb16e40f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java @@ -0,0 +1,166 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.ActionResponse; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * Response for decommission status + * + * @opensearch.internal + */ +public class GetDecommissionStateResponse extends ActionResponse implements ToXContentObject { + + private DecommissionAttribute decommissionedAttribute; + private DecommissionStatus status; + + GetDecommissionStateResponse() { + this(null, null); + } + + GetDecommissionStateResponse(DecommissionAttribute decommissionedAttribute, DecommissionStatus status) { + this.decommissionedAttribute = decommissionedAttribute; + this.status = status; + } + + GetDecommissionStateResponse(StreamInput in) throws IOException { + // read decommissioned attribute and status only if it is present + if (in.readBoolean()) { + this.decommissionedAttribute = new DecommissionAttribute(in); + } + if (in.readBoolean()) { + this.status = DecommissionStatus.fromString(in.readString()); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // if decommissioned attribute is null, mark absence of decommissioned attribute + if (decommissionedAttribute == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + decommissionedAttribute.writeTo(out); + } + + // if status is null, mark absence of status + if (status == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeString(status.status()); + } + } + + public 
DecommissionAttribute getDecommissionedAttribute() { + return decommissionedAttribute; + } + + public DecommissionStatus getDecommissionStatus() { + return status; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject("awareness"); + if (decommissionedAttribute != null) { + builder.field(decommissionedAttribute.attributeName(), decommissionedAttribute.attributeValue()); + } + builder.endObject(); + if (status != null) { + builder.field("status", status); + } + builder.endObject(); + return builder; + } + + public static GetDecommissionStateResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + String attributeType = "awareness"; + XContentParser.Token token; + DecommissionAttribute decommissionAttribute = null; + DecommissionStatus status = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if (attributeType.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new OpenSearchParseException( + "failed to parse decommission attribute type [{}], expected object", + attributeType + ); + } + token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String fieldName = parser.currentName(); + String value; + token = parser.nextToken(); + if (token == XContentParser.Token.VALUE_STRING) { + value = parser.text(); + } else { + throw new OpenSearchParseException( + "failed to parse attribute [{}], expected string for attribute value", + fieldName + ); + } + decommissionAttribute = new DecommissionAttribute(fieldName, value); + parser.nextToken(); + } else { + throw new OpenSearchParseException("failed to parse attribute type [{}], unexpected type", attributeType); + } + } else { + throw new OpenSearchParseException("failed to parse attribute type [{}]", attributeType); + } + } else if ("status".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException( + "failed to parse status of decommissioning, expected string but found unknown type" + ); + } + status = DecommissionStatus.fromString(parser.text().toLowerCase(Locale.ROOT)); + } else { + throw new OpenSearchParseException( + "unknown field found [{}], failed to parse the decommission attribute", + currentFieldName + ); + } + } + } + return new GetDecommissionStateResponse(decommissionAttribute, status); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetDecommissionStateResponse that = (GetDecommissionStateResponse) o; + return decommissionedAttribute.equals(that.decommissionedAttribute) && status == that.status; + } + + @Override + public int hashCode() { + return Objects.hash(decommissionedAttribute, status); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java new file mode 100644 index 0000000000000..48ed13c6c0aaf --- /dev/null +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for getting decommission status + * + * @opensearch.internal + */ +public class TransportGetDecommissionStateAction extends TransportClusterManagerNodeReadAction< + GetDecommissionStateRequest, + GetDecommissionStateResponse> { + + @Inject + public TransportGetDecommissionStateAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + GetDecommissionStateAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + GetDecommissionStateRequest::new, + indexNameExpressionResolver + ); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected GetDecommissionStateResponse read(StreamInput in) throws IOException { + return new GetDecommissionStateResponse(in); + } + + @Override + protected void clusterManagerOperation( + GetDecommissionStateRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + DecommissionAttributeMetadata decommissionAttributeMetadata = state.metadata().decommissionAttributeMetadata(); + if (decommissionAttributeMetadata != null) { + listener.onResponse( + new GetDecommissionStateResponse( + decommissionAttributeMetadata.decommissionAttribute(), + decommissionAttributeMetadata.status() + ) + ); + } else { + listener.onResponse(new GetDecommissionStateResponse()); + } + } + + @Override + protected ClusterBlockException checkBlock(GetDecommissionStateRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/package-info.java new file mode 100644 index 0000000000000..5b88e91cf4f9d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Transport handlers for getting status of decommission request */ +package org.opensearch.action.admin.cluster.decommission.awareness.get; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/package-info.java new file mode 100644 index 0000000000000..e1260e638c91d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Decommission transport handlers. */ +package org.opensearch.action.admin.cluster.decommission.awareness; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionAction.java new file mode 100644 index 0000000000000..56678650f6e35 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.action.ActionType; + +/** + * Register decommission action + * + * @opensearch.internal + */ +public final class DecommissionAction extends ActionType { + public static final DecommissionAction INSTANCE = new DecommissionAction(); + public static final String NAME = "cluster:admin/decommission/awareness/put"; + + private DecommissionAction() { + super(NAME, DecommissionResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java new file mode 100644 index 0000000000000..79a6688dc6049 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * Register decommission request. + *

+ * Registers a decommission request with decommission attribute and timeout + * + * @opensearch.internal + */ +public class DecommissionRequest extends ClusterManagerNodeRequest { + + private DecommissionAttribute decommissionAttribute; + + public DecommissionRequest() {} + + public DecommissionRequest(DecommissionAttribute decommissionAttribute) { + this.decommissionAttribute = decommissionAttribute; + } + + public DecommissionRequest(StreamInput in) throws IOException { + super(in); + decommissionAttribute = new DecommissionAttribute(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + decommissionAttribute.writeTo(out); + } + + /** + * Sets decommission attribute for decommission request + * + * @param decommissionAttribute attribute key-value that needs to be decommissioned + * @return this request + */ + public DecommissionRequest setDecommissionAttribute(DecommissionAttribute decommissionAttribute) { + this.decommissionAttribute = decommissionAttribute; + return this; + } + + /** + * @return Returns the decommission attribute key-value + */ + public DecommissionAttribute getDecommissionAttribute() { + return this.decommissionAttribute; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (decommissionAttribute.attributeName() == null || Strings.isEmpty(decommissionAttribute.attributeName())) { + validationException = addValidationError("attribute name is missing", validationException); + } + if (decommissionAttribute.attributeValue() == null || Strings.isEmpty(decommissionAttribute.attributeValue())) { + validationException = addValidationError("attribute value is missing", validationException); + } + return validationException; + } + + @Override + public String toString() { + return "DecommissionRequest{" + "decommissionAttribute=" + decommissionAttribute + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java new file mode 100644 index 0000000000000..47af3b952c895 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.client.OpenSearchClient; +import org.opensearch.cluster.decommission.DecommissionAttribute; + +/** + * Register decommission request builder + * + * @opensearch.internal + */ +public class DecommissionRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< + DecommissionRequest, + DecommissionResponse, + DecommissionRequestBuilder> { + + public DecommissionRequestBuilder(OpenSearchClient client, ActionType action, DecommissionRequest request) { + super(client, action, request); + } + + /** + * @param decommissionAttribute decommission attribute + * @return current object + */ + public DecommissionRequestBuilder setDecommissionedAttribute(DecommissionAttribute decommissionAttribute) { + request.setDecommissionAttribute(decommissionAttribute); + return this; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java new file mode 100644 index 0000000000000..499f403c8cd64 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContentObject; + +import java.io.IOException; + +/** + * Response for decommission request + * + * @opensearch.internal + */ +public class DecommissionResponse extends AcknowledgedResponse implements ToXContentObject { + + public DecommissionResponse(StreamInput in) throws IOException { + super(in); + } + + public DecommissionResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java new file mode 100644 index 0000000000000..3a067d2f110b9 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.decommission.DecommissionService; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for registering decommission + * + * @opensearch.internal + */ +public class TransportDecommissionAction extends TransportClusterManagerNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportDecommissionAction.class); + private final DecommissionService decommissionService; + + @Inject + public TransportDecommissionAction( + TransportService transportService, + ClusterService clusterService, + DecommissionService decommissionService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + DecommissionAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + DecommissionRequest::new, + indexNameExpressionResolver + ); + this.decommissionService = decommissionService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected DecommissionResponse read(StreamInput in) throws IOException { + return new DecommissionResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(DecommissionRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void clusterManagerOperation(DecommissionRequest request, ClusterState state, ActionListener listener) + throws Exception { + logger.info("starting awareness attribute [{}] decommissioning", request.getDecommissionAttribute().toString()); + decommissionService.startDecommissionAction(request.getDecommissionAttribute(), listener); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/package-info.java new file mode 100644 index 0000000000000..c361f4b95a484 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Transport handlers for putting a new decommission request */ +package org.opensearch.action.admin.cluster.decommission.awareness.put; diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index 2f62ab13b131c..eacd7ff16e6af 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -37,6 +37,12 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; @@ -810,4 +816,34 @@ public interface ClusterAdminClient extends OpenSearchClient { */ ClusterPutWeightedRoutingRequestBuilder prepareWeightedRouting(); + /** + * Decommission awareness attribute + */ + ActionFuture decommission(DecommissionRequest request); + + /** + * Decommission awareness attribute + */ + void decommission(DecommissionRequest request, ActionListener listener); + + /** + * Decommission awareness attribute + */ + DecommissionRequestBuilder prepareDecommission(DecommissionRequest request); + + /** + * Get Decommissioned attribute + */ + ActionFuture getDecommission(GetDecommissionStateRequest request); + + /** + * Get Decommissioned attribute + */ + void getDecommission(GetDecommissionStateRequest request, ActionListener listener); + + /** + * Get Decommissioned attribute + */ + GetDecommissionStateRequestBuilder prepareGetDecommission(); + } diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java index 7154742de04fb..a033933cf6696 100644 --- a/server/src/main/java/org/opensearch/client/Requests.java +++ b/server/src/main/java/org/opensearch/client/Requests.java @@ -32,6 +32,8 @@ package org.opensearch.client; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; @@ -558,4 +560,22 @@ public static SnapshotsStatusRequest snapshotsStatusRequest(String repository) { public static ClusterPutWeightedRoutingRequest putWeightedRoutingRequest(String attributeName) { return new ClusterPutWeightedRoutingRequest(attributeName); } + + /** + * Creates a new decommission request. 
+ * + * @return returns put decommission request + */ + public static DecommissionRequest decommissionRequest() { + return new DecommissionRequest(); + } + + /** + * Get decommissioned attribute from metadata + * + * @return returns get decommission request + */ + public static GetDecommissionStateRequest getDecommissionStateRequest() { + return new GetDecommissionStateRequest(); + } } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index efd18dd4947ad..f74acd01f107c 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -43,6 +43,14 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; import org.opensearch.action.admin.cluster.health.ClusterHealthAction; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; @@ -1344,6 +1352,37 @@ public DeleteStoredScriptRequestBuilder prepareDeleteStoredScript() { public DeleteStoredScriptRequestBuilder prepareDeleteStoredScript(String id) { return prepareDeleteStoredScript().setId(id); } + + @Override + public ActionFuture decommission(DecommissionRequest request) { + return execute(DecommissionAction.INSTANCE, request); + } + + @Override + public void decommission(DecommissionRequest request, ActionListener listener) { + execute(DecommissionAction.INSTANCE, request, listener); + } + + @Override + public DecommissionRequestBuilder prepareDecommission(DecommissionRequest request) { + return new DecommissionRequestBuilder(this, DecommissionAction.INSTANCE, request); + } + + @Override + public ActionFuture getDecommission(GetDecommissionStateRequest request) { + return execute(GetDecommissionStateAction.INSTANCE, request); + } + + @Override + public void getDecommission(GetDecommissionStateRequest request, ActionListener listener) { + execute(GetDecommissionStateAction.INSTANCE, request, listener); + } + + @Override + public GetDecommissionStateRequestBuilder prepareGetDecommission() { + return new GetDecommissionStateRequestBuilder(this, GetDecommissionStateAction.INSTANCE); + } + } static class IndicesAdmin implements IndicesAdminClient { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 78b09dc020dfc..814aa17255931 100644 --- 
a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -484,7 +484,7 @@ public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata) if (decommissionAttribute != null && status != null) { // We will let the node join the cluster if the current status is in FAILED state if (node.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()) - && status.equals(DecommissionStatus.FAILED) == false) { + && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { throw new NodeDecommissionedException( "node [{}] has decommissioned attribute [{}] with current status of decommissioning [{}]", node.toString(), diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index 1a0704c5a4ac2..c31ea53a5fd16 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -13,11 +13,11 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.NotClusterManagerException; -import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.allocation.AllocationService; @@ -117,7 +117,7 @@ private void setForcedAwarenessAttributes(Settings forceSettings) { */ public void startDecommissionAction( final DecommissionAttribute decommissionAttribute, - final ActionListener<ClusterStateUpdateResponse> listener + final ActionListener<DecommissionResponse> listener ) { // register the metadata with status as INIT as first step clusterService.submitStateUpdateTask("decommission [" + decommissionAttribute + "]", new ClusterStateUpdateTask(Priority.URGENT) { @@ -163,7 +163,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS private synchronized void decommissionClusterManagerNodes( final DecommissionAttribute decommissionAttribute, - ActionListener<ClusterStateUpdateResponse> listener + ActionListener<DecommissionResponse> listener ) { ClusterState state = clusterService.getClusterApplierService().state(); // since here metadata is already registered with INIT, we can guarantee that no new node with decommission attribute can further @@ -183,7 +183,8 @@ private synchronized void decommissionClusterManagerNodes( final Predicate<ClusterState> allNodesRemovedAndAbdicated = clusterState -> { final Set<String> votingConfigNodeIds = clusterState.getLastCommittedConfiguration().getNodeIds(); return nodeIdsToBeExcluded.stream().noneMatch(votingConfigNodeIds::contains) - && nodeIdsToBeExcluded.contains(clusterState.nodes().getClusterManagerNodeId()) == false; + && nodeIdsToBeExcluded.contains(clusterState.nodes().getClusterManagerNodeId()) == false + && clusterState.nodes().getClusterManagerNodeId() != null; }; ActionListener<Void> exclusionListener = new ActionListener<Void>() { @@ -205,7 +206,7 @@ public void onResponse(Void unused) { // we are good here to send the response
now as the request is processed by an eligible active leader // and to-be-decommissioned cluster manager is no more part of Voting Configuration and no more to-be-decommission // nodes can be part of Voting Config - listener.onResponse(new ClusterStateUpdateResponse(true)); + listener.onResponse(new DecommissionResponse(true)); failDecommissionedNodes(clusterService.getClusterApplierService().state()); } } else { diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java new file mode 100644 index 0000000000000..979bf05f537b7 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.client.Requests; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.PUT; + +/** + * Registers decommission action + * + * @opensearch.api + */ +public class RestDecommissionAction extends BaseRestHandler { + + @Override + public List routes() { + return singletonList(new Route(PUT, "/_cluster/decommission/awareness/{awareness_attribute_name}/{awareness_attribute_value}")); + } + + @Override + public String getName() { + return "decommission_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + DecommissionRequest decommissionRequest = createRequest(request); + return channel -> client.admin().cluster().decommission(decommissionRequest, new RestToXContentListener<>(channel)); + } + + DecommissionRequest createRequest(RestRequest request) throws IOException { + DecommissionRequest decommissionRequest = Requests.decommissionRequest(); + String attributeName = request.param("awareness_attribute_name"); + String attributeValue = request.param("awareness_attribute_value"); + return decommissionRequest.setDecommissionAttribute(new DecommissionAttribute(attributeName, attributeValue)); + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java new file mode 100644 index 0000000000000..8bc89ebf37960 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.client.Requests; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Returns decommissioned attribute information + * + * @opensearch.api + */ +public class RestGetDecommissionStateAction extends BaseRestHandler { + + @Override + public List routes() { + return singletonList(new Route(GET, "/_cluster/decommission/awareness/_status")); + } + + @Override + public String getName() { + return "get_decommission_state_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + GetDecommissionStateRequest getDecommissionStateRequest = Requests.getDecommissionStateRequest(); + return channel -> client.admin().cluster().getDecommission(getDecommissionStateRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java new file mode 100644 index 0000000000000..97bc54d8d7b30 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class GetDecommissionStateResponseTests extends AbstractXContentTestCase { + @Override + protected GetDecommissionStateResponse createTestInstance() { + DecommissionStatus status = randomFrom(DecommissionStatus.values()); + String attributeName = randomAlphaOfLength(10); + String attributeValue = randomAlphaOfLength(10); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + return new GetDecommissionStateResponse(decommissionAttribute, status); + } + + @Override + protected GetDecommissionStateResponse doParseInstance(XContentParser parser) throws IOException { + return GetDecommissionStateResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java new file mode 100644 index 0000000000000..c189b5702dea0 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DecommissionRequestTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + String attributeName = "zone"; + String attributeValue = "zone-1"; + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + final DecommissionRequest originalRequest = new DecommissionRequest(decommissionAttribute); + + final DecommissionRequest deserialized = copyWriteable(originalRequest, writableRegistry(), DecommissionRequest::new); + + assertEquals(deserialized.getDecommissionAttribute(), originalRequest.getDecommissionAttribute()); + } + + public void testValidation() { + { + String attributeName = null; + String attributeValue = "test"; + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + + final DecommissionRequest request = new DecommissionRequest(decommissionAttribute); + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertTrue(e.getMessage().contains("attribute name is missing")); + } + { + String attributeName = "zone"; + String attributeValue = ""; + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + + final DecommissionRequest request = new DecommissionRequest(decommissionAttribute); + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertTrue(e.getMessage().contains("attribute value is missing")); + } + { + String attributeName = "zone"; + String attributeValue = "test"; + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + + final DecommissionRequest request = new DecommissionRequest(decommissionAttribute); + ActionRequestValidationException e = request.validate(); + assertNull(e); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponseTests.java new file mode 100644 index 0000000000000..5ee5a5f3cf016 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponseTests.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.put; + +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DecommissionResponseTests extends OpenSearchTestCase { + public void testSerialization() throws IOException { + final DecommissionResponse originalRequest = new DecommissionResponse(true); + copyWriteable(originalRequest, writableRegistry(), DecommissionResponse::new); + // there are no fields so we're just checking that this doesn't throw anything + } +} diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 28f1a92cc1bdc..5aa582a5e73f6 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -235,11 +235,7 @@ public void testJoinClusterWithNoDecommission() { public void testPreventJoinClusterWithDecommission() { Settings.builder().build(); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-1"); - DecommissionStatus decommissionStatus = randomFrom( - DecommissionStatus.INIT, - DecommissionStatus.IN_PROGRESS, - DecommissionStatus.SUCCESSFUL - ); + DecommissionStatus decommissionStatus = randomFrom(DecommissionStatus.IN_PROGRESS, DecommissionStatus.SUCCESSFUL); DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( decommissionAttribute, decommissionStatus @@ -252,7 +248,11 @@ public void testPreventJoinClusterWithDecommission() { public void testJoinClusterWithDifferentDecommission() { Settings.builder().build(); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-1"); - DecommissionStatus decommissionStatus = randomFrom(DecommissionStatus.values()); + DecommissionStatus decommissionStatus = randomFrom( + DecommissionStatus.INIT, + DecommissionStatus.IN_PROGRESS, + DecommissionStatus.SUCCESSFUL + ); DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( decommissionAttribute, decommissionStatus diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 71ee61ffec275..61a6662ef0cf3 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -13,9 +13,9 @@ import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -120,9 +120,9 @@ public void shutdownThreadPoolAndClusterService() { public void testDecommissioningNotStartedForInvalidAttributeName() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("rack", "rack-a"); - ActionListener listener = new ActionListener() { + ActionListener 
listener = new ActionListener<DecommissionResponse>() { @Override - public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + public void onResponse(DecommissionResponse decommissionResponse) { fail("on response shouldn't have been called"); } @@ -141,9 +141,9 @@ public void onFailure(Exception e) { public void testDecommissioningNotStartedForInvalidAttributeValue() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "rack-a"); - ActionListener<ClusterStateUpdateResponse> listener = new ActionListener<ClusterStateUpdateResponse>() { + ActionListener<DecommissionResponse> listener = new ActionListener<DecommissionResponse>() { @Override - public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + public void onResponse(DecommissionResponse decommissionResponse) { fail("on response shouldn't have been called"); } @@ -177,9 +177,9 @@ public void testDecommissioningFailedWhenAnotherAttributeDecommissioningSuccessf clusterService, builder.metadata(Metadata.builder(clusterService.state().metadata()).decommissionAttributeMetadata(oldMetadata).build()) ); - ActionListener<ClusterStateUpdateResponse> listener = new ActionListener<ClusterStateUpdateResponse>() { + ActionListener<DecommissionResponse> listener = new ActionListener<DecommissionResponse>() { @Override - public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + public void onResponse(DecommissionResponse decommissionResponse) { fail("on response shouldn't have been called"); } diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java new file mode 100644 index 0000000000000..b724de0bd5cc6 --- /dev/null +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.rest.action.admin.cluster; + +import org.junit.Before; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.test.rest.RestActionTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class RestDecommissionActionTests extends RestActionTestCase { + + private RestDecommissionAction action; + + @Before + public void setupAction() { + action = new RestDecommissionAction(); + controller().registerHandler(action); + } + + public void testCreateRequest() throws IOException { + Map params = new HashMap<>(); + params.put("awareness_attribute_name", "zone"); + params.put("awareness_attribute_value", "zone-1"); + + RestRequest deprecatedRequest = buildRestRequest(params); + + DecommissionRequest request = action.createRequest(deprecatedRequest); + assertEquals(request.getDecommissionAttribute().attributeName(), "zone"); + assertEquals(request.getDecommissionAttribute().attributeValue(), "zone-1"); + assertEquals(deprecatedRequest.getHttpRequest().method(), RestRequest.Method.PUT); + } + + private FakeRestRequest buildRestRequest(Map params) { + return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_cluster/decommission/awareness/{awareness_attribute_name}/{awareness_attribute_value}") + .withParams(params) + .build(); + } +} From 76c372f261381e9730a33eda087b8839c9e332fc Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Mon, 3 Oct 2022 08:45:47 -0700 Subject: [PATCH 012/122] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary. 
(#4630) Signed-off-by: Rishikesh1159 Signed-off-by: Rishikesh1159 Co-authored-by: Suraj Singh --- CHANGELOG.md | 1 + .../opensearch/index/shard/IndexShard.java | 4 +++ .../SegmentReplicationIndexShardTests.java | 31 +++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ad53f4fdc803..a84815ac1cc52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixed the ignore_malformed setting to also ignore objects ([#4494](https://github.com/opensearch-project/OpenSearch/pull/4494)) - [Segment Replication] Ignore lock file when testing cleanupAndPreserveLatestCommitPoint ([#4544](https://github.com/opensearch-project/OpenSearch/pull/4544)) - Updated jackson to 2.13.4 and snakeyml to 1.32 ([#4556](https://github.com/opensearch-project/OpenSearch/pull/4556)) +- [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) - [Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) - [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 9185ef0d440ce..d05f7c34f80ce 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1441,6 +1441,10 @@ public final boolean shouldProcessCheckpoint(ReplicationCheckpoint requestCheckp logger.warn("Ignoring new replication checkpoint - shard is in primaryMode and cannot receive any checkpoints."); return false; } + if (this.routingEntry().primary()) { + logger.warn("Ignoring new replication checkpoint - primary shard cannot receive any checkpoints."); + return false; + } ReplicationCheckpoint localCheckpoint = getLatestReplicationCheckpoint(); if (localCheckpoint.isAheadOf(requestCheckpoint)) { logger.trace( diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 007317f6e71cd..da04ea1b9914b 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -62,6 +62,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.spy; public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { @@ -208,6 +209,36 @@ public void testPublishCheckpointAfterRelocationHandOff() throws IOException { closeShards(shard); } + /** + * here we are starting a new primary shard and testing that we don't process a checkpoint on a shard when it's shard routing is primary. + */ + public void testRejectCheckpointOnShardRoutingPrimary() throws IOException { + IndexShard primaryShard = newStartedShard(true); + SegmentReplicationTargetService sut; + sut = prepareForReplication(primaryShard); + SegmentReplicationTargetService spy = spy(sut); + + // Starting a new shard in PrimaryMode and shard routing primary. 
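+ // The relocation handoff below moves the shard out of primary mode while its routing entry stays primary, so it is the new routing-based check (not the existing primaryMode check) that must reject the incoming checkpoint.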
+ IndexShard spyShard = spy(primaryShard); + String id = primaryShard.routingEntry().allocationId().getId(); + + // Starting relocation handoff + primaryShard.getReplicationTracker().startRelocationHandoff(id); + + // Completing relocation handoff. + primaryShard.getReplicationTracker().completeRelocationHandoff(); + + // Assert that primary shard is no longer in Primary Mode and shard routing is still Primary + assertEquals(false, primaryShard.getReplicationTracker().isPrimaryMode()); + assertEquals(true, primaryShard.routingEntry().primary()); + + spy.onNewCheckpoint(new ReplicationCheckpoint(primaryShard.shardId(), 0L, 0L, 0L, 0L), spyShard); + + // Verify that checkpoint is not processed as shard routing is primary. + verify(spy, times(0)).startReplication(any(), any(), any()); + closeShards(primaryShard); + } + public void testReplicaReceivesGenIncrease() throws Exception { try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { shards.startAll(); From 9e5ea465b114346dfb1315cce7c84a8fe554aae8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 09:28:17 -0700 Subject: [PATCH 013/122] Bump azure-storage-common from 12.18.0 to 12.18.1 in /plugins/repository-azure (#4664) * Bump azure-storage-common in /plugins/repository-azure Bumps [azure-storage-common](https://github.com/Azure/azure-sdk-for-java) from 12.18.0 to 12.18.1. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-storage-blob_12.18.0...azure-storage-common_12.18.1) --- updated-dependencies: - dependency-name: com.azure:azure-storage-common dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-storage-common-12.18.0.jar.sha1 | 1 - .../licenses/azure-storage-common-12.18.1.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.18.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index a84815ac1cc52..a1890b9e15943 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 - Bumps `jettison` from 1.5.0 to 1.5.1 +- Bumps `azure-storage-common` from 12.18.0 to 12.18.1 ### Dependencies @@ -119,4 +120,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 17d93a7e24e61..8ca151d1e90db 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -45,7 +45,7 @@ opensearchplugin { dependencies { api 'com.azure:azure-core:1.31.0' - api 'com.azure:azure-storage-common:12.18.0' + api 'com.azure:azure-storage-common:12.18.1' api 'com.azure:azure-core-http-netty:1.12.4' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 deleted file mode 100644 index f824d6cdf4f18..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cb6fa5863f5cd8406934baec739285209165ef4b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.18.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.18.1.jar.sha1 new file mode 100644 index 0000000000000..94d9c14392c11 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.18.1.jar.sha1 @@ -0,0 +1 @@ +09f5229d0775dff1b21cc3cb2936de751e79b5ac \ No newline at end of file From b074c0f93a827c1f82a7547bd21abb6aabe11065 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 10:03:34 -0700 Subject: [PATCH 014/122] Bump forbiddenapis from 3.3 to 3.4 in /buildSrc/src/testKit/thirdPartyAudit (#4662) * Bump forbiddenapis in /buildSrc/src/testKit/thirdPartyAudit Bumps [forbiddenapis](https://github.com/policeman-tools/forbidden-apis) from 3.3 to 3.4. - [Release notes](https://github.com/policeman-tools/forbidden-apis/releases) - [Commits](https://github.com/policeman-tools/forbidden-apis/compare/3.3...3.4) --- updated-dependencies: - dependency-name: de.thetaphi:forbiddenapis dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + buildSrc/src/testKit/thirdPartyAudit/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1890b9e15943..c944a9e23c082 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 - Bumps `jettison` from 1.5.0 to 1.5.1 - Bumps `azure-storage-common` from 12.18.0 to 12.18.1 +- Bumps `forbiddenapis` from 3.3 to 3.4 ### Dependencies diff --git a/buildSrc/src/testKit/thirdPartyAudit/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/build.gradle index 2c86d28cf0206..537bf3c1fad71 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/build.gradle @@ -40,7 +40,7 @@ repositories { } dependencies { - forbiddenApisCliJar 'de.thetaphi:forbiddenapis:3.3' + forbiddenApisCliJar 'de.thetaphi:forbiddenapis:3.4' jdkJarHell 'org.opensearch:opensearch-core:current' compileOnly "org.${project.properties.compileOnlyGroup}:${project.properties.compileOnlyVersion}" implementation "org.${project.properties.compileGroup}:${project.properties.compileVersion}" From c20446d9118ef9497ce1d60218d43c5b401e7d1a Mon Sep 17 00:00:00 2001 From: Xue Zhou <85715413+xuezhou25@users.noreply.github.com> Date: Mon, 3 Oct 2022 11:14:24 -0700 Subject: [PATCH 015/122] Fixed the SnapshotsInProgress error during index deletion (#4570) Signed-off-by: Xue Zhou --- CHANGELOG.md | 3 ++- .../DedicatedClusterSnapshotRestoreIT.java | 25 +++++++++++++++++++ .../snapshots/SnapshotsService.java | 8 +++++- 3 files changed, 34 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c944a9e23c082..cd4075ab01d86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -90,6 +90,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixed the ignore_malformed setting to also ignore objects ([#4494](https://github.com/opensearch-project/OpenSearch/pull/4494)) - [Segment Replication] Ignore lock file when testing cleanupAndPreserveLatestCommitPoint ([#4544](https://github.com/opensearch-project/OpenSearch/pull/4544)) - Updated jackson to 2.13.4 and snakeyml to 1.32 ([#4556](https://github.com/opensearch-project/OpenSearch/pull/4556)) +- Fixed the SnapshotsInProgress error during index deletion ([#4570](https://github.com/opensearch-project/OpenSearch/pull/4570)) - [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) - [Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) - [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) @@ -121,4 +122,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index ce76e955fcc5a..b4287f201489b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -1475,6 +1475,31 @@ public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { logger.info("--> done"); } + public void testIndexDeletionDuringSnapshotCreationInQueue() throws Exception { + assertAcked(prepareCreate("test-idx", 1, indexSettingsNoReplicas(1))); + ensureGreen(); + indexRandomDocs("test-idx", 100); + createRepository("test-repo", "fs"); + createSnapshot("test-repo", "test-snap", Collections.singletonList("test-idx")); + + logger.info("--> create snapshot to be deleted and then delete"); + createSnapshot("test-repo", "test-snap-delete", Collections.singletonList("test-idx")); + clusterAdmin().prepareDeleteSnapshot("test-repo", "test-snap-delete").execute(); + + logger.info("--> create snapshot before index deletion during above snapshot deletion"); + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2") + .setWaitForCompletion(false) + .setPartial(true) + .setIndices("test-idx") + .get(); + + logger.info("delete index during snapshot creation"); + assertAcked(admin().indices().prepareDelete("test-idx")); + + clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").get(); + ensureGreen("test-idx"); + } + private long calculateTotalFilesSize(List files) { return files.stream().mapToLong(f -> { try { diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 0aa967f87be9b..4f672c9813d64 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -2965,8 +2965,14 @@ private SnapshotsInProgress updatedSnapshotsInProgress(ClusterState currentState updatedAssignmentsBuilder.put(shardId, updated); } } - snapshotEntries.add(entry.withStartedShards(updatedAssignmentsBuilder.build())); + final SnapshotsInProgress.Entry updatedEntry = entry.withShardStates(updatedAssignmentsBuilder.build()); + snapshotEntries.add(updatedEntry); changed = true; + // When all the required shards for a snapshot are missing, the snapshot state will be "completed" + // need to finalize it. 
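+ // Note: without finalizing here, a queued snapshot whose shards were all removed (e.g. by deleting the index) would never be finalized, which is the SnapshotsInProgress error this change addresses (#4570).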
+ if (updatedEntry.state().completed()) { + newFinalizations.add(entry); + } } } else { // Entry is already completed so we will finalize it now that the delete doesn't block us after From 69f421f1722a4a6c9f4adf39850274b4d3f22164 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 3 Oct 2022 14:19:41 -0400 Subject: [PATCH 016/122] Update to Apache Lucene 9.4.0 (#4661) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - .../licenses/lucene-expressions-9.4.0.jar.sha1 | 1 + .../lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - .../licenses/lucene-analysis-icu-9.4.0.jar.sha1 | 1 + ...cene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - .../licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 | 1 + .../lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - .../licenses/lucene-analysis-nori-9.4.0.jar.sha1 | 1 + ...cene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - .../licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 | 1 + ...ucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - .../licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 | 1 + ...ucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - .../licenses/lucene-analysis-stempel-9.4.0.jar.sha1 | 1 + ...ne-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - .../licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 | 1 + ...lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-analysis-common-9.4.0.jar.sha1 | 1 + ...lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 | 1 + .../licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-core-9.4.0.jar.sha1 | 1 + .../lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-grouping-9.4.0.jar.sha1 | 1 + .../lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-highlighter-9.4.0.jar.sha1 | 1 + .../licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-join-9.4.0.jar.sha1 | 1 + .../lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-memory-9.4.0.jar.sha1 | 1 + .../licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-misc-9.4.0.jar.sha1 | 1 + .../lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-queries-9.4.0.jar.sha1 | 1 + .../lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-queryparser-9.4.0.jar.sha1 | 1 + .../lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.4.0.jar.sha1 | 1 + .../lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 | 1 + .../lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-spatial3d-9.4.0.jar.sha1 | 1 + .../lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 - server/licenses/lucene-suggest-9.4.0.jar.sha1 | 1 + .../org/opensearch/index/engine/TranslogLeafReader.java | 4 ++++ .../fieldcomparator/BytesRefFieldComparatorSource.java | 8 +++++++- .../java/org/opensearch/index/get/ShardGetService.java | 2 ++ .../opensearch/search/lookup/LeafFieldsLookupTests.java | 2 ++ 50 files changed, 39 insertions(+), 24 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 
delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-core-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-join-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.4.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 
cd4075ab01d86..1257d24c2bbf5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() ([#4582](https://github.com/opensearch-project/OpenSearch/pull/4582)) - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) +- Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) ### Deprecated diff --git a/buildSrc/version.properties b/buildSrc/version.properties index aa6a14ca6e47d..a9545f5738efa 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.4.0-snapshot-ddf0d0a +lucene = 9.4.0 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.4+8 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index ec6906d730ac1..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f23e695b0c864fa9722e4f67d950266ca64d37b \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..2b647c1270e14 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 @@ -0,0 +1 @@ +19749e264805171009836cbedecc5494b13cd920 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 83c10845cd35a..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f6cb0fd7387c6e0db3b86eef7d8677cea3e88a0 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..0038e3153b139 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 @@ -0,0 +1 @@ +aa0f250558375922f3091820361156e514fe1842 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 29387f38bc10c..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6aff23715a2fba88d844ac83c61decce8ed480bd \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..ec8c78a580543 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 @@ -0,0 +1 @@ 
+32eb1ad367ab1289804aeed95ea7216711a7764d \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 54b451abf5049..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f82d3eba195134f663865e9de3f511e16fbc7351 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..438585ee3afa8 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 @@ -0,0 +1 @@ +63661714be65f882a921d281965b0779fd487b90 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 87474064fbe0f..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2af6e1996e696b1721a2ec7382bac9aa5096eecb \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..019a98dc594b3 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 @@ -0,0 +1 @@ +1034d876551fc21f7835b456dab01db21b9a4af6 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 6d35832a1a643..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec01d7f91f711abd75b539bb66a437db7cf1ca67 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..cc8e31b7a248f --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 @@ -0,0 +1 @@ +f704ee4b14e2fe2622bb983f04b36a32df8fd4a7 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index f93d1a153cd26..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7041b3fa92b8687a84c4ce666b5718bbbc315db1 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..25e115b84308d --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 @@ -0,0 +1 @@ +a95ff17b51da6b3da641fa4053e5ee9ea2ff5daf \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 
b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 77589a361badf..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a5ec9a237c2539e3cbabfadff707252e33b3599 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..7cc4a7131f866 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 @@ -0,0 +1 @@ +13e1ae2c760d8c0d7990ffe3296e46d9d8e6f842 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 0805590fd6efd..0000000000000 --- a/server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3920c527fd5eee69e09f614391cef4e05c581c7f \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.4.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..d4db2877c486b --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.4.0.jar.sha1 @@ -0,0 +1 @@ +02fbd4e87241411fcf5d34e92a50bee46ab164dc \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index a8c648e4c192a..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d1dfcd42ea257355d5cbc64ac2f47142a550ae52 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..1b7b53ef9fe70 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 @@ -0,0 +1 @@ +259863dfd107645de6146b3c87b4ecee66a4d43d \ No newline at end of file diff --git a/server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 779c9796ceae7..0000000000000 --- a/server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ae4757f88e97036b30eb1eac1d21da6dabc85a5e \ No newline at end of file diff --git a/server/licenses/lucene-core-9.4.0.jar.sha1 b/server/licenses/lucene-core-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..66f7f24485172 --- /dev/null +++ b/server/licenses/lucene-core-9.4.0.jar.sha1 @@ -0,0 +1 @@ +cca1116f813c0f0c63acfac4c952baf29d46d76b \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 9167482284f9d..0000000000000 --- a/server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -75485e3222b096027463378fe3bb2c8d1f529d25 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.4.0.jar.sha1 b/server/licenses/lucene-grouping-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..fe79c0efd34e4 --- /dev/null +++ b/server/licenses/lucene-grouping-9.4.0.jar.sha1 @@ -0,0 +1 @@ +51bec1d5acc8ecaf9f50e047d3f86d60c7a958f4 \ No 
newline at end of file diff --git a/server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 2090b009a57fe..0000000000000 --- a/server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2a3d1f326f6d3bd6033b5620dc84f3c20a0412d \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.4.0.jar.sha1 b/server/licenses/lucene-highlighter-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..54700f08a3fdb --- /dev/null +++ b/server/licenses/lucene-highlighter-9.4.0.jar.sha1 @@ -0,0 +1 @@ +c8cf8c9308d8fb18a927c7ed267a14ace3990a5f \ No newline at end of file diff --git a/server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index df74fa911f7d2..0000000000000 --- a/server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2c6f6058c765a955e0544c6050aeee3a5e376e47 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.4.0.jar.sha1 b/server/licenses/lucene-join-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..752006d3a66dd --- /dev/null +++ b/server/licenses/lucene-join-9.4.0.jar.sha1 @@ -0,0 +1 @@ +99b2d3c8e137a6853a2503456897d47d4f18974b \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 3e9d65d73d36d..0000000000000 --- a/server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a155679022106c7db356da32563580d8de043d7 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.4.0.jar.sha1 b/server/licenses/lucene-memory-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..27b488699968e --- /dev/null +++ b/server/licenses/lucene-memory-9.4.0.jar.sha1 @@ -0,0 +1 @@ +881cb214e79da14de35cb0e8e6779d2722828a96 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index f056cfe5b86ef..0000000000000 --- a/server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7c7ac2027a12bf02657ec3a421c8039736b98344 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.4.0.jar.sha1 b/server/licenses/lucene-misc-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..f9924475b9acd --- /dev/null +++ b/server/licenses/lucene-misc-9.4.0.jar.sha1 @@ -0,0 +1 @@ +a126123e482e6bf2e7aea670d221a2a39d3277dc \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 41ea4a342f949..0000000000000 --- a/server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25978bb82b9f78537f0511f0be64253be19de6fd \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.4.0.jar.sha1 b/server/licenses/lucene-queries-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..65e441bfdaf90 --- /dev/null +++ b/server/licenses/lucene-queries-9.4.0.jar.sha1 @@ -0,0 +1 @@ +fe74dbfe9dba9ee9ee2cb80f151fde97fb4efd12 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 
b/server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index e0687571df957..0000000000000 --- a/server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e3b6ce41d5bd73fdcc80b5b2a40283c03525aa96 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.4.0.jar.sha1 b/server/licenses/lucene-queryparser-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..2d454942d52e1 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.4.0.jar.sha1 @@ -0,0 +1 @@ +13f108a8572fcf0670c7df3ba8dbe1076d0e0dbe \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index e03c731914757..0000000000000 --- a/server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ae8649d2d01a416acdbe7c29f14b47a5594acb85 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.4.0.jar.sha1 b/server/licenses/lucene-sandbox-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..4ebcf3f6edc8f --- /dev/null +++ b/server/licenses/lucene-sandbox-9.4.0.jar.sha1 @@ -0,0 +1 @@ +e7a676a12ea50dcbf64564f4e4022f939f0a627d \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index ea8b5cd1ddb1d..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58049352bb5fc8683c389eb2eb879fb4f16ff9b3 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..c0f181ad19eb6 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 @@ -0,0 +1 @@ +84d956d1cb1458c51967af1c4acadd2a1f92634d \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index a0c0dbe07af8f..0000000000000 --- a/server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2d3a8f802e1bb439d945de81ba6b16d01b24d58a \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.4.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..3414f36b02bef --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.4.0.jar.sha1 @@ -0,0 +1 @@ +76887ca708f23b13613e45fb9e307c548b22c6da \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 deleted file mode 100644 index 1f332eac16c72..0000000000000 --- a/server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -11cdb21cf08feb19e074b4a101e1550dfd544379 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.4.0.jar.sha1 b/server/licenses/lucene-suggest-9.4.0.jar.sha1 new file mode 100644 index 0000000000000..563a0b5ad966b --- /dev/null +++ b/server/licenses/lucene-suggest-9.4.0.jar.sha1 @@ -0,0 +1 @@ +406c9c539f262449d3b1e57e7bc4302efeecaf6c \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java 
b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java index 224c1dcab08ab..3a198743c3d8a 100644 --- a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java @@ -46,6 +46,7 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.StoredFieldVisitor; import org.apache.lucene.index.Terms; +import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; import org.apache.lucene.index.VectorValues; import org.apache.lucene.search.TopDocs; @@ -84,6 +85,7 @@ public final class TranslogLeafReader extends LeafReader { 0, 0, 0, + VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, false ); @@ -101,6 +103,7 @@ public final class TranslogLeafReader extends LeafReader { 0, 0, 0, + VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, false ); @@ -118,6 +121,7 @@ public final class TranslogLeafReader extends LeafReader { 0, 0, 0, + VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, false ); diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java index d7ce6ae8aba3e..430a1f90ff3a4 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java @@ -100,7 +100,13 @@ public FieldComparator newComparator(String fieldname, int numHits, boolean e final boolean sortMissingLast = sortMissingLast(missingValue) ^ reversed; final BytesRef missingBytes = (BytesRef) missingObject(missingValue, reversed); if (indexFieldData instanceof IndexOrdinalsFieldData) { - FieldComparator cmp = new TermOrdValComparator(numHits, indexFieldData.getFieldName(), sortMissingLast, reversed) { + FieldComparator cmp = new TermOrdValComparator( + numHits, + indexFieldData.getFieldName(), + sortMissingLast, + reversed, + enableSkipping + ) { @Override protected SortedDocValues getSortedDocValues(LeafReaderContext context, String field) throws IOException { diff --git a/server/src/main/java/org/opensearch/index/get/ShardGetService.java b/server/src/main/java/org/opensearch/index/get/ShardGetService.java index 7b34ab5f0d5da..08e2b32bded0e 100644 --- a/server/src/main/java/org/opensearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/opensearch/index/get/ShardGetService.java @@ -39,6 +39,7 @@ import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.index.StoredFieldVisitor; import org.apache.lucene.index.Term; +import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; @@ -326,6 +327,7 @@ private GetResult innerGetLoadFromStoredFields( 0, 0, 0, + VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, false ); diff --git a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java index 7deb6845af607..0155e288a96fd 100644 --- a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java +++ b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java @@ -36,6 +36,7 @@ import org.apache.lucene.index.IndexOptions; import 
org.apache.lucene.index.LeafReader; import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -81,6 +82,7 @@ public void setUp() throws Exception { 0, 0, 0, + VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, false ); From a17692d0124e808b9539384e9107a2ee4a0ebe22 Mon Sep 17 00:00:00 2001 From: Vishal Sarda Date: Mon, 3 Oct 2022 14:44:34 -0700 Subject: [PATCH 017/122] Fixed day of year defaulting for round up parser (#4627) Signed-off-by: Vishal Sarda --- CHANGELOG.md | 5 +++ .../org/opensearch/common/time/EpochTime.java | 4 +-- .../common/time/JavaDateFormatter.java | 36 ++++++++++++------- .../common/time/JavaDateMathParserTests.java | 10 ++++++ 4 files changed, 41 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1257d24c2bbf5..da7bae3761ff8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -91,6 +91,11 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixed the ignore_malformed setting to also ignore objects ([#4494](https://github.com/opensearch-project/OpenSearch/pull/4494)) - [Segment Replication] Ignore lock file when testing cleanupAndPreserveLatestCommitPoint ([#4544](https://github.com/opensearch-project/OpenSearch/pull/4544)) - Updated jackson to 2.13.4 and snakeyml to 1.32 ([#4556](https://github.com/opensearch-project/OpenSearch/pull/4556)) +- Fixed day of year defaulting for round up parser ([#4627](https://github.com/opensearch-project/OpenSearch/pull/4627)) +- Fixed the SnapshotsInProgress error during index deletion ([#4570](https://github.com/opensearch-project/OpenSearch/pull/4570)) +- [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) +- [Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) +- [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) - Fixed the SnapshotsInProgress error during index deletion ([#4570](https://github.com/opensearch-project/OpenSearch/pull/4570)) - [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) - [Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) diff --git a/server/src/main/java/org/opensearch/common/time/EpochTime.java b/server/src/main/java/org/opensearch/common/time/EpochTime.java index c80d95aad1283..19e70fbc2202d 100644 --- a/server/src/main/java/org/opensearch/common/time/EpochTime.java +++ b/server/src/main/java/org/opensearch/common/time/EpochTime.java @@ -259,7 +259,7 @@ public long getFrom(TemporalAccessor temporal) { static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter( "epoch_second", SECONDS_FORMATTER1, - builder -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), + (builder, parser) -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), SECONDS_FORMATTER1, SECONDS_FORMATTER2 ); @@ -267,7 +267,7 @@ public long getFrom(TemporalAccessor temporal) { static final DateFormatter MILLIS_FORMATTER = new JavaDateFormatter( 
"epoch_millis", MILLISECONDS_FORMATTER1, - builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), + (builder, parser) -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2 ); diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java index f9eeab38b2848..07ea806aa6b8d 100644 --- a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java @@ -51,21 +51,19 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.function.Consumer; +import java.util.function.BiConsumer; import java.util.stream.Collectors; class JavaDateFormatter implements DateFormatter { // base fields which should be used for default parsing, when we round up for date math - private static final Map ROUND_UP_BASE_FIELDS = new HashMap<>(6); + private static final Map ROUND_UP_GENERIC_BASE_FIELDS = new HashMap<>(4); { - ROUND_UP_BASE_FIELDS.put(ChronoField.MONTH_OF_YEAR, 1L); - ROUND_UP_BASE_FIELDS.put(ChronoField.DAY_OF_MONTH, 1L); - ROUND_UP_BASE_FIELDS.put(ChronoField.HOUR_OF_DAY, 23L); - ROUND_UP_BASE_FIELDS.put(ChronoField.MINUTE_OF_HOUR, 59L); - ROUND_UP_BASE_FIELDS.put(ChronoField.SECOND_OF_MINUTE, 59L); - ROUND_UP_BASE_FIELDS.put(ChronoField.NANO_OF_SECOND, 999_999_999L); + ROUND_UP_GENERIC_BASE_FIELDS.put(ChronoField.HOUR_OF_DAY, 23L); + ROUND_UP_GENERIC_BASE_FIELDS.put(ChronoField.MINUTE_OF_HOUR, 59L); + ROUND_UP_GENERIC_BASE_FIELDS.put(ChronoField.SECOND_OF_MINUTE, 59L); + ROUND_UP_GENERIC_BASE_FIELDS.put(ChronoField.NANO_OF_SECOND, 999_999_999L); } private final String format; @@ -96,14 +94,25 @@ JavaDateFormatter getRoundupParser() { // named formatters use default roundUpParser JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { - this(format, printer, builder -> ROUND_UP_BASE_FIELDS.forEach(builder::parseDefaulting), parsers); + this(format, printer, ROUND_UP_BASE_FIELDS, parsers); } + private static final BiConsumer ROUND_UP_BASE_FIELDS = (builder, parser) -> { + String parserString = parser.toString(); + if (parserString.contains(ChronoField.DAY_OF_YEAR.toString())) { + builder.parseDefaulting(ChronoField.DAY_OF_YEAR, 1L); + } else { + builder.parseDefaulting(ChronoField.MONTH_OF_YEAR, 1L); + builder.parseDefaulting(ChronoField.DAY_OF_MONTH, 1L); + } + ROUND_UP_GENERIC_BASE_FIELDS.forEach(builder::parseDefaulting); + }; + // subclasses override roundUpParser JavaDateFormatter( String format, DateTimeFormatter printer, - Consumer roundupParserConsumer, + BiConsumer roundupParserConsumer, DateTimeFormatter... parsers ) { if (printer == null) { @@ -138,13 +147,16 @@ JavaDateFormatter getRoundupParser() { * DateFormatters. * This means that we need to also have multiple RoundUp parsers. 
*/ - private List createRoundUpParser(String format, Consumer roundupParserConsumer) { + private List createRoundUpParser( + String format, + BiConsumer roundupParserConsumer + ) { if (format.contains("||") == false) { List roundUpParsers = new ArrayList<>(); for (DateTimeFormatter parser : this.parsers) { DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); builder.append(parser); - roundupParserConsumer.accept(builder); + roundupParserConsumer.accept(builder, parser); roundUpParsers.add(builder.toFormatter(locale())); } return roundUpParsers; diff --git a/server/src/test/java/org/opensearch/common/time/JavaDateMathParserTests.java b/server/src/test/java/org/opensearch/common/time/JavaDateMathParserTests.java index 504741f56efed..a5c7aa00d7cad 100644 --- a/server/src/test/java/org/opensearch/common/time/JavaDateMathParserTests.java +++ b/server/src/test/java/org/opensearch/common/time/JavaDateMathParserTests.java @@ -131,6 +131,16 @@ public void testBasicDates() { assertDateMathEquals("2014-05-30T20:21:35.123", "2014-05-30T20:21:35.123"); } + public void testDayOfYearWithMissingFields() { + DateFormatter formatter = DateFormatter.forPattern("yyyy[-DDD'T'HH:mm:ss.SSS]"); + assertDateMathEquals(formatter.toDateMathParser(), "2022", "2022-01-01T23:59:59.999Z", 0, true, ZoneOffset.UTC); + } + + public void testDayOfYear() { + DateFormatter formatter = DateFormatter.forPattern("yyyy[-DDD'T'HH:mm:ss.SSS]"); + assertDateMathEquals(formatter.toDateMathParser(), "2022-104T14:08:30.293", "2022-04-14T14:08:30.293", 0, true, ZoneOffset.UTC); + } + public void testRoundingDoesNotAffectExactDate() { assertDateMathEquals("2014-11-12T22:55:00.000Z", "2014-11-12T22:55:00.000Z", 0, true, null); assertDateMathEquals("2014-11-12T22:55:00.000Z", "2014-11-12T22:55:00.000Z", 0, false, null); From 0c80a7a148ceba7ad1ca6c91ac00a0a063a19e80 Mon Sep 17 00:00:00 2001 From: Bharathwaj G <58062316+bharath-techie@users.noreply.github.com> Date: Tue, 4 Oct 2022 12:54:49 +0530 Subject: [PATCH 018/122] Fix PIT flaky tests (#4632) * Fixing PIT flaky tests Signed-off-by: Bharathwaj G --- CHANGELOG.md | 1 + .../src/test/java/org/opensearch/client/PitIT.java | 7 ++++++- .../test/java/org/opensearch/search/PitMultiNodeTests.java | 1 - 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index da7bae3761ff8..ae9c243082f5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -91,6 +91,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixed the ignore_malformed setting to also ignore objects ([#4494](https://github.com/opensearch-project/OpenSearch/pull/4494)) - [Segment Replication] Ignore lock file when testing cleanupAndPreserveLatestCommitPoint ([#4544](https://github.com/opensearch-project/OpenSearch/pull/4544)) - Updated jackson to 2.13.4 and snakeyml to 1.32 ([#4556](https://github.com/opensearch-project/OpenSearch/pull/4556)) +- Fixing PIT flaky tests ([4632](https://github.com/opensearch-project/OpenSearch/pull/4632)) - Fixed day of year defaulting for round up parser ([#4627](https://github.com/opensearch-project/OpenSearch/pull/4627)) - Fixed the SnapshotsInProgress error during index deletion ([#4570](https://github.com/opensearch-project/OpenSearch/pull/4570)) - [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java 
b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index cbb4db10cd519..be9b614a8720f 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -71,7 +72,7 @@ public void testCreateAndDeletePit() throws IOException { assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(createPitResponse.getId())); } - public void testDeleteAllAndListAllPits() throws IOException { + public void testDeleteAllAndListAllPits() throws IOException, InterruptedException { CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); @@ -90,9 +91,11 @@ public void testDeleteAllAndListAllPits() throws IOException { List pits = getAllPitResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); assertTrue(pits.contains(pitResponse.getId())); assertTrue(pits.contains(pitResponse1.getId())); + CountDownLatch countDownLatch = new CountDownLatch(1); ActionListener deletePitListener = new ActionListener<>() { @Override public void onResponse(DeletePitResponse response) { + countDownLatch.countDown(); for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { assertTrue(deletePitInfo.isSuccessful()); } @@ -100,6 +103,7 @@ public void onResponse(DeletePitResponse response) { @Override public void onFailure(Exception e) { + countDownLatch.countDown(); if (!(e instanceof OpenSearchStatusException)) { throw new AssertionError("Delete all failed"); } @@ -123,6 +127,7 @@ public void onFailure(Exception e) { }; highLevelClient().getAllPitsAsync(RequestOptions.DEFAULT, getPitsListener); highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); // validate no pits case getAllPitResponse = highLevelClient().getAllPits(RequestOptions.DEFAULT); assertTrue(getAllPitResponse.getPitInfos().size() == 0); diff --git a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java index a23e4141a78e4..b11a80b9d8726 100644 --- a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java @@ -98,7 +98,6 @@ public Settings onNodeStopped(String nodeName) throws Exception { ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); ExecutionException ex = expectThrows(ExecutionException.class, execute::get); assertTrue(ex.getMessage().contains("Failed to execute phase [create_pit]")); - assertTrue(ex.getMessage().contains("Partial shards failure")); validatePitStats("index", 0, 0); return super.onNodeStopped(nodeName); } From 7b7ca9886ea5b35f14d7ca98bb365000d0287674 Mon Sep 17 00:00:00 2001 From: Pavlo Smahin Date: Tue, 4 Oct 2022 18:40:18 +0300 Subject: [PATCH 019/122] Add getter for path field in NestedQueryBuilder (#4636) --- CHANGELOG.md | 1 + .../org/opensearch/index/query/NestedQueryBuilder.java | 7 +++++++ 
.../opensearch/index/query/NestedQueryBuilderTests.java | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ae9c243082f5e..cd67e6d8ca44f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Segment Replication] Add snapshot and restore tests for segment replication feature ([#3993](https://github.com/opensearch-project/OpenSearch/pull/3993)) - Added missing javadocs for `:example-plugins` modules ([#4540](https://github.com/opensearch-project/OpenSearch/pull/4540)) - Add missing Javadoc tag descriptions in unit tests ([#4629](https://github.com/opensearch-project/OpenSearch/pull/4629)) +- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index 29d34aee42382..f98669210140d 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -130,6 +130,13 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeBoolean(ignoreUnmapped); } + /** + * Returns path of the nested query. + */ + public String path() { + return path; + } + /** * Returns the nested query to execute. */ diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index abc3e0bb8c4c3..91cfc90a9cebb 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -147,6 +147,10 @@ public void testSerializationBWC() throws IOException { } } + public void testPath() { + assertEquals("nested1", createTestQueryBuilder().path()); + } + public void testValidate() { QueryBuilder innerQuery = RandomQueryBuilder.createQuery(random()); IllegalArgumentException e = expectThrows( From 41f2b9ab209fbddb8a42225c983d9b48c26f7c21 Mon Sep 17 00:00:00 2001 From: Braddy Yeoh Date: Tue, 4 Oct 2022 18:12:29 +0100 Subject: [PATCH 020/122] Pre-commit checks now check for missing javadoc, and the Gradle check Jenkins build now waits for pre-commit checks to pass before attempting (#4660) * Ignore all malformed objects when ignore_malformed is true (#4494) Fixes a bug to not fail the entire document when "ignore_malformed" is set to true. Allowing the valid fields to be indexed and ignore only the malformed fields. 
Signed-off-by: Hauck Signed-off-by: Braddy Yeoh * Adding javadoc check to the precommit GitHub Action Signed-off-by: Braddy Yeoh * gradle check now only runs after successful gradle precommit check Signed-off-by: Braddy Yeoh * Added to CHANGELOG Signed-off-by: Braddy Yeoh yeohbraddy@gmail.com Signed-off-by: Braddy Yeoh Signed-off-by: Hauck Signed-off-by: Braddy Yeoh Signed-off-by: Braddy Yeoh yeohbraddy@gmail.com Co-authored-by: Hauck <67768441+hauck-jvsh@users.noreply.github.com> Co-authored-by: Braddy Yeoh --- .github/workflows/gradle-check.yml | 5 +++++ .github/workflows/precommit.yml | 2 +- CHANGELOG.md | 12 +++++++++++- 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index cbaa7fa10fbb6..3e3be32bc31e6 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -7,10 +7,15 @@ on: - 'dependabot/**' pull_request_target: types: [opened, synchronize, reopened] + workflow_run: + workflows: [Gradle Precommit] + types: + - completed jobs: gradle-check: runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'success' }} timeout-minutes: 130 steps: - name: Checkout OpenSearch repo diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index 9860be4159b83..edf380c93614e 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -13,4 +13,4 @@ jobs: distribution: adopt - name: Run Gradle run: | - ./gradlew precommit --parallel + ./gradlew javadoc precommit --parallel diff --git a/CHANGELOG.md b/CHANGELOG.md index cd67e6d8ca44f..9e277c4dc965a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,11 @@ # CHANGELOG + Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] + ### Added + - Add support for s390x architecture ([#4001](https://github.com/opensearch-project/OpenSearch/pull/4001)) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) @@ -44,6 +47,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) ### Changed + - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) @@ -59,8 +63,10 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() ([#4582](https://github.com/opensearch-project/OpenSearch/pull/4582)) - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) - Improve Gradle pre-commit checks to pre-empt Jenkins build
([#4660](https://github.com/opensearch-project/OpenSearch/pull/4660)) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) + ### Deprecated ### Removed @@ -68,6 +74,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) ### Fixed + - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305)) @@ -104,10 +111,13 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) ### Security + - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) ## [2.x] + ### Added + - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) - Added RestLayer Changes for PIT stats ([#4217](https://github.com/opensearch-project/OpenSearch/pull/4217)) @@ -123,11 +133,11 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Removed ### Fixed + - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) ### Security - [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD [2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x From 89aaa9ecbea3be7341e5ad6fd3bb8d888b88b3bb Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 4 Oct 2022 14:28:43 -0400 Subject: [PATCH 021/122] Backport Apache Lucene version change for 2.4.0 (#4677) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 2 +- server/src/main/java/org/opensearch/Version.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e277c4dc965a..7f303bdf3989d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,7 +65,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) - Improve Gradle pre-commit checks to pre-empt Jenkins build ([#4660](https://github.com/opensearch-project/OpenSearch/pull/4660)) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) - +- Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 9c53a0f449a40..1bffe9ec98ec5 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -99,7 +99,7 @@ public class Version implements 
Comparable, ToXContentFragment { public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version CURRENT = V_3_0_0; From e403799fbccee4fc14208eab0d88ace20f96f2fe Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 4 Oct 2022 14:37:32 -0400 Subject: [PATCH 022/122] Attempt to fix Github workflow for Gradle Check job (#4679) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- .github/workflows/gradle-check.yml | 3 +-- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 3e3be32bc31e6..892d04936b743 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -8,14 +8,13 @@ on: pull_request_target: types: [opened, synchronize, reopened] workflow_run: - workflows: [Gradle Precommit] + workflows: ["Gradle Precommit"] types: - completed jobs: gradle-check: runs-on: ubuntu-latest - if: ${{ github.event.workflow_run.conclusion == 'success' }} timeout-minutes: 130 steps: - name: Checkout OpenSearch repo diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f303bdf3989d..ed18c18cce38d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -109,6 +109,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) - [Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) - [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) +- Attempt to fix Github workflow for Gradle Check job ([#4679](https://github.com/opensearch-project/OpenSearch/pull/4679)) ### Security From 05d11b2c401811a1d3185a709f9dc9181ca4be9d Mon Sep 17 00:00:00 2001 From: Sarat Vemulapalli Date: Tue, 4 Oct 2022 15:12:36 -0700 Subject: [PATCH 023/122] Adding precommit support for Windows (#4676) * First draft for Windows precommit Signed-off-by: Sarat Vemulapalli * Adding changelog Signed-off-by: Sarat Vemulapalli Signed-off-by: Sarat Vemulapalli --- .github/workflows/precommit.yml | 7 +++++-- CHANGELOG.md | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index edf380c93614e..7dcc112f38abd 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -1,9 +1,12 @@ name: Gradle Precommit on: [pull_request] - + jobs: precommit: - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest] steps: - uses: actions/checkout@v2 - name: Set up JDK 11 diff --git a/CHANGELOG.md b/CHANGELOG.md index ed18c18cce38d..d20a934588c4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ Inspired from [Keep a 
Changelog](https://keepachangelog.com/en/1.0.0/) - Added missing javadocs for `:example-plugins` modules ([#4540](https://github.com/opensearch-project/OpenSearch/pull/4540)) - Add missing Javadoc tag descriptions in unit tests ([#4629](https://github.com/opensearch-project/OpenSearch/pull/4629)) - Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) +- Added precommit support for windows ([#4676](https://github.com/opensearch-project/OpenSearch/pull/4676)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 From ada2467fb58dbd9375299ca0f140663e6796e934 Mon Sep 17 00:00:00 2001 From: Sarat Vemulapalli Date: Tue, 4 Oct 2022 18:07:24 -0700 Subject: [PATCH 024/122] Adding precommit support for MacOS (darwin) (#4682) * First draft for Windows precommit Signed-off-by: Sarat Vemulapalli * Adding changelog Signed-off-by: Sarat Vemulapalli * Adding precommit for macos Signed-off-by: Sarat Vemulapalli * Adding Changelog Signed-off-by: Sarat Vemulapalli Signed-off-by: Sarat Vemulapalli --- .github/workflows/precommit.yml | 2 +- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index 7dcc112f38abd..e264d65cdf191 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -6,7 +6,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, windows-latest] + os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v2 - name: Set up JDK 11 diff --git a/CHANGELOG.md b/CHANGELOG.md index d20a934588c4b..8da30638047d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add missing Javadoc tag descriptions in unit tests ([#4629](https://github.com/opensearch-project/OpenSearch/pull/4629)) - Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) - Added precommit support for windows ([#4676](https://github.com/opensearch-project/OpenSearch/pull/4676)) +- Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 From e1891794786ea5e6b8f5a9199ed66ea465acfb92 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Tue, 4 Oct 2022 23:50:09 -0700 Subject: [PATCH 025/122] Fix flaky DecommissionControllerTests.testTimesOut (#4683) This test fails pretty reliably if I run it on repeat. I believe the problem is that the test assumes the function will take longer than 2ms, which is likely not a valid assumption in all cases. Fortunately, I can pass in a zero duration which is guaranteed to timeout even if the system clock does not advance at all. Also moved the assertions out of the callback into the main test method, otherwise the assertion error messages would get buried and the test report would just show a timeout error. 
Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../DecommissionControllerTests.java | 18 +++++++++++------- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8da30638047d5..3c22a0a728ee7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -112,6 +112,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) - [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) - Attempt to fix Github workflow for Gradle Check job ([#4679](https://github.com/opensearch-project/OpenSearch/pull/4679)) +- Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) ### Security diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java index 8b5343184dabd..5b2dce277189c 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.decommission; +import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.Before; import org.opensearch.OpenSearchTimeoutException; @@ -45,6 +46,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -53,6 +55,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; import static org.opensearch.cluster.ClusterState.builder; import static org.opensearch.cluster.OpenSearchAllocationTestCase.createAllocationService; @@ -213,24 +216,25 @@ public void testTimesOut() throws InterruptedException { nodesToBeRemoved.add(clusterService.state().nodes().get("node13")); nodesToBeRemoved.add(clusterService.state().nodes().get("node14")); nodesToBeRemoved.add(clusterService.state().nodes().get("node15")); + final AtomicReference exceptionReference = new AtomicReference<>(); decommissionController.removeDecommissionedNodes( nodesToBeRemoved, "unit-test-timeout", - TimeValue.timeValueMillis(2), - new ActionListener() { + TimeValue.timeValueMillis(0), + new ActionListener<>() { @Override - public void onResponse(Void unused) { - fail("response shouldn't have been called"); - } + public void onResponse(Void unused) {} @Override public void onFailure(Exception e) { - assertThat(e, instanceOf(OpenSearchTimeoutException.class)); - assertThat(e.getMessage(), containsString("waiting for removal of decommissioned nodes")); + exceptionReference.set(e); countDownLatch.countDown(); } } ); + MatcherAssert.assertThat("Expected onFailure to be called", exceptionReference.get(), notNullValue()); + MatcherAssert.assertThat(exceptionReference.get(), instanceOf(OpenSearchTimeoutException.class)); + MatcherAssert.assertThat(exceptionReference.get().getMessage(), containsString("waiting for removal of decommissioned nodes")); assertTrue(countDownLatch.await(30, 
TimeUnit.SECONDS)); } From 39412266abffc0771c14f8178630dc4967ee2de8 Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Wed, 5 Oct 2022 07:12:46 -0700 Subject: [PATCH 026/122] Include Windows OS in Bootstrap initializeNatives() check for definitelyRunningAsRoot() (#4656) * Bump jna from 5.11.0 to 5.12.1 Signed-off-by: Daniel Widdis * Include Windows OS in check for definitelyRunningAsRoot() Signed-off-by: Daniel Widdis * Combine try/catch blocks Signed-off-by: Daniel Widdis * Someone sniped my PR number between predicting and pushing Signed-off-by: Daniel Widdis * Update SHAs Signed-off-by: Daniel Widdis Signed-off-by: Daniel Widdis --- CHANGELOG.md | 2 + buildSrc/build.gradle | 2 +- buildSrc/version.properties | 2 +- server/licenses/jna-5.12.1.jar.sha1 | 1 + server/licenses/jna-5.5.0.jar.sha1 | 1 - .../bootstrap/JNAAdvapi32Library.java | 124 ++++++++++++++++++ .../org/opensearch/bootstrap/JNANatives.java | 44 ++++++- 7 files changed, 169 insertions(+), 7 deletions(-) create mode 100644 server/licenses/jna-5.12.1.jar.sha1 delete mode 100644 server/licenses/jna-5.5.0.jar.sha1 create mode 100644 server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c22a0a728ee7..95f78cb7d243d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `reactor-core` from 3.4.18 to 3.4.23 ([#4548](https://github.com/opensearch-project/OpenSearch/pull/4548)) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) +- Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) ### Changed @@ -64,6 +65,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Further simplification of the ZIP publication implementation ([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() ([#4582](https://github.com/opensearch-project/OpenSearch/pull/4582)) +- Include Windows OS in Bootstrap initializeNatives() check for definitelyRunningAsRoot() ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) - Improve Gradle pre-commit checks to pre-empt Jenkins build ([#4660](https://github.com/opensearch-project/OpenSearch/pull/4660)) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 3ef3c6f9faf49..0b23631816fe9 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -110,7 +110,7 @@ dependencies { api 'com.netflix.nebula:gradle-info-plugin:11.3.3' api 'org.apache.rat:apache-rat:0.13' api 'commons-io:commons-io:2.7' - api "net.java.dev.jna:jna:5.11.0" + api "net.java.dev.jna:jna:5.12.1" api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2' api 'org.jdom:jdom2:2.0.6.1' api 'org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.7.10' diff --git 
a/buildSrc/version.properties b/buildSrc/version.properties index a9545f5738efa..bf72245c63918 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -19,7 +19,7 @@ slf4j = 1.7.36 asm = 9.3 # when updating the JNA version, also update the version in buildSrc/build.gradle -jna = 5.5.0 +jna = 5.12.1 netty = 4.1.79.Final joda = 2.10.13 diff --git a/server/licenses/jna-5.12.1.jar.sha1 b/server/licenses/jna-5.12.1.jar.sha1 new file mode 100644 index 0000000000000..0d42f248c1afd --- /dev/null +++ b/server/licenses/jna-5.12.1.jar.sha1 @@ -0,0 +1 @@ +b1e93a735caea94f503e95e6fe79bf9cdc1e985d \ No newline at end of file diff --git a/server/licenses/jna-5.5.0.jar.sha1 b/server/licenses/jna-5.5.0.jar.sha1 deleted file mode 100644 index 5621dfc743dd0..0000000000000 --- a/server/licenses/jna-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e0845217c4907822403912ad6828d8e0b256208 diff --git a/server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java b/server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java new file mode 100644 index 0000000000000..09b92c00684f4 --- /dev/null +++ b/server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.bootstrap; + +import com.sun.jna.Native; +import com.sun.jna.Pointer; +import com.sun.jna.ptr.IntByReference; +import com.sun.jna.ptr.PointerByReference; +import com.sun.jna.Structure; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; + +import java.util.List; + +/** + * Library for Windows/Advapi32 + * + * @opensearch.internal + */ +final class JNAAdvapi32Library { + + private static final Logger logger = LogManager.getLogger(JNAAdvapi32Library.class); + + private static final class Holder { + private static final JNAAdvapi32Library instance = new JNAAdvapi32Library(); + } + + private JNAAdvapi32Library() { + if (Constants.WINDOWS) { + try { + Native.register("advapi32"); + logger.debug("windows/Advapi32 library loaded"); + } catch (NoClassDefFoundError e) { + logger.warn("JNA not found. native methods and handlers will be disabled."); + } catch (UnsatisfiedLinkError e) { + logger.warn("unable to link Windows/Advapi32 library. native methods and handlers will be disabled."); + } + } + } + + static JNAAdvapi32Library getInstance() { + return Holder.instance; + } + + /** + * Access right required to query an access token. + * Used by {@link #OpenProcessToken(Pointer, int, PointerByReference)}. + * + * https://learn.microsoft.com/en-us/windows/win32/secauthz/access-rights-for-access-token-objects + */ + public static final int TOKEN_QUERY = 0x0008; + + /** + * TOKEN_INFORMATION_CLASS enumeration value that specifies the type of information being assigned to or retrieved from an access token. + * Used by {@link #GetTokenInformation(Pointer, int, Structure, int, IntByReference)}. + * + * https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-token_information_class + */ + public static final int TOKEN_ELEVATION = 0x14; + + /** + * Native call to the Advapi32 API to open the access token associated with a process. + * + * @param processHandle Handle to the process whose access token is opened. + * The process must have the PROCESS_QUERY_INFORMATION access permission. 
+ * @param desiredAccess Specifies an access mask that specifies the requested types of access to the access token. + * These requested access types are compared with the discretionary access control list (DACL) of the token to determine which accesses are granted or denied. + * @param tokenHandle Pointer to a handle that identifies the newly opened access token when the function returns. + * @return If the function succeeds, the return value is true. + * If the function fails, the return value is false. + * To get extended error information, call GetLastError. + */ + native boolean OpenProcessToken(Pointer processHandle, int desiredAccess, PointerByReference tokenHandle); + + /** + * Retrieves a specified type of information about an access token. + * The calling process must have appropriate access rights to obtain the information. + * + * @param tokenHandle Handle to an access token from which information is retrieved. + * If TokenInformationClass specifies TokenSource, the handle must have TOKEN_QUERY_SOURCE access. + * For all other TokenInformationClass values, the handle must have TOKEN_QUERY access. + * @param tokenInformationClass Specifies a value from the TOKEN_INFORMATION_CLASS enumerated type to identify the type of information the function retrieves. + * @param tokenInformation Pointer to a buffer the function fills with the requested information. + * The structure put into this buffer depends upon the type of information specified by the TokenInformationClass parameter. + * @param tokenInformationLength Specifies the size, in bytes, of the buffer pointed to by the TokenInformation parameter. + * If TokenInformation is NULL, this parameter must be zero. + * @param returnLength Pointer to a variable that receives the number of bytes needed for the buffer pointed to by the TokenInformation parameter. + * If this value is larger than the value specified in the TokenInformationLength parameter, the function fails and stores no data in the buffer. + * @return If the function succeeds, the return value is true. + * If the function fails, the return value is zero. + * To get extended error information, call GetLastError. + */ + native boolean GetTokenInformation( + Pointer tokenHandle, + int tokenInformationClass, + Structure tokenInformation, + int tokenInformationLength, + IntByReference returnLength + ); + + /** + * The TOKEN_ELEVATION structure indicates whether a token has elevated privileges. + * + * https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-token_elevation + */ + public static class TokenElevation extends Structure { + /** + * A nonzero value if the token has elevated privileges; otherwise, a zero value. 
+ */ + public int TokenIsElevated; + + @Override + protected List getFieldOrder() { + return List.of("TokenIsElevated"); + } + } +} diff --git a/server/src/main/java/org/opensearch/bootstrap/JNANatives.java b/server/src/main/java/org/opensearch/bootstrap/JNANatives.java index 033596033b0fd..12f65f8e4a216 100644 --- a/server/src/main/java/org/opensearch/bootstrap/JNANatives.java +++ b/server/src/main/java/org/opensearch/bootstrap/JNANatives.java @@ -35,14 +35,19 @@ import com.sun.jna.Native; import com.sun.jna.Pointer; import com.sun.jna.WString; +import com.sun.jna.ptr.IntByReference; +import com.sun.jna.ptr.PointerByReference; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; +import org.opensearch.bootstrap.JNAAdvapi32Library.TokenElevation; import org.opensearch.monitor.jvm.JvmInfo; import java.nio.file.Path; +import static org.opensearch.bootstrap.JNAAdvapi32Library.TOKEN_ELEVATION; +import static org.opensearch.bootstrap.JNAAdvapi32Library.TOKEN_QUERY; import static org.opensearch.bootstrap.JNAKernel32Library.SizeT; /** @@ -185,13 +190,44 @@ static String rlimitToString(long value) { /** Returns true if user is root, false if not, or if we don't know */ static boolean definitelyRunningAsRoot() { - if (Constants.WINDOWS) { - return false; // don't know - } try { + if (Constants.WINDOWS) { + JNAKernel32Library kernel32 = JNAKernel32Library.getInstance(); + JNAAdvapi32Library advapi32 = JNAAdvapi32Library.getInstance(); + + // Fetch a pseudo handle for the current process. + // The pseudo handle need not be closed when it is no longer needed (calling CloseHandle is a no-op). + Pointer process = kernel32.GetCurrentProcess(); + PointerByReference hToken = new PointerByReference(); + // Fetch the process token for the current process, for which we know we have the access rights + if (!advapi32.OpenProcessToken(process, TOKEN_QUERY, hToken)) { + logger.warn( + "Unable to open the Process Token for the current process [" + JNACLibrary.strerror(Native.getLastError()) + "]" + ); + return false; + } + // We have successfully opened the token. Ensure it gets closed after we use it. 
+ try { + TokenElevation elevation = new TokenElevation(); + IntByReference returnLength = new IntByReference(); + if (!advapi32.GetTokenInformation(hToken.getValue(), TOKEN_ELEVATION, elevation, elevation.size(), returnLength)) { + logger.warn( + "Unable to get TokenElevation information for the current process [" + + JNACLibrary.strerror(Native.getLastError()) + + "]" + ); + return false; + } + // Nonzero value means elevated privileges + return elevation.TokenIsElevated > 0; + } finally { + kernel32.CloseHandle(hToken.getValue()); + } + } + // For unix-based systems, check effective user ID of process return JNACLibrary.geteuid() == 0; } catch (UnsatisfiedLinkError e) { - // this will have already been logged by Kernel32Library, no need to repeat it + // this will have already been logged by Native Library, no need to repeat it return false; } } From e354461d08f6aa1b4d72b6ec2fb760395886e4cf Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Wed, 5 Oct 2022 08:03:11 -0700 Subject: [PATCH 027/122] Add release notes for 1.3.6 (#4681) Signed-off-by: Rishikesh1159 Signed-off-by: Rishikesh1159 --- CHANGELOG.md | 2 ++ release-notes/opensearch.release-notes-1.3.6.md | 10 ++++++++++ 2 files changed, 12 insertions(+) create mode 100644 release-notes/opensearch.release-notes-1.3.6.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 95f78cb7d243d..290fd5d74fe80 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,7 +26,9 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add missing Javadoc tag descriptions in unit tests ([#4629](https://github.com/opensearch-project/OpenSearch/pull/4629)) - Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) - Added precommit support for windows ([#4676](https://github.com/opensearch-project/OpenSearch/pull/4676)) +- Added release notes for 1.3.6 ([#4681](https://github.com/opensearch-project/OpenSearch/pull/4681)) - Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/release-notes/opensearch.release-notes-1.3.6.md b/release-notes/opensearch.release-notes-1.3.6.md new file mode 100644 index 0000000000000..63db5390ae5e2 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.6.md @@ -0,0 +1,10 @@ +## 2022-10-04 Version 1.3.6 Release Notes + +### Upgrades +* Update to Netty 4.1.80.Final ([#4379](https://github.com/opensearch-project/OpenSearch/pull/4379)) +* Revert to Netty 4.1.79.Final ([#4433](https://github.com/opensearch-project/OpenSearch/pull/4433)) +* Bumps jackson and snakeyaml dependencies ([#4600](https://github.com/opensearch-project/OpenSearch/pull/4600)) +* Bump commons-configuration2 from 2.7 to 2.8.0 in /plugins/repository-hdfs ([#4646](https://github.com/opensearch-project/OpenSearch/pull/4646)) + +### Bug Fixes +* Set analyzer to regex query string search ([#4220](https://github.com/opensearch-project/OpenSearch/pull/4220)) From 3186935deec572fcde2e49fd9264810c42d44c99 Mon Sep 17 00:00:00 2001 From: pranikum <109206473+pranikum@users.noreply.github.com> Date: Thu, 6 Oct 2022 09:49:02 +0530 Subject: [PATCH 028/122] Service Layer changes for Recommission API (#4320) * Recommission API service level changes Signed-off-by: pranikum <109206473+pranikum@users.noreply.github.com> --- CHANGELOG.md | 2 +- 
.../decommission/DecommissionService.java | 50 +++++++++++ .../DecommissionServiceTests.java | 85 +++++++++++++++++++ 3 files changed, 136 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 290fd5d74fe80..6b7b7b29cc904 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,7 +28,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added precommit support for windows ([#4676](https://github.com/opensearch-project/OpenSearch/pull/4676)) - Added release notes for 1.3.6 ([#4681](https://github.com/opensearch-project/OpenSearch/pull/4681)) - Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) - +- Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index c31ea53a5fd16..fcab411f073ba 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -14,6 +14,7 @@ import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -481,4 +482,53 @@ public void onFailure(Exception e) { } }; } + + public void startRecommissionAction(final ActionListener listener) { + /* + * For abandoned requests, we might not really know if it actually restored the exclusion list. + * And can land up in cases where even after recommission, exclusions are set(which is unexpected). + * And by definition of OpenSearch - Clusters should have no voting configuration exclusions in normal operation. + * Once the excluded nodes have stopped, clear the voting configuration exclusions with DELETE /_cluster/voting_config_exclusions. + * And hence it is safe to remove the exclusion if any. User should make conscious choice before decommissioning awareness attribute. 
+ */ + decommissionController.clearVotingConfigExclusion(new ActionListener() { + @Override + public void onResponse(Void unused) { + logger.info("successfully cleared voting config exclusion for deleting the decommission."); + deleteDecommissionState(listener); + } + + @Override + public void onFailure(Exception e) { + logger.error("Failure in clearing voting config during delete_decommission request.", e); + listener.onFailure(e); + } + }, false); + } + + void deleteDecommissionState(ActionListener listener) { + clusterService.submitStateUpdateTask("delete_decommission_state", new ClusterStateUpdateTask(Priority.URGENT) { + @Override + public ClusterState execute(ClusterState currentState) { + logger.info("Deleting the decommission attribute from the cluster state"); + Metadata metadata = currentState.metadata(); + Metadata.Builder mdBuilder = Metadata.builder(metadata); + mdBuilder.removeCustom(DecommissionAttributeMetadata.TYPE); + return ClusterState.builder(currentState).metadata(mdBuilder).build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error(() -> new ParameterizedMessage("Failed to clear decommission attribute. [{}]", source), e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + // Cluster state processed for deleting the decommission attribute. + assert newState.metadata().decommissionAttributeMetadata() == null; + listener.onResponse(new AcknowledgedResponse(true)); + } + }); + } } diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 61a6662ef0cf3..840ce1634a68e 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -11,9 +11,13 @@ import org.hamcrest.Matchers; import org.junit.After; import org.junit.Before; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -30,6 +34,7 @@ import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; import java.util.Collections; @@ -201,6 +206,86 @@ public void onFailure(Exception e) { assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } + public void testClearClusterDecommissionState() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-2"); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.SUCCESSFUL + ); + ClusterState state = ClusterState.builder(new ClusterName("test")) + 
.metadata(Metadata.builder().putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata).build()) + .build(); + + ActionListener listener = new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse decommissionResponse) { + DecommissionAttributeMetadata metadata = clusterService.state().metadata().custom(DecommissionAttributeMetadata.TYPE); + assertNull(metadata); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("on failure shouldn't have been called"); + countDownLatch.countDown(); + } + }; + + this.decommissionService.deleteDecommissionState(listener); + + // Decommission Attribute should be removed. + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + + public void testDeleteDecommissionAttributeClearVotingExclusion() { + TransportService mockTransportService = Mockito.mock(TransportService.class); + Mockito.when(mockTransportService.getLocalNode()).thenReturn(Mockito.mock(DiscoveryNode.class)); + DecommissionService decommissionService = new DecommissionService( + Settings.EMPTY, + clusterSettings, + clusterService, + mockTransportService, + threadPool, + allocationService + ); + decommissionService.startRecommissionAction(Mockito.mock(ActionListener.class)); + + ArgumentCaptor clearVotingConfigExclusionsRequestArgumentCaptor = ArgumentCaptor.forClass( + ClearVotingConfigExclusionsRequest.class + ); + Mockito.verify(mockTransportService) + .sendRequest( + Mockito.any(DiscoveryNode.class), + Mockito.anyString(), + clearVotingConfigExclusionsRequestArgumentCaptor.capture(), + Mockito.any(TransportResponseHandler.class) + ); + + ClearVotingConfigExclusionsRequest request = clearVotingConfigExclusionsRequestArgumentCaptor.getValue(); + assertFalse(request.getWaitForRemoval()); + } + + public void testClusterUpdateTaskForDeletingDecommission() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + assertTrue(response.isAcknowledged()); + assertNull(clusterService.state().metadata().decommissionAttributeMetadata()); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("On Failure shouldn't have been called"); + countDownLatch.countDown(); + } + }; + decommissionService.deleteDecommissionState(listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + private ClusterState addDataNodes(ClusterState clusterState, String zone, String... 
nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newDataNode(nodeId, singletonMap("zone", zone)))); From 563122ed66805a3e7518ec8b4f2e47ac8f49297b Mon Sep 17 00:00:00 2001 From: Anshu Agarwal Date: Thu, 6 Oct 2022 12:29:52 +0530 Subject: [PATCH 029/122] Add GET api to get shard routing weights (#4275) * Weighted round-robin scheduling policy for shard coordination traffic routing Signed-off-by: Anshu Agarwal --- CHANGELOG.md | 1 + .../client/RestHighLevelClientTests.java | 1 + .../api/cluster.get_weighted_routing.json | 25 ++ .../cluster/routing/WeightedRoutingIT.java | 245 ++++++++++++++++ .../org/opensearch/action/ActionModule.java | 5 + .../get/ClusterGetWeightedRoutingAction.java | 25 ++ .../get/ClusterGetWeightedRoutingRequest.java | 62 +++++ ...usterGetWeightedRoutingRequestBuilder.java | 37 +++ .../ClusterGetWeightedRoutingResponse.java | 125 +++++++++ .../TransportGetWeightedRoutingAction.java | 111 ++++++++ .../routing/weighted/get/package-info.java | 10 + .../TransportAddWeightedRoutingAction.java | 43 +-- .../opensearch/client/ClusterAdminClient.java | 18 ++ .../java/org/opensearch/client/Requests.java | 10 + .../client/support/AbstractClient.java | 22 ++ .../routing/WeightedRoutingService.java | 41 ++- .../RestClusterGetWeightedRoutingAction.java | 52 ++++ ...ClusterGetWeightedRoutingRequestTests.java | 39 +++ ...lusterGetWeightedRoutingResponseTests.java | 38 +++ ...ransportGetWeightedRoutingActionTests.java | 262 ++++++++++++++++++ .../routing/WeightedRoutingServiceTests.java | 19 +- 21 files changed, 1152 insertions(+), 39 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/package-info.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetWeightedRoutingAction.java create mode 100644 server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/ClusterGetWeightedRoutingRequestTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/ClusterGetWeightedRoutingResponseTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b7b7b29cc904..868ce6ed7bf9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,6 +62,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [CCR] Add getHistoryOperationsFromTranslog 
method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) - [Remote Store] Change behaviour in replica recovery for remote translog enabled indices ([#4318](https://github.com/opensearch-project/OpenSearch/pull/4318)) - PUT api for weighted shard routing ([#4272](https://github.com/opensearch-project/OpenSearch/pull/4272)) +- GET api for weighted shard routing([#4275](https://github.com/opensearch-project/OpenSearch/pull/4275/)) - Unmute test RelocationIT.testRelocationWhileIndexingRandom ([#4580](https://github.com/opensearch-project/OpenSearch/pull/4580)) - Add DecommissionService and helper to execute awareness attribute decommissioning ([#4084](https://github.com/opensearch-project/OpenSearch/pull/4084)) - Further simplification of the ZIP publication implementation ([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 07bf98a8ff0a3..1b9c7d4aead12 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -890,6 +890,7 @@ public void testApiNamingConventions() throws Exception { "search_shards", "remote_store.restore", "cluster.put_weighted_routing", + "cluster.get_weighted_routing", "cluster.put_decommission_awareness", "cluster.get_decommission_awareness", }; List booleanReturnMethods = Arrays.asList("security.enable_user", "security.disable_user", "security.change_password"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json new file mode 100644 index 0000000000000..45eb3d2b62a84 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json @@ -0,0 +1,25 @@ +{ + "cluster.get_weighted_routing": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/weighted-routing/get", + "description": "Fetches weighted shard routing weights" + }, + "stability": "stable", + "url": { + "paths": [ + { + "path": "/_cluster/routing/awareness/{attribute}/weights", + "methods": [ + "GET" + ], + "parts": { + "attribute": { + "type": "string", + "description": "Awareness attribute name" + } + } + } + ] + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java new file mode 100644 index 0000000000000..6cf8292095c6a --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java @@ -0,0 +1,245 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.routing; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 3) +public class WeightedRoutingIT extends OpenSearchIntegTestCase { + + public void testPutWeightedRouting() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + logger.info("--> starting 6 nodes on different zones"); + int nodeCountPerAZ = 2; + + logger.info("--> starting a dedicated cluster manager node"); + internalCluster().startClusterManagerOnlyNode(Settings.builder().put(commonSettings).build()); + + logger.info("--> starting 1 nodes on zones 'a' & 'b' & 'c'"); + List nodes_in_zone_a = internalCluster().startDataOnlyNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ); + List nodes_in_zone_b = internalCluster().startDataOnlyNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ); + List nodes_in_zone_c = internalCluster().startDataOnlyNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("7").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertEquals(response.isAcknowledged(), true); + + // put call made on a data node in zone a + response = internalCluster().client(randomFrom(nodes_in_zone_a.get(0), nodes_in_zone_a.get(1))) + .admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertEquals(response.isAcknowledged(), true); + } + + public void testPutWeightedRouting_InvalidAwarenessAttribute() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + 
logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone1", weights); + + assertThrows( + IllegalArgumentException.class, + () -> client().admin().cluster().prepareWeightedRouting().setWeightedRouting(weightedRouting).get() + ); + } + + public void testPutWeightedRouting_MoreThanOneZoneHasZeroWeight() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 0.0, "c", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone1", weights); + + assertThrows( + IllegalArgumentException.class, + () -> client().admin().cluster().prepareWeightedRouting().setWeightedRouting(weightedRouting).get() + ); + } + + public void testGetWeightedRouting_WeightsNotSet() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + ClusterGetWeightedRoutingResponse weightedRoutingResponse = client().admin() + .cluster() + .prepareGetWeightedRouting() + .setAwarenessAttribute("zone") + .get(); + assertNull(weightedRoutingResponse.weights()); + } + + public void testGetWeightedRouting_WeightsAreSet() throws IOException { + + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + int nodeCountPerAZ = 2; + + logger.info("--> starting a dedicated cluster manager node"); + internalCluster().startClusterManagerOnlyNode(Settings.builder().put(commonSettings).build()); + + logger.info("--> starting 2 nodes on zones 'a' & 'b' & 'c'"); + List nodes_in_zone_a = internalCluster().startDataOnlyNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ); + List nodes_in_zone_b = internalCluster().startDataOnlyNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ); + List nodes_in_zone_c = internalCluster().startDataOnlyNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + 
logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("7").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + // put api call to set weights + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertEquals(response.isAcknowledged(), true); + + // get api call to fetch weights + ClusterGetWeightedRoutingResponse weightedRoutingResponse = client().admin() + .cluster() + .prepareGetWeightedRouting() + .setAwarenessAttribute("zone") + .get(); + assertEquals(weightedRouting, weightedRoutingResponse.weights()); + + // get api to fetch local node weight for a node in zone a + weightedRoutingResponse = internalCluster().client(randomFrom(nodes_in_zone_a.get(0), nodes_in_zone_a.get(1))) + .admin() + .cluster() + .prepareGetWeightedRouting() + .setAwarenessAttribute("zone") + .setRequestLocal(true) + .get(); + assertEquals(weightedRouting, weightedRoutingResponse.weights()); + assertEquals("1.0", weightedRoutingResponse.getLocalNodeWeight()); + + // get api to fetch local node weight for a node in zone b + weightedRoutingResponse = internalCluster().client(randomFrom(nodes_in_zone_b.get(0), nodes_in_zone_b.get(1))) + .admin() + .cluster() + .prepareGetWeightedRouting() + .setAwarenessAttribute("zone") + .setRequestLocal(true) + .get(); + assertEquals(weightedRouting, weightedRoutingResponse.weights()); + assertEquals("2.0", weightedRoutingResponse.getLocalNodeWeight()); + + // get api to fetch local node weight for a node in zone c + weightedRoutingResponse = internalCluster().client(randomFrom(nodes_in_zone_c.get(0), nodes_in_zone_c.get(1))) + .admin() + .cluster() + .prepareGetWeightedRouting() + .setAwarenessAttribute("zone") + .setRequestLocal(true) + .get(); + assertEquals(weightedRouting, weightedRoutingResponse.weights()); + assertEquals("3.0", weightedRoutingResponse.getLocalNodeWeight()); + } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index ac9fd8e5fea3e..d959d6828a46b 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -83,6 +83,8 @@ import org.opensearch.action.admin.cluster.settings.TransportClusterUpdateSettingsAction; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsAction; import org.opensearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.TransportGetWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterAddWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.TransportAddWeightedRoutingAction; import org.opensearch.action.admin.cluster.snapshots.clone.CloneSnapshotAction; @@ -299,6 +301,7 @@ import org.opensearch.rest.action.admin.cluster.RestCloneSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestClusterAllocationExplainAction; import 
org.opensearch.rest.action.admin.cluster.RestClusterGetSettingsAction; +import org.opensearch.rest.action.admin.cluster.RestClusterGetWeightedRoutingAction; import org.opensearch.rest.action.admin.cluster.RestClusterHealthAction; import org.opensearch.rest.action.admin.cluster.RestClusterPutWeightedRoutingAction; import org.opensearch.rest.action.admin.cluster.RestClusterRerouteAction; @@ -573,6 +576,7 @@ public void reg actions.register(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class); actions.register(ClusterAddWeightedRoutingAction.INSTANCE, TransportAddWeightedRoutingAction.class); + actions.register(ClusterGetWeightedRoutingAction.INSTANCE, TransportGetWeightedRoutingAction.class); actions.register(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); @@ -759,6 +763,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestOpenIndexAction()); registerHandler.accept(new RestAddIndexBlockAction()); registerHandler.accept(new RestClusterPutWeightedRoutingAction()); + registerHandler.accept(new RestClusterGetWeightedRoutingAction()); registerHandler.accept(new RestUpdateSettingsAction()); registerHandler.accept(new RestGetSettingsAction()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingAction.java new file mode 100644 index 0000000000000..7662b7cc6fcc8 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.get; + +import org.opensearch.action.ActionType; + +/** + * Action to get weights for weighted round-robin search routing policy. + * + * @opensearch.internal + */ +public class ClusterGetWeightedRoutingAction extends ActionType { + public static final ClusterGetWeightedRoutingAction INSTANCE = new ClusterGetWeightedRoutingAction(); + public static final String NAME = "cluster:admin/routing/awareness/weights/get"; + + private ClusterGetWeightedRoutingAction() { + super(NAME, ClusterGetWeightedRoutingResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java new file mode 100644 index 0000000000000..aaa000baa95f3 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.get; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * Request to get weights for weighted round-robin search routing policy. + * + * @opensearch.internal + */ +public class ClusterGetWeightedRoutingRequest extends ClusterManagerNodeReadRequest { + + String awarenessAttribute; + + public String getAwarenessAttribute() { + return awarenessAttribute; + } + + public void setAwarenessAttribute(String awarenessAttribute) { + this.awarenessAttribute = awarenessAttribute; + } + + public ClusterGetWeightedRoutingRequest(String awarenessAttribute) { + this.awarenessAttribute = awarenessAttribute; + } + + public ClusterGetWeightedRoutingRequest() {} + + public ClusterGetWeightedRoutingRequest(StreamInput in) throws IOException { + super(in); + awarenessAttribute = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(awarenessAttribute); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (awarenessAttribute == null || awarenessAttribute.isEmpty()) { + validationException = addValidationError("Awareness attribute is missing", validationException); + } + return validationException; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java new file mode 100644 index 0000000000000..82f4c1106461d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.get; + +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; +import org.opensearch.client.OpenSearchClient; + +/** + * Request builder to get weights for weighted round-robin search routing policy. 
+ * + * @opensearch.internal + */ +public class ClusterGetWeightedRoutingRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< + ClusterGetWeightedRoutingRequest, + ClusterGetWeightedRoutingResponse, + ClusterGetWeightedRoutingRequestBuilder> { + + public ClusterGetWeightedRoutingRequestBuilder(OpenSearchClient client, ClusterGetWeightedRoutingAction action) { + super(client, action, new ClusterGetWeightedRoutingRequest()); + } + + public ClusterGetWeightedRoutingRequestBuilder setRequestLocal(boolean local) { + request.local(local); + return this; + } + + public ClusterGetWeightedRoutingRequestBuilder setAwarenessAttribute(String attribute) { + request.setAwarenessAttribute(attribute); + return this; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java new file mode 100644 index 0000000000000..bb77576b63d20 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java @@ -0,0 +1,125 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.get; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.ActionResponse; + +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * Response from fetching weights for weighted round-robin search routing policy. 
+ * + * @opensearch.internal + */ +public class ClusterGetWeightedRoutingResponse extends ActionResponse implements ToXContentObject { + private WeightedRouting weightedRouting; + private String localNodeWeight; + private static final String NODE_WEIGHT = "node_weight"; + + public String getLocalNodeWeight() { + return localNodeWeight; + } + + ClusterGetWeightedRoutingResponse() { + this.weightedRouting = null; + } + + public ClusterGetWeightedRoutingResponse(String localNodeWeight, WeightedRouting weightedRouting) { + this.localNodeWeight = localNodeWeight; + this.weightedRouting = weightedRouting; + } + + ClusterGetWeightedRoutingResponse(StreamInput in) throws IOException { + if (in.available() != 0) { + this.weightedRouting = new WeightedRouting(in); + } + } + + /** + * List of weights to return + * + * @return list or weights + */ + public WeightedRouting weights() { + return this.weightedRouting; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (weightedRouting != null) { + weightedRouting.writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (this.weightedRouting != null) { + for (Map.Entry entry : weightedRouting.weights().entrySet()) { + builder.field(entry.getKey(), entry.getValue().toString()); + } + if (localNodeWeight != null) { + builder.field(NODE_WEIGHT, localNodeWeight); + } + } + builder.endObject(); + return builder; + } + + public static ClusterGetWeightedRoutingResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + XContentParser.Token token; + String attrKey = null, attrValue = null; + String localNodeWeight = null; + Map weights = new HashMap<>(); + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + attrKey = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + attrValue = parser.text(); + if (attrKey != null && attrKey.equals(NODE_WEIGHT)) { + localNodeWeight = attrValue; + } else if (attrKey != null) { + weights.put(attrKey, Double.parseDouble(attrValue)); + } + } else { + throw new OpenSearchParseException("failed to parse weighted routing response"); + } + } + WeightedRouting weightedRouting = new WeightedRouting("", weights); + return new ClusterGetWeightedRoutingResponse(localNodeWeight, weightedRouting); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClusterGetWeightedRoutingResponse that = (ClusterGetWeightedRoutingResponse) o; + return weightedRouting.equals(that.weightedRouting) && localNodeWeight.equals(that.localNodeWeight); + } + + @Override + public int hashCode() { + return Objects.hash(weightedRouting, localNodeWeight); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java new file mode 100644 index 0000000000000..9421967a5df26 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * 
this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.get; + +import org.opensearch.action.ActionListener; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; + +import org.opensearch.cluster.metadata.WeightedRoutingMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.cluster.routing.WeightedRoutingService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; + +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for getting weights for weighted round-robin search routing policy + * + * @opensearch.internal + */ +public class TransportGetWeightedRoutingAction extends TransportClusterManagerNodeReadAction< + ClusterGetWeightedRoutingRequest, + ClusterGetWeightedRoutingResponse> { + private static final Logger logger = LogManager.getLogger(TransportGetWeightedRoutingAction.class); + private final WeightedRoutingService weightedRoutingService; + + @Inject + public TransportGetWeightedRoutingAction( + TransportService transportService, + ClusterService clusterService, + WeightedRoutingService weightedRoutingService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + ClusterGetWeightedRoutingAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + ClusterGetWeightedRoutingRequest::new, + indexNameExpressionResolver + ); + this.weightedRoutingService = weightedRoutingService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected ClusterGetWeightedRoutingResponse read(StreamInput in) throws IOException { + return new ClusterGetWeightedRoutingResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(ClusterGetWeightedRoutingRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected void clusterManagerOperation( + final ClusterGetWeightedRoutingRequest request, + ClusterState state, + final ActionListener listener + ) { + try { + weightedRoutingService.verifyAwarenessAttribute(request.getAwarenessAttribute()); + WeightedRoutingMetadata weightedRoutingMetadata = state.metadata().custom(WeightedRoutingMetadata.TYPE); + ClusterGetWeightedRoutingResponse clusterGetWeightedRoutingResponse = new ClusterGetWeightedRoutingResponse(); + String weight = null; + if (weightedRoutingMetadata != null && weightedRoutingMetadata.getWeightedRouting() != null) { + WeightedRouting weightedRouting = weightedRoutingMetadata.getWeightedRouting(); + if (request.local()) { + DiscoveryNode localNode = state.getNodes().getLocalNode(); + if (localNode.getAttributes().get(request.getAwarenessAttribute()) != null) { + String attrVal = 
localNode.getAttributes().get(request.getAwarenessAttribute()); + if (weightedRouting.weights().containsKey(attrVal)) { + weight = weightedRouting.weights().get(attrVal).toString(); + } + } + } + clusterGetWeightedRoutingResponse = new ClusterGetWeightedRoutingResponse(weight, weightedRouting); + } + listener.onResponse(clusterGetWeightedRoutingResponse); + } catch (Exception ex) { + listener.onFailure(ex); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/package-info.java new file mode 100644 index 0000000000000..45e5b32b72e50 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** get weighted-round robin shard routing weights. */ +package org.opensearch.action.admin.cluster.shards.routing.weighted.get; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java index 8c29ab2199848..249e313c1f53b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java @@ -17,20 +17,13 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.routing.WeightedRoutingService; -import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.List; -import java.util.Locale; - -import static org.opensearch.action.ValidateActions.addValidationError; /** * Transport action for updating weights for weighted round-robin search routing policy @@ -42,12 +35,9 @@ public class TransportAddWeightedRoutingAction extends TransportClusterManagerNo ClusterPutWeightedRoutingResponse> { private final WeightedRoutingService weightedRoutingService; - private volatile List awarenessAttributes; @Inject public TransportAddWeightedRoutingAction( - Settings settings, - ClusterSettings clusterSettings, TransportService transportService, ClusterService clusterService, WeightedRoutingService weightedRoutingService, @@ -65,19 +55,6 @@ public TransportAddWeightedRoutingAction( indexNameExpressionResolver ); this.weightedRoutingService = weightedRoutingService; - this.awarenessAttributes = AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer( - AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, - this::setAwarenessAttributes - ); - } - - List 
getAwarenessAttributes() { - return awarenessAttributes; - } - - private void setAwarenessAttributes(List awarenessAttributes) { - this.awarenessAttributes = awarenessAttributes; } @Override @@ -96,7 +73,12 @@ protected void clusterManagerOperation( ClusterState state, ActionListener listener ) throws Exception { - verifyAwarenessAttribute(request.getWeightedRouting().attributeName()); + try { + weightedRoutingService.verifyAwarenessAttribute(request.getWeightedRouting().attributeName()); + } catch (ActionRequestValidationException ex) { + listener.onFailure(ex); + return; + } weightedRoutingService.registerWeightedRoutingMetadata( request, ActionListener.delegateFailure( @@ -108,21 +90,8 @@ protected void clusterManagerOperation( ); } - private void verifyAwarenessAttribute(String attributeName) { - if (getAwarenessAttributes().contains(attributeName) == false) { - ActionRequestValidationException validationException = null; - - validationException = addValidationError( - String.format(Locale.ROOT, "invalid awareness attribute %s requested for updating weighted routing", attributeName), - validationException - ); - throw validationException; - } - } - @Override protected ClusterBlockException checkBlock(ClusterPutWeightedRoutingRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } - } diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index eacd7ff16e6af..c811b788d9cf6 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -92,6 +92,9 @@ import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequestBuilder; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequestBuilder; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; @@ -816,6 +819,21 @@ public interface ClusterAdminClient extends OpenSearchClient { */ ClusterPutWeightedRoutingRequestBuilder prepareWeightedRouting(); + /** + * Gets weights for weighted round-robin search routing policy. + */ + ActionFuture getWeightedRouting(ClusterGetWeightedRoutingRequest request); + + /** + * Gets weights for weighted round-robin search routing policy. + */ + void getWeightedRouting(ClusterGetWeightedRoutingRequest request, ActionListener listener); + + /** + * Gets weights for weighted round-robin search routing policy. 
+ */ + ClusterGetWeightedRoutingRequestBuilder prepareGetWeightedRouting(); + /** * Decommission awareness attribute */ diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java index a033933cf6696..c8729a0d498f4 100644 --- a/server/src/main/java/org/opensearch/client/Requests.java +++ b/server/src/main/java/org/opensearch/client/Requests.java @@ -49,6 +49,7 @@ import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; @@ -561,6 +562,15 @@ public static ClusterPutWeightedRoutingRequest putWeightedRoutingRequest(String return new ClusterPutWeightedRoutingRequest(attributeName); } + /** + * Gets weights for weighted round-robin search routing policy + * + * @return get weight request + */ + public static ClusterGetWeightedRoutingRequest getWeightedRoutingRequest(String attributeName) { + return new ClusterGetWeightedRoutingRequest(attributeName); + } + /** * Creates a new decommission request. * diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index f74acd01f107c..65199b22e8e72 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -118,6 +118,10 @@ import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequestBuilder; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterAddWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequestBuilder; @@ -1302,6 +1306,24 @@ public ClusterPutWeightedRoutingRequestBuilder prepareWeightedRouting() { return new ClusterPutWeightedRoutingRequestBuilder(this, ClusterAddWeightedRoutingAction.INSTANCE); } + @Override + public ActionFuture getWeightedRouting(ClusterGetWeightedRoutingRequest request) { + return execute(ClusterGetWeightedRoutingAction.INSTANCE, request); + } + + @Override + public void getWeightedRouting( + ClusterGetWeightedRoutingRequest request, + ActionListener listener + ) { + execute(ClusterGetWeightedRoutingAction.INSTANCE, request, listener); + } + + @Override + public ClusterGetWeightedRoutingRequestBuilder 
prepareGetWeightedRouting() { + return new ClusterGetWeightedRoutingRequestBuilder(this, ClusterGetWeightedRoutingAction.INSTANCE); + } + @Override public void deleteDanglingIndex(DeleteDanglingIndexRequest request, ActionListener listener) { execute(DeleteDanglingIndexAction.INSTANCE, request, listener); diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java index da454865ac866..7ff2b23630a3c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java @@ -12,18 +12,27 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; +import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.threadpool.ThreadPool; +import java.util.List; +import java.util.Locale; + +import static org.opensearch.action.ValidateActions.addValidationError; + /** * * Service responsible for updating cluster state metadata with weighted routing weights */ @@ -31,11 +40,22 @@ public class WeightedRoutingService { private static final Logger logger = LogManager.getLogger(WeightedRoutingService.class); private final ClusterService clusterService; private final ThreadPool threadPool; + private volatile List awarenessAttributes; @Inject - public WeightedRoutingService(ClusterService clusterService, ThreadPool threadPool) { + public WeightedRoutingService( + ClusterService clusterService, + ThreadPool threadPool, + Settings settings, + ClusterSettings clusterSettings + ) { this.clusterService = clusterService; this.threadPool = threadPool; + this.awarenessAttributes = AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, + this::setAwarenessAttributes + ); } public void registerWeightedRoutingMetadata( @@ -85,4 +105,23 @@ private boolean checkIfSameWeightsInMetadata( ) { return newWeightedRoutingMetadata.getWeightedRouting().equals(oldWeightedRoutingMetadata.getWeightedRouting()); } + + List getAwarenessAttributes() { + return awarenessAttributes; + } + + private void setAwarenessAttributes(List awarenessAttributes) { + this.awarenessAttributes = awarenessAttributes; + } + + public void verifyAwarenessAttribute(String attributeName) { + if (getAwarenessAttributes().contains(attributeName) == false) { + ActionRequestValidationException validationException = null; + validationException = addValidationError( + String.format(Locale.ROOT, "invalid awareness attribute %s requested for updating weighted routing", 
attributeName), + validationException + ); + throw validationException; + } + } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetWeightedRoutingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetWeightedRoutingAction.java new file mode 100644 index 0000000000000..7c9d1190f0b1d --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetWeightedRoutingAction.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.cluster; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; +import org.opensearch.client.Requests; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Fetch Weighted Round Robin based shard routing weights + * + * @opensearch.api + * + */ +public class RestClusterGetWeightedRoutingAction extends BaseRestHandler { + + private static final Logger logger = LogManager.getLogger(RestClusterGetWeightedRoutingAction.class); + + @Override + public List routes() { + return singletonList(new Route(GET, "/_cluster/routing/awareness/{attribute}/weights")); + } + + @Override + public String getName() { + return "get_weighted_routing_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + ClusterGetWeightedRoutingRequest getWeightedRoutingRequest = Requests.getWeightedRoutingRequest(request.param("attribute")); + getWeightedRoutingRequest.local(request.paramAsBoolean("local", getWeightedRoutingRequest.local())); + return channel -> client.admin().cluster().getWeightedRouting(getWeightedRoutingRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/ClusterGetWeightedRoutingRequestTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/ClusterGetWeightedRoutingRequestTests.java new file mode 100644 index 0000000000000..0a4dad4cbc597 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/ClusterGetWeightedRoutingRequestTests.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.action.shard.routing.weighted.get; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; +import org.opensearch.test.OpenSearchTestCase; + +public class ClusterGetWeightedRoutingRequestTests extends OpenSearchTestCase { + + public void testValidate_AwarenessAttributeIsSet() { + ClusterGetWeightedRoutingRequest request = new ClusterGetWeightedRoutingRequest(); + request.setAwarenessAttribute("zone"); + ActionRequestValidationException actionRequestValidationException = request.validate(); + assertNull(actionRequestValidationException); + } + + public void testValidate_AwarenessAttributeNotSet() { + ClusterGetWeightedRoutingRequest request = new ClusterGetWeightedRoutingRequest(); + ActionRequestValidationException actionRequestValidationException = request.validate(); + assertNotNull(actionRequestValidationException); + assertTrue(actionRequestValidationException.getMessage().contains("Awareness attribute is missing")); + } + + public void testValidate_AwarenessAttributeIsEmpty() { + ClusterGetWeightedRoutingRequest request = new ClusterGetWeightedRoutingRequest(); + request.setAwarenessAttribute(""); + ActionRequestValidationException actionRequestValidationException = request.validate(); + assertNotNull(actionRequestValidationException); + assertTrue(actionRequestValidationException.getMessage().contains("Awareness attribute is missing")); + } + +} diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/ClusterGetWeightedRoutingResponseTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/ClusterGetWeightedRoutingResponseTests.java new file mode 100644 index 0000000000000..e9add55ca774b --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/ClusterGetWeightedRoutingResponseTests.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.action.shard.routing.weighted.get; + +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.Map; + +public class ClusterGetWeightedRoutingResponseTests extends AbstractXContentTestCase { + @Override + protected ClusterGetWeightedRoutingResponse createTestInstance() { + Map weights = Map.of("zone_A", 1.0, "zone_B", 0.0, "zone_C", 1.0); + WeightedRouting weightedRouting = new WeightedRouting("", weights); + ClusterGetWeightedRoutingResponse response = new ClusterGetWeightedRoutingResponse("1", weightedRouting); + return response; + } + + @Override + protected ClusterGetWeightedRoutingResponse doParseInstance(XContentParser parser) throws IOException { + return ClusterGetWeightedRoutingResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + +} diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java new file mode 100644 index 0000000000000..f28e932e068ac --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java @@ -0,0 +1,262 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.action.shard.routing.weighted.get; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequestBuilder; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.get.TransportGetWeightedRoutingAction; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.ActionTestUtils; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.WeightedRoutingMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.cluster.routing.WeightedRoutingService; +import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.ClusterServiceUtils; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransport; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static java.util.Collections.emptySet; +import static org.mockito.Mockito.mock; + +public class TransportGetWeightedRoutingActionTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private ClusterService clusterService; + private TransportService transportService; + private WeightedRoutingService weightedRoutingService; + private TransportGetWeightedRoutingAction transportGetWeightedRoutingAction; + private ClusterSettings clusterSettings; + NodeClient client; + + final private static Set CLUSTER_MANAGER_ROLE = Collections.unmodifiableSet( + new HashSet<>(Collections.singletonList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + ); + + final private static Set DATA_ROLE = Collections.unmodifiableSet( + new HashSet<>(Collections.singletonList(DiscoveryNodeRole.DATA_ROLE)) + ); + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("test", Settings.EMPTY); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + } + + @Before + public void setUpService() { + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).build(); + clusterState = addClusterManagerNodes(clusterState); + clusterState = addDataNodes(clusterState); + clusterState = setLocalNode(clusterState, "nodeA1"); + + ClusterState.Builder builder = ClusterState.builder(clusterState); + ClusterServiceUtils.setState(clusterService, builder); + + final MockTransport transport = new MockTransport(); + transportService = transport.createTransportService( + Settings.EMPTY, + threadPool, + 
TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundTransportAddress -> clusterService.state().nodes().get("nodes1"), + null, + Collections.emptySet() + + ); + + Settings.Builder settingsBuilder = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone"); + + clusterSettings = new ClusterSettings(settingsBuilder.build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + transportService.start(); + transportService.acceptIncomingRequests(); + + this.weightedRoutingService = new WeightedRoutingService(clusterService, threadPool, settingsBuilder.build(), clusterSettings); + + this.transportGetWeightedRoutingAction = new TransportGetWeightedRoutingAction( + transportService, + clusterService, + weightedRoutingService, + threadPool, + new ActionFilters(emptySet()), + mock(IndexNameExpressionResolver.class) + ); + client = new NodeClient(Settings.EMPTY, threadPool); + } + + private ClusterState addDataNodes(ClusterState clusterState) { + clusterState = addDataNodeForAZone(clusterState, "zone_A", "nodeA1", "nodeA2", "nodeA3"); + clusterState = addDataNodeForAZone(clusterState, "zone_B", "nodeB1", "nodeB2", "nodeB3"); + clusterState = addDataNodeForAZone(clusterState, "zone_C", "nodeC1", "nodeC2", "nodeC3"); + return clusterState; + } + + private ClusterState addClusterManagerNodes(ClusterState clusterState) { + clusterState = addClusterManagerNodeForAZone(clusterState, "zone_A", "nodeMA"); + clusterState = addClusterManagerNodeForAZone(clusterState, "zone_B", "nodeMB"); + clusterState = addClusterManagerNodeForAZone(clusterState, "zone_C", "nodeMC"); + return clusterState; + } + + private ClusterState addDataNodeForAZone(ClusterState clusterState, String zone, String... nodeIds) { + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); + org.opensearch.common.collect.List.of(nodeIds) + .forEach( + nodeId -> nodeBuilder.add( + new DiscoveryNode( + nodeId, + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", zone), + DATA_ROLE, + Version.CURRENT + ) + ) + ); + clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + return clusterState; + } + + private ClusterState addClusterManagerNodeForAZone(ClusterState clusterState, String zone, String... 
nodeIds) { + + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); + org.opensearch.common.collect.List.of(nodeIds) + .forEach( + nodeId -> nodeBuilder.add( + new DiscoveryNode( + nodeId, + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", zone), + CLUSTER_MANAGER_ROLE, + Version.CURRENT + ) + ) + ); + clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + return clusterState; + } + + private ClusterState setLocalNode(ClusterState clusterState, String nodeId) { + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); + nodeBuilder.localNodeId(nodeId); + nodeBuilder.clusterManagerNodeId(nodeId); + clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + return clusterState; + } + + private ClusterState setWeightedRoutingWeights(ClusterState clusterState, Map weights) { + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting); + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); + metadataBuilder.putCustom(WeightedRoutingMetadata.TYPE, weightedRoutingMetadata); + clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); + return clusterState; + } + + public void testGetWeightedRouting_WeightsNotSetInMetadata() { + + final ClusterGetWeightedRoutingRequestBuilder request = new ClusterGetWeightedRoutingRequestBuilder( + client, + ClusterGetWeightedRoutingAction.INSTANCE + ); + request.setAwarenessAttribute("zone"); + ClusterState state = clusterService.state(); + + ClusterGetWeightedRoutingResponse response = ActionTestUtils.executeBlocking(transportGetWeightedRoutingAction, request.request()); + assertEquals(response.getLocalNodeWeight(), null); + assertEquals(response.weights(), null); + } + + public void testGetWeightedRouting_WeightsSetInMetadata() { + ClusterGetWeightedRoutingRequestBuilder request = new ClusterGetWeightedRoutingRequestBuilder( + client, + ClusterGetWeightedRoutingAction.INSTANCE + ); + request.setAwarenessAttribute("zone"); + + ClusterState state = clusterService.state(); + state = setLocalNode(state, "nodeB1"); + Map weights = Map.of("zone_A", 1.0, "zone_B", 0.0, "zone_C", 1.0); + state = setWeightedRoutingWeights(state, weights); + ClusterState.Builder builder = ClusterState.builder(state); + ClusterServiceUtils.setState(clusterService, builder); + + ClusterGetWeightedRoutingResponse response = ActionTestUtils.executeBlocking(transportGetWeightedRoutingAction, request.request()); + assertEquals(weights, response.weights().weights()); + } + + public void testGetWeightedRoutingLocalWeight_WeightsSetInMetadata() { + + ClusterGetWeightedRoutingRequestBuilder request = new ClusterGetWeightedRoutingRequestBuilder( + client, + ClusterGetWeightedRoutingAction.INSTANCE + ); + + request.setRequestLocal(true); + request.setAwarenessAttribute("zone"); + + ClusterState state = clusterService.state(); + state = setLocalNode(state, "nodeB1"); + Map weights = Map.of("zone_A", 1.0, "zone_B", 0.0, "zone_C", 1.0); + state = setWeightedRoutingWeights(state, weights); + ClusterState.Builder builder = ClusterState.builder(state); + ClusterServiceUtils.setState(clusterService, builder); + + ClusterGetWeightedRoutingResponse response = ActionTestUtils.executeBlocking(transportGetWeightedRoutingAction, request.request()); + assertEquals("0.0", response.getLocalNodeWeight()); + } + + public void 
testGetWeightedRoutingLocalWeight_WeightsNotSetInMetadata() { + + ClusterGetWeightedRoutingRequestBuilder request = new ClusterGetWeightedRoutingRequestBuilder( + client, + ClusterGetWeightedRoutingAction.INSTANCE + ); + + request.setRequestLocal(true); + request.setAwarenessAttribute("zone"); + + ClusterState state = clusterService.state(); + state = setLocalNode(state, "nodeB1"); + ClusterState.Builder builder = ClusterState.builder(state); + ClusterServiceUtils.setState(clusterService, builder); + + ClusterGetWeightedRoutingResponse response = ActionTestUtils.executeBlocking(transportGetWeightedRoutingAction, request.request()); + assertEquals(null, response.getLocalNodeWeight()); + } + + @After + public void shutdown() { + clusterService.stop(); + threadPool.shutdown(); + } + +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java index e5cca998d3f06..557c5c7ac910d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java @@ -12,6 +12,7 @@ import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterAddWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequestBuilder; import org.opensearch.client.node.NodeClient; @@ -92,7 +93,7 @@ public void setUpService() { transportService.start(); transportService.acceptIncomingRequests(); - this.weightedRoutingService = new WeightedRoutingService(clusterService, threadPool); + this.weightedRoutingService = new WeightedRoutingService(clusterService, threadPool, settingsBuilder.build(), clusterSettings); client = new NodeClient(Settings.EMPTY, threadPool); } @@ -231,4 +232,20 @@ public void onFailure(Exception e) { weightedRoutingService.registerWeightedRoutingMetadata(request.request(), listener); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } + + public void testVerifyAwarenessAttribute_InvalidAttributeName() { + assertThrows( + "invalid awareness attribute %s requested for updating weighted routing", + ActionRequestValidationException.class, + () -> weightedRoutingService.verifyAwarenessAttribute("zone2") + ); + } + + public void testVerifyAwarenessAttribute_ValidAttributeName() { + try { + weightedRoutingService.verifyAwarenessAttribute("zone"); + } catch (Exception e) { + fail("verify awareness attribute should not fail"); + } + } } From ed359f0d380659c98d9b313fba9ba4d2d6025e35 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 6 Oct 2022 11:08:17 -0700 Subject: [PATCH 030/122] Fix new race condition in DecommissionControllerTests (#4688) My previous fix introduced a new race condition by making the assertions before waiting on the latch. 
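For context, a minimal self-contained sketch of the ordering the fix restores (the class name and the simulated async call are hypothetical, not the test code): the callback counts the latch down on every completion path, and the test awaits the latch before making any assertions about state the callback populates.

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicReference;

    // Illustration only; not the DecommissionControllerTests code.
    public class LatchOrderingSketch {
        public static void main(String[] args) throws InterruptedException {
            CountDownLatch latch = new CountDownLatch(1);
            AtomicReference<Exception> failure = new AtomicReference<>();

            // The async operation reports its outcome on another thread and
            // counts the latch down on every completion path.
            new Thread(() -> {
                failure.set(new IllegalStateException("operation timed out"));
                latch.countDown();
            }).start();

            // Wait for the callback to complete first ...
            if (latch.await(30, TimeUnit.SECONDS) == false) {
                throw new AssertionError("callback never completed");
            }
            // ... and only then assert on the state it populated.
            if (failure.get() == null) {
                throw new AssertionError("expected the operation to fail");
            }
            System.out.println("observed failure: " + failure.get().getMessage());
        }
    }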
Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../cluster/decommission/DecommissionControllerTests.java | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 868ce6ed7bf9e..a9efbaed8a671 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -118,6 +118,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) - Attempt to fix Github workflow for Gradle Check job ([#4679](https://github.com/opensearch-project/OpenSearch/pull/4679)) - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) +- Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) ### Security diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java index 5b2dce277189c..06f2de04907d6 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -223,7 +223,9 @@ public void testTimesOut() throws InterruptedException { TimeValue.timeValueMillis(0), new ActionListener<>() { @Override - public void onResponse(Void unused) {} + public void onResponse(Void unused) { + countDownLatch.countDown(); + } @Override public void onFailure(Exception e) { @@ -232,10 +234,10 @@ public void onFailure(Exception e) { } } ); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); MatcherAssert.assertThat("Expected onFailure to be called", exceptionReference.get(), notNullValue()); MatcherAssert.assertThat(exceptionReference.get(), instanceOf(OpenSearchTimeoutException.class)); MatcherAssert.assertThat(exceptionReference.get().getMessage(), containsString("waiting for removal of decommissioned nodes")); - assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } public void testSuccessfulDecommissionStatusMetadataUpdate() throws InterruptedException { From a2f95ce324d7869701bb8229a28f917af08214f8 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 6 Oct 2022 16:24:31 -0400 Subject: [PATCH 031/122] Fixing SearchStats (de)serialization (#4697) Fixes bwc failure caused by commit 6993ac9834d0f7e8e77e6cade09de2245d2a20f7. 
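For context, a minimal sketch of the wire-compatibility pattern involved (the class and field below are illustrative, not the actual SearchStats code, and the package locations are assumed to match this branch): a stat introduced in 2.4.0 has to be gated on Version.V_2_4_0 on both the read and the write side, otherwise nodes on different versions disagree about the stream layout in a mixed cluster.

    import java.io.IOException;

    import org.opensearch.Version;
    import org.opensearch.common.io.stream.StreamInput;
    import org.opensearch.common.io.stream.StreamOutput;

    // Illustration only: the field is read and written solely when the other
    // side of the stream is also on 2.4.0 or later.
    class PitStatsFragment {
        private long pitCount;

        PitStatsFragment(StreamInput in) throws IOException {
            if (in.getVersion().onOrAfter(Version.V_2_4_0)) {
                pitCount = in.readVLong();
            }
        }

        void writeTo(StreamOutput out) throws IOException {
            if (out.getVersion().onOrAfter(Version.V_2_4_0)) {
                out.writeVLong(pitCount);
            }
        }
    }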
Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../rest-api-spec/test/cat.shards/10_basic.yml | 14 +++++++------- .../resources/rest-api-spec/test/pit/10_basic.yml | 4 ++-- .../opensearch/index/search/stats/SearchStats.java | 4 ++-- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9efbaed8a671..b08c21d54be73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -119,6 +119,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Attempt to fix Github workflow for Gradle Check job ([#4679](https://github.com/opensearch-project/OpenSearch/pull/4679)) - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) +- Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) ### Security diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 6ebe273d552cc..189215b6562a3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,14 +1,14 @@ --- "Help": - skip: - version: " - 2.9.99" - reason: point in time stats were added in 3.0.0 + version: " - 2.3.99" + reason: point in time stats were added in 2.4.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "3.0.0 - " + version: "2.4.0 - " - match: $body: | @@ -88,16 +88,16 @@ path.state .+ \n $/ --- -"Help before - 3.0.0": +"Help before - 2.4.0": - skip: - version: "3.0.0 - " - reason: point in time stats were added in 3.0.0 + version: "2.4.0 - " + reason: point in time stats were added in 2.4.0 features: node_selector - do: cat.shards: help: true node_selector: - version: " - 2.9.99" + version: " - 2.3.99" - match: $body: | diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml index cd0c5b9126a9d..847131ead35b6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml @@ -1,7 +1,7 @@ "Create PIT, Search with PIT ID and Delete": - skip: - version: " - 2.9.99" - reason: "mode to be introduced later than 3.0" + version: " - 2.3.99" + reason: "mode to be introduced later than 2.4.0" - do: indices.create: index: test_pit diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 38b18355fd98d..012c94639d526 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -141,7 +141,7 @@ private Stats(StreamInput in) throws IOException { suggestTimeInMillis = in.readVLong(); suggestCurrent = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_4_0)) { pitCount = in.readVLong(); pitTimeInMillis = in.readVLong(); pitCurrent = in.readVLong(); @@ -292,7 +292,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(suggestTimeInMillis); 
out.writeVLong(suggestCurrent); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_4_0)) { out.writeVLong(pitCount); out.writeVLong(pitTimeInMillis); out.writeVLong(pitCurrent); From f5ea1a4d8067896080f61ab5f88cdcd307ee3c84 Mon Sep 17 00:00:00 2001 From: Xue Zhou <85715413+xuezhou25@users.noreply.github.com> Date: Thu, 6 Oct 2022 19:46:07 -0700 Subject: [PATCH 032/122] Fixed misunderstanding message 'No OpenSearchException found' when detailed_error disabled (#4669) * Fixed misunderstanding message 'No OpenSearchException found' when detailed_error disabled Signed-off-by: Xue Zhou --- CHANGELOG.md | 1 + .../java/org/opensearch/ExceptionsHelper.java | 15 +++++++++++++++ .../java/org/opensearch/OpenSearchException.java | 4 +--- .../org/opensearch/ExceptionsHelperTests.java | 6 ++++++ .../org/opensearch/OpenSearchExceptionTests.java | 7 +------ .../opensearch/rest/BytesRestResponseTests.java | 2 +- 6 files changed, 25 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b08c21d54be73..f8bfaf5abfed3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -116,6 +116,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) - [Bug]: Fixed invalid location of JDK dependency for arm64 architecture([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) - [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) +- Fixed misunderstanding message "No OpenSearchException found" when detailed_error disabled ([#4669](https://github.com/opensearch-project/OpenSearch/pull/4669)) - Attempt to fix Github workflow for Gradle Check job ([#4679](https://github.com/opensearch-project/OpenSearch/pull/4679)) - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) diff --git a/server/src/main/java/org/opensearch/ExceptionsHelper.java b/server/src/main/java/org/opensearch/ExceptionsHelper.java index f252d0b05af79..fbfc9beaea468 100644 --- a/server/src/main/java/org/opensearch/ExceptionsHelper.java +++ b/server/src/main/java/org/opensearch/ExceptionsHelper.java @@ -99,6 +99,21 @@ public static RestStatus status(Throwable t) { return RestStatus.INTERNAL_SERVER_ERROR; } + public static String summaryMessage(Throwable t) { + if (t != null) { + if (t instanceof OpenSearchException) { + return t.getClass().getSimpleName() + "[" + t.getMessage() + "]"; + } else if (t instanceof IllegalArgumentException) { + return "Invalid argument"; + } else if (t instanceof JsonParseException) { + return "Failed to parse JSON"; + } else if (t instanceof OpenSearchRejectedExecutionException) { + return "Too many requests"; + } + } + return "Internal failure"; + } + public static Throwable unwrapCause(Throwable t) { int counter = 0; Throwable result = t; diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 34d7509c7afb2..4b6ca173ec692 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -594,16 +594,14 @@ public static void 
generateFailureXContent(XContentBuilder builder, Params param // Render the exception with a simple message if (detailed == false) { - String message = "No OpenSearchException found"; Throwable t = e; for (int counter = 0; counter < 10 && t != null; counter++) { if (t instanceof OpenSearchException) { - message = t.getClass().getSimpleName() + "[" + t.getMessage() + "]"; break; } t = t.getCause(); } - builder.field(ERROR, message); + builder.field(ERROR, ExceptionsHelper.summaryMessage(t)); return; } diff --git a/server/src/test/java/org/opensearch/ExceptionsHelperTests.java b/server/src/test/java/org/opensearch/ExceptionsHelperTests.java index d16b2e9d291b0..41051d7ff2dd0 100644 --- a/server/src/test/java/org/opensearch/ExceptionsHelperTests.java +++ b/server/src/test/java/org/opensearch/ExceptionsHelperTests.java @@ -113,6 +113,12 @@ public void testStatus() { assertThat(ExceptionsHelper.status(new OpenSearchRejectedExecutionException("rejected")), equalTo(RestStatus.TOO_MANY_REQUESTS)); } + public void testSummaryMessage() { + assertThat(ExceptionsHelper.summaryMessage(new IllegalArgumentException("illegal")), equalTo("Invalid argument")); + assertThat(ExceptionsHelper.summaryMessage(new JsonParseException(null, "illegal")), equalTo("Failed to parse JSON")); + assertThat(ExceptionsHelper.summaryMessage(new OpenSearchRejectedExecutionException("rejected")), equalTo("Too many requests")); + } + public void testGroupBy() { ShardOperationFailedException[] failures = new ShardOperationFailedException[] { createShardFailureParsingException("error", "node0", "index", 0, null), diff --git a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java index bd2695508dfcb..6ceb1d6f12e3b 100644 --- a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java +++ b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java @@ -814,12 +814,7 @@ public void testFailureToAndFromXContentWithNoDetails() throws IOException { } assertNotNull(parsedFailure); - String reason; - if (failure instanceof OpenSearchException) { - reason = failure.getClass().getSimpleName() + "[" + failure.getMessage() + "]"; - } else { - reason = "No OpenSearchException found"; - } + String reason = ExceptionsHelper.summaryMessage(failure); assertEquals(OpenSearchException.buildMessage("exception", reason, null), parsedFailure.getMessage()); assertEquals(0, parsedFailure.getHeaders().size()); assertEquals(0, parsedFailure.getMetadata().size()); diff --git a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java index 1ea7f006cf482..20a41b1d8d120 100644 --- a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java @@ -117,7 +117,7 @@ public void testNonOpenSearchExceptionIsNotShownAsSimpleMessage() throws Excepti assertThat(text, not(containsString("UnknownException[an error occurred reading data]"))); assertThat(text, not(containsString("FileNotFoundException[/foo/bar]"))); assertThat(text, not(containsString("error_trace"))); - assertThat(text, containsString("\"error\":\"No OpenSearchException found\"")); + assertThat(text, containsString("\"error\":\"Internal failure\"")); } public void testErrorTrace() throws Exception { From b1bc4d96d69e5f1278dfe4aea7e532e1e049c794 Mon Sep 17 00:00:00 2001 From: Vijayan Balasubramanian Date: Fri, 7 Oct 2022 05:27:54 -0700 Subject: 
[PATCH 033/122] Update GeoGrid base class access modifier to support extensibility (#4572) * Update access modifier to support extensibility Change access modifier from default to protected. This will help to build new geo based aggregation outside OpenSearch, by keeping GeoGrid Classes as base class. Signed-off-by: Vijayan Balasubramanian * Updated CHANGELOG Added PR details to CHANGELOG.md Signed-off-by: Vijayan Balasubramanian * Rename InternalGeoGridBucket to BaseGeoGridBucket Update class names, references and comments. Signed-off-by: Vijayan Balasubramanian * Rename InternalGeoGrid to BaseGeoGrid Signed-off-by: Vijayan Balasubramanian * Make GridBucket classes package-private Signed-off-by: Vijayan Balasubramanian * Remove Internal prefix from Geo Grid classes Signed-off-by: Vijayan Balasubramanian * Update constructor and class access modifier Signed-off-by: Vijayan Balasubramanian * Update access modifier based on usage Made classes package private if it is not used outside the package. Signed-off-by: Vijayan Balasubramanian Signed-off-by: Vijayan Balasubramanian --- CHANGELOG.md | 2 + .../org/opensearch/geo/GeoModulePlugin.java | 11 ++-- ...{InternalGeoGrid.java => BaseGeoGrid.java} | 50 +++++++++---------- ...GridBucket.java => BaseGeoGridBucket.java} | 16 +++--- .../bucket/geogrid/BucketPriorityQueue.java | 4 +- .../bucket/geogrid/CellIdSource.java | 2 +- .../aggregations/bucket/geogrid/GeoGrid.java | 4 +- .../geogrid/GeoGridAggregationBuilder.java | 4 +- .../bucket/geogrid/GeoGridAggregator.java | 22 ++++---- ...ernalGeoHashGrid.java => GeoHashGrid.java} | 22 ++++---- .../GeoHashGridAggregationBuilder.java | 2 +- .../bucket/geogrid/GeoHashGridAggregator.java | 15 +++--- .../geogrid/GeoHashGridAggregatorFactory.java | 4 +- ...ernalGeoTileGrid.java => GeoTileGrid.java} | 22 ++++---- .../GeoTileGridAggregationBuilder.java | 2 +- .../bucket/geogrid/GeoTileGridAggregator.java | 15 +++--- .../geogrid/GeoTileGridAggregatorFactory.java | 4 +- .../geogrid/InternalGeoHashGridBucket.java | 4 +- .../geogrid/InternalGeoTileGridBucket.java | 4 +- .../bucket/geogrid/ParsedGeoGrid.java | 4 +- .../bucket/geogrid/ParsedGeoGridBucket.java | 2 +- .../bucket/geogrid/ParsedGeoHashGrid.java | 2 +- .../geogrid/ParsedGeoHashGridBucket.java | 2 +- .../bucket/geogrid/ParsedGeoTileGrid.java | 2 +- ...idAggregationCompositeAggregatorTests.java | 3 +- .../geogrid/GeoGridAggregatorTestCase.java | 12 ++--- .../bucket/geogrid/GeoGridTestCase.java | 16 +++--- .../bucket/geogrid/GeoHashGridTests.java | 11 ++-- .../bucket/geogrid/GeoTileGridTests.java | 11 ++-- .../geo/tests/common/AggregationBuilders.java | 8 +-- .../common/AggregationInspectionHelper.java | 4 +- 31 files changed, 139 insertions(+), 147 deletions(-) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoGrid.java => BaseGeoGrid.java} (72%) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoGridBucket.java => BaseGeoGridBucket.java} (87%) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoHashGrid.java => GeoHashGrid.java} (70%) rename modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/{InternalGeoTileGrid.java => GeoTileGrid.java} (70%) diff --git a/CHANGELOG.md b/CHANGELOG.md index f8bfaf5abfed3..018475c7e9ae6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added release notes for 1.3.6 
([#4681](https://github.com/opensearch-project/OpenSearch/pull/4681)) - Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) +- Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index 77abba7f54677..8ca1d2a0c214f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -35,9 +35,8 @@ import org.opensearch.geo.search.aggregations.bucket.composite.GeoTileGridValuesSourceBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsGeoShapeAggregator; @@ -78,18 +77,18 @@ public List getAggregations() { GeoHashGridAggregationBuilder.NAME, GeoHashGridAggregationBuilder::new, GeoHashGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); + ).addResultReader(GeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); final AggregationSpec geoTileGrid = new AggregationSpec( GeoTileGridAggregationBuilder.NAME, GeoTileGridAggregationBuilder::new, GeoTileGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); + ).addResultReader(GeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); return List.of(geoBounds, geoHashGrid, geoTileGrid); } /** - * Registering the {@link GeoTileGridAggregator} in the {@link CompositeAggregation}. + * Registering the geotile grid in the {@link CompositeAggregation}. 
* * @return a {@link List} of {@link CompositeAggregationSpec} */ diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java similarity index 72% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java index 9dbed7b27307a..b58c19a7186e6 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java @@ -54,30 +54,30 @@ * All geo-grid hash-encoding in a grid are of the same precision and held internally as a single long * for efficiency's sake. * - * @opensearch.internal + * @opensearch.api */ -public abstract class InternalGeoGrid extends InternalMultiBucketAggregation< - InternalGeoGrid, - InternalGeoGridBucket> implements GeoGrid { +public abstract class BaseGeoGrid extends InternalMultiBucketAggregation + implements + GeoGrid { protected final int requiredSize; - protected final List buckets; + protected final List buckets; - InternalGeoGrid(String name, int requiredSize, List buckets, Map metadata) { + protected BaseGeoGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, metadata); this.requiredSize = requiredSize; this.buckets = buckets; } - abstract Writeable.Reader getBucketReader(); + protected abstract Writeable.Reader getBucketReader(); /** * Read from a stream. */ - public InternalGeoGrid(StreamInput in) throws IOException { + public BaseGeoGrid(StreamInput in) throws IOException { super(in); requiredSize = readSize(in); - buckets = (List) in.readList(getBucketReader()); + buckets = (List) in.readList(getBucketReader()); } @Override @@ -86,24 +86,24 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeList(buckets); } - abstract InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata); + protected abstract BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata); @Override - public List getBuckets() { + public List getBuckets() { return unmodifiableList(buckets); } @Override - public InternalGeoGrid reduce(List aggregations, ReduceContext reduceContext) { - LongObjectPagedHashMap> buckets = null; + public BaseGeoGrid reduce(List aggregations, ReduceContext reduceContext) { + LongObjectPagedHashMap> buckets = null; for (InternalAggregation aggregation : aggregations) { - InternalGeoGrid grid = (InternalGeoGrid) aggregation; + BaseGeoGrid grid = (BaseGeoGrid) aggregation; if (buckets == null) { buckets = new LongObjectPagedHashMap<>(grid.buckets.size(), reduceContext.bigArrays()); } for (Object obj : grid.buckets) { - InternalGeoGridBucket bucket = (InternalGeoGridBucket) obj; - List existingBuckets = buckets.get(bucket.hashAsLong()); + BaseGeoGridBucket bucket = (BaseGeoGridBucket) obj; + List existingBuckets = buckets.get(bucket.hashAsLong()); if (existingBuckets == null) { existingBuckets = new ArrayList<>(aggregations.size()); buckets.put(bucket.hashAsLong(), existingBuckets); @@ -113,13 +113,13 @@ public InternalGeoGrid reduce(List aggregations, ReduceCont } final int size = Math.toIntExact(reduceContext.isFinalReduce() == false ? 
buckets.size() : Math.min(requiredSize, buckets.size())); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - for (LongObjectPagedHashMap.Cursor> cursor : buckets) { - List sameCellBuckets = cursor.value; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + for (LongObjectPagedHashMap.Cursor> cursor : buckets) { + List sameCellBuckets = cursor.value; ordered.insertWithOverflow(reduceBucket(sameCellBuckets, reduceContext)); } buckets.close(); - InternalGeoGridBucket[] list = new InternalGeoGridBucket[ordered.size()]; + BaseGeoGridBucket[] list = new BaseGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; i--) { list[i] = ordered.pop(); } @@ -128,11 +128,11 @@ public InternalGeoGrid reduce(List aggregations, ReduceCont } @Override - protected InternalGeoGridBucket reduceBucket(List buckets, ReduceContext context) { + protected BaseGeoGridBucket reduceBucket(List buckets, ReduceContext context) { assert buckets.size() > 0; List aggregationsList = new ArrayList<>(buckets.size()); long docCount = 0; - for (InternalGeoGridBucket bucket : buckets) { + for (BaseGeoGridBucket bucket : buckets) { docCount += bucket.docCount; aggregationsList.add(bucket.aggregations); } @@ -140,12 +140,12 @@ protected InternalGeoGridBucket reduceBucket(List buckets return createBucket(buckets.get(0).hashAsLong, docCount, aggs); } - abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); + protected abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); - for (InternalGeoGridBucket bucket : buckets) { + for (BaseGeoGridBucket bucket : buckets) { bucket.toXContent(builder, params); } builder.endArray(); @@ -168,7 +168,7 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; - InternalGeoGrid other = (InternalGeoGrid) obj; + BaseGeoGrid other = (BaseGeoGrid) obj; return Objects.equals(requiredSize, other.requiredSize) && Objects.equals(buckets, other.buckets); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java similarity index 87% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java index 93fcdbd098400..f362d2b3d33d6 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java @@ -45,12 +45,12 @@ /** * Base implementation of geogrid aggs * - * @opensearch.internal + * @opensearch.api */ -public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket +public abstract class BaseGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoGrid.Bucket, - Comparable { + Comparable { protected long hashAsLong; protected long docCount; @@ -58,7 +58,7 @@ public abstract class InternalGeoGridBucket ext long bucketOrd; - public InternalGeoGridBucket(long hashAsLong, long docCount, 
InternalAggregations aggregations) { + public BaseGeoGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { this.docCount = docCount; this.aggregations = aggregations; this.hashAsLong = hashAsLong; @@ -67,7 +67,7 @@ public InternalGeoGridBucket(long hashAsLong, long docCount, InternalAggregation /** * Read from a stream. */ - public InternalGeoGridBucket(StreamInput in) throws IOException { + public BaseGeoGridBucket(StreamInput in) throws IOException { hashAsLong = in.readLong(); docCount = in.readVLong(); aggregations = InternalAggregations.readFrom(in); @@ -80,7 +80,7 @@ public void writeTo(StreamOutput out) throws IOException { aggregations.writeTo(out); } - long hashAsLong() { + public long hashAsLong() { return hashAsLong; } @@ -95,7 +95,7 @@ public Aggregations getAggregations() { } @Override - public int compareTo(InternalGeoGridBucket other) { + public int compareTo(BaseGeoGridBucket other) { if (this.hashAsLong > other.hashAsLong) { return 1; } @@ -119,7 +119,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - InternalGeoGridBucket bucket = (InternalGeoGridBucket) o; + BaseGeoGridBucket bucket = (BaseGeoGridBucket) o; return hashAsLong == bucket.hashAsLong && docCount == bucket.docCount && Objects.equals(aggregations, bucket.aggregations); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java index 70d0552b3e80b..83fcdf4f66424 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java @@ -38,14 +38,14 @@ * * @opensearch.internal */ -class BucketPriorityQueue extends PriorityQueue { +class BucketPriorityQueue extends PriorityQueue { BucketPriorityQueue(int size) { super(size); } @Override - protected boolean lessThan(InternalGeoGridBucket o1, InternalGeoGridBucket o2) { + protected boolean lessThan(BaseGeoGridBucket o1, BaseGeoGridBucket o2) { int cmp = Long.compare(o2.getDocCount(), o1.getDocCount()); if (cmp == 0) { cmp = o2.compareTo(o1); diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java index d40029e9a762d..89ce288770185 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java @@ -43,7 +43,7 @@ * Wrapper class to help convert {@link MultiGeoPointValues} * to numeric long values for bucketing. 
* - * @opensearch.internal + * @opensearch.api */ public class CellIdSource extends ValuesSource.Numeric { private final ValuesSource.GeoPoint valuesSource; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java index 4ae888640efc8..b2fe6e33ef95c 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java @@ -39,13 +39,13 @@ * A geo-grid aggregation. Defines multiple buckets, each representing a cell in a geo-grid of a specific * precision. * - * @opensearch.internal + * @opensearch.api */ public interface GeoGrid extends MultiBucketsAggregation { /** * A bucket that is associated with a geo-grid cell. The key of the bucket is - * the {@link InternalGeoGridBucket#getKeyAsString()} of the cell + * the {@link BaseGeoGridBucket#getKeyAsString()} of the cell */ interface Bucket extends MultiBucketsAggregation.Bucket {} diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 4a904b3aa2b16..0ca2a28844f99 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -58,9 +58,9 @@ import java.util.function.Function; /** - * Base Aggregation Builder for geohash_grid and geotile_grid aggs + * Base Aggregation Builder for geogrid aggs * - * @opensearch.internal + * @opensearch.api */ public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder { /* recognized field names in JSON */ diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java index 909772c61a960..db07ac8f947e5 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -55,16 +55,16 @@ /** * Aggregates data expressed as longs (for efficiency's sake) but formats results as aggregation-specific strings. * - * @opensearch.internal + * @opensearch.api */ -public abstract class GeoGridAggregator extends BucketsAggregator { +public abstract class GeoGridAggregator extends BucketsAggregator { protected final int requiredSize; protected final int shardSize; protected final ValuesSource.Numeric valuesSource; protected final LongKeyedBucketOrds bucketOrds; - GeoGridAggregator( + protected GeoGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -118,23 +118,23 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }; } - abstract T buildAggregation(String name, int requiredSize, List buckets, Map metadata); + protected abstract T buildAggregation(String name, int requiredSize, List buckets, Map metadata); /** * This method is used to return a re-usable instance of the bucket when building * the aggregation. 
- * @return a new {@link InternalGeoGridBucket} implementation with empty parameters + * @return a new {@link BaseGeoGridBucket} implementation with empty parameters */ - abstract InternalGeoGridBucket newEmptyBucket(); + protected abstract BaseGeoGridBucket newEmptyBucket(); @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - InternalGeoGridBucket[][] topBucketsPerOrd = new InternalGeoGridBucket[owningBucketOrds.length][]; + BaseGeoGridBucket[][] topBucketsPerOrd = new BaseGeoGridBucket[owningBucketOrds.length][]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]), shardSize); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - InternalGeoGridBucket spare = null; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + BaseGeoGridBucket spare = null; LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); while (ordsEnum.next()) { if (spare == null) { @@ -149,7 +149,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I spare = ordered.insertWithOverflow(spare); } - topBucketsPerOrd[ordIdx] = new InternalGeoGridBucket[ordered.size()]; + topBucketsPerOrd[ordIdx] = new BaseGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; --i) { topBucketsPerOrd[ordIdx][i] = ordered.pop(); } @@ -163,7 +163,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I } @Override - public InternalGeoGrid buildEmptyAggregation() { + public BaseGeoGrid buildEmptyAggregation() { return buildAggregation(name, requiredSize, Collections.emptyList(), metadata()); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java similarity index 70% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java index ff1247300939a..aa1d5504ad24f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java @@ -43,40 +43,40 @@ * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
* - * @opensearch.internal + * @opensearch.api */ -public class InternalGeoHashGrid extends InternalGeoGrid { +public class GeoHashGrid extends BaseGeoGrid { - InternalGeoHashGrid(String name, int requiredSize, List buckets, Map metadata) { + GeoHashGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, requiredSize, buckets, metadata); } - public InternalGeoHashGrid(StreamInput in) throws IOException { + public GeoHashGrid(StreamInput in) throws IOException { super(in); } @Override - public InternalGeoGrid create(List buckets) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + public BaseGeoGrid create(List buckets) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + public BaseGeoGridBucket createBucket(InternalAggregations aggregations, BaseGeoGridBucket prototype) { return new InternalGeoHashGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override - InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + protected BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - InternalGeoHashGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + protected InternalGeoHashGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { return new InternalGeoHashGridBucket(hashAsLong, docCount, aggregations); } @Override - Reader getBucketReader() { + protected Reader getBucketReader() { return InternalGeoHashGridBucket::new; } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index bbaf9613fb216..760d7d643c0a5 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Aggregation Builder for geohash_grid * - * @opensearch.internal + * @opensearch.api */ public class GeoHashGridAggregationBuilder extends GeoGridAggregationBuilder { public static final String NAME = "geohash_grid"; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index 6ca7a4d8a9cb8..9ff9fe7d8f9ba 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -47,9 +47,9 @@ * * @opensearch.internal */ -public class GeoHashGridAggregator extends GeoGridAggregator { +class GeoHashGridAggregator extends GeoGridAggregator { - public GeoHashGridAggregator( + GeoHashGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -64,16 +64,17 @@ public GeoHashGridAggregator( } @Override - InternalGeoHashGrid buildAggregation(String name, int requiredSize, List buckets, Map 
metadata) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + protected GeoHashGrid buildAggregation(String name, int requiredSize, List buckets, Map metadata) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoHashGrid buildEmptyAggregation() { - return new InternalGeoHashGrid(name, requiredSize, Collections.emptyList(), metadata()); + public GeoHashGrid buildEmptyAggregation() { + return new GeoHashGrid(name, requiredSize, Collections.emptyList(), metadata()); } - InternalGeoGridBucket newEmptyBucket() { + @Override + protected BaseGeoGridBucket newEmptyBucket() { return new InternalGeoHashGridBucket(0, 0, null); } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index 1914c07e831f7..898a7d82a4dec 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -58,7 +58,7 @@ * * @opensearch.internal */ -public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory { private final int precision; private final int requiredSize; @@ -86,7 +86,7 @@ public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize, emptyList(), metadata); + final InternalAggregation aggregation = new GeoHashGrid(name, requiredSize, emptyList(), metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java similarity index 70% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java index fa544b5893f0c..91c523c80855e 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java @@ -43,40 +43,40 @@ * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
* - * @opensearch.internal + * @opensearch.api */ -public class InternalGeoTileGrid extends InternalGeoGrid { +public class GeoTileGrid extends BaseGeoGrid { - InternalGeoTileGrid(String name, int requiredSize, List buckets, Map metadata) { + GeoTileGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, requiredSize, buckets, metadata); } - public InternalGeoTileGrid(StreamInput in) throws IOException { + public GeoTileGrid(StreamInput in) throws IOException { super(in); } @Override - public InternalGeoGrid create(List buckets) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + public BaseGeoGrid create(List buckets) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + public BaseGeoGridBucket createBucket(InternalAggregations aggregations, BaseGeoGridBucket prototype) { return new InternalGeoTileGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override - InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + protected BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - InternalGeoTileGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + protected InternalGeoTileGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { return new InternalGeoTileGridBucket(hashAsLong, docCount, aggregations); } @Override - Reader getBucketReader() { + protected Reader getBucketReader() { return InternalGeoTileGridBucket::new; } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index 76ad515f34fe5..0f1f87bdc57fa 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Aggregation Builder for geotile_grid agg * - * @opensearch.internal + * @opensearch.api */ public class GeoTileGridAggregationBuilder extends GeoGridAggregationBuilder { public static final String NAME = "geotile_grid"; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java index a205a9afde41e..8faed4e9cd2d4 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java @@ -48,9 +48,9 @@ * * @opensearch.internal */ -public class GeoTileGridAggregator extends GeoGridAggregator { +class GeoTileGridAggregator extends GeoGridAggregator { - public GeoTileGridAggregator( + GeoTileGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -65,16 +65,17 @@ public GeoTileGridAggregator( } @Override - InternalGeoTileGrid buildAggregation(String name, int requiredSize, List buckets, Map 
metadata) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + protected GeoTileGrid buildAggregation(String name, int requiredSize, List buckets, Map metadata) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoTileGrid buildEmptyAggregation() { - return new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata()); + public GeoTileGrid buildEmptyAggregation() { + return new GeoTileGrid(name, requiredSize, Collections.emptyList(), metadata()); } - InternalGeoGridBucket newEmptyBucket() { + @Override + protected BaseGeoGridBucket newEmptyBucket() { return new InternalGeoTileGridBucket(0, 0, null); } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index b830988a3d410..6eb73727ad6c8 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -57,7 +57,7 @@ * * @opensearch.internal */ -public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { private final int precision; private final int requiredSize; @@ -85,7 +85,7 @@ public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - final InternalAggregation aggregation = new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata); + final InternalAggregation aggregation = new GeoTileGrid(name, requiredSize, Collections.emptyList(), metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java index 659909e868651..6e7ed8a679681 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class InternalGeoHashGridBucket extends InternalGeoGridBucket { +class InternalGeoHashGridBucket extends BaseGeoGridBucket { InternalGeoHashGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { super(hashAsLong, docCount, aggregations); } @@ -51,7 +51,7 @@ public class InternalGeoHashGridBucket extends InternalGeoGridBucket { +class InternalGeoTileGridBucket extends BaseGeoGridBucket { InternalGeoTileGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { super(hashAsLong, docCount, aggregations); } @@ -52,7 +52,7 @@ public class InternalGeoTileGridBucket extends InternalGeoGridBucket implements GeoGrid { @@ -63,7 +63,7 @@ public static ObjectParser createParser( return parser; } - protected void setName(String name) { + public void setName(String name) { super.setName(name); } } diff --git 
a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java index 80124cda50b19..cbe3a2ee89dd7 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java @@ -40,7 +40,7 @@ /** * A single geo grid bucket result parsed between nodes * - * @opensearch.internal + * @opensearch.api */ public abstract class ParsedGeoGridBucket extends ParsedMultiBucketAggregation.ParsedBucket implements GeoGrid.Bucket { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java index 109524e755c4d..343149f8e19ab 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ParsedGeoHashGrid extends ParsedGeoGrid { +class ParsedGeoHashGrid extends ParsedGeoGrid { private static final ObjectParser PARSER = createParser( ParsedGeoHashGrid::new, diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java index 4e6e454b08324..6704273f45580 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java @@ -41,7 +41,7 @@ * * @opensearch.internal */ -public class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { +class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { @Override public GeoPoint getKey() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java index 8734c96a15578..cb64a0e153e87 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ParsedGeoTileGrid extends ParsedGeoGrid { +class ParsedGeoTileGrid extends ParsedGeoGrid { private static final ObjectParser PARSER = createParser( ParsedGeoTileGrid::new, diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java index 3c7c292f9d193..bc7fde8d66d0a 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java @@ -17,7 +17,6 @@ import org.opensearch.common.geo.GeoPoint; import 
org.opensearch.geo.GeoModulePlugin; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; @@ -31,7 +30,7 @@ import java.util.Map; /** - * Testing the {@link GeoTileGridAggregator} as part of CompositeAggregation. + * Testing the geo tile grid as part of CompositeAggregation. */ public class GeoTileGridAggregationCompositeAggregatorTests extends BaseCompositeAggregatorTestCase { diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index d6153637f656d..5ec10a7f4f7cf 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -73,7 +73,7 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { +public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { private static final String FIELD_NAME = "location"; protected static final double GEOHASH_TOLERANCE = 1E-5D; @@ -201,9 +201,9 @@ public void testAsSubAgg() throws IOException { Consumer verify = (terms) -> { Map> actual = new TreeMap<>(); for (StringTerms.Bucket tb : terms.getBuckets()) { - InternalGeoGrid gg = tb.getAggregations().get("gg"); + BaseGeoGrid gg = tb.getAggregations().get("gg"); Map sub = new TreeMap<>(); - for (InternalGeoGridBucket ggb : gg.getBuckets()) { + for (BaseGeoGridBucket ggb : gg.getBuckets()) { sub.put(ggb.getKeyAsString(), ggb.getDocCount()); } actual.put(tb.getKeyAsString(), sub); @@ -299,7 +299,7 @@ private void testCase( String field, int precision, GeoBoundingBox geoBoundingBox, - Consumer> verify, + Consumer> verify, CheckedConsumer buildIndex ) throws IOException { testCase(query, precision, geoBoundingBox, verify, buildIndex, createBuilder("_name").field(field)); @@ -309,7 +309,7 @@ private void testCase( Query query, int precision, GeoBoundingBox geoBoundingBox, - Consumer> verify, + Consumer> verify, CheckedConsumer buildIndex, GeoGridAggregationBuilder aggregationBuilder ) throws IOException { @@ -333,7 +333,7 @@ private void testCase( aggregator.preCollection(); indexSearcher.search(query, aggregator); aggregator.postCollection(); - verify.accept((InternalGeoGrid) aggregator.buildTopLevel()); + verify.accept((BaseGeoGrid) aggregator.buildTopLevel()); indexReader.close(); directory.close(); diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java index 432736a2b43fe..2a655239997b6 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java @@ -50,16 +50,16 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class GeoGridTestCase> extends - InternalMultiBucketAggregationTestCase { +public abstract class GeoGridTestCase> extends 
InternalMultiBucketAggregationTestCase< + T> { /** - * Instantiate a {@link InternalGeoGrid}-derived class using the same parameters as constructor. + * Instantiate a {@link BaseGeoGrid}-derived class using the same parameters as constructor. */ - protected abstract T createInternalGeoGrid(String name, int size, List buckets, Map metadata); + protected abstract T createInternalGeoGrid(String name, int size, List buckets, Map metadata); /** - * Instantiate a {@link InternalGeoGridBucket}-derived class using the same parameters as constructor. + * Instantiate a {@link BaseGeoGridBucket}-derived class using the same parameters as constructor. */ protected abstract B createInternalGeoGridBucket(Long key, long docCount, InternalAggregations aggregations); @@ -117,7 +117,7 @@ protected List getNamedXContents() { protected T createTestInstance(String name, Map metadata, InternalAggregations aggregations) { final int precision = randomPrecision(); int size = randomNumberOfBuckets(); - List buckets = new ArrayList<>(size); + List buckets = new ArrayList<>(size); for (int i = 0; i < size; i++) { double latitude = randomDoubleBetween(-90.0, 90.0, false); double longitude = randomDoubleBetween(-180.0, 180.0, false); @@ -176,7 +176,7 @@ protected Class implementationClass() { protected T mutateInstance(T instance) { String name = instance.getName(); int size = instance.getRequiredSize(); - List buckets = instance.getBuckets(); + List buckets = instance.getBuckets(); Map metadata = instance.getMetadata(); switch (between(0, 3)) { case 0: @@ -206,7 +206,7 @@ protected T mutateInstance(T instance) { } public void testCreateFromBuckets() { - InternalGeoGrid original = createTestInstance(); + BaseGeoGrid original = createTestInstance(); assertThat(original, equalTo(original.create(original.buckets))); } } diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java index c84c6ef5ec076..ada943b6dd369 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java @@ -37,16 +37,11 @@ import java.util.List; import java.util.Map; -public class GeoHashGridTests extends GeoGridTestCase { +public class GeoHashGridTests extends GeoGridTestCase { @Override - protected InternalGeoHashGrid createInternalGeoGrid( - String name, - int size, - List buckets, - Map metadata - ) { - return new InternalGeoHashGrid(name, size, buckets, metadata); + protected GeoHashGrid createInternalGeoGrid(String name, int size, List buckets, Map metadata) { + return new GeoHashGrid(name, size, buckets, metadata); } @Override diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java index ead67e0455d94..b59e9ec2cff53 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java @@ -37,16 +37,11 @@ import java.util.List; import java.util.Map; -public class GeoTileGridTests extends GeoGridTestCase { +public class GeoTileGridTests extends GeoGridTestCase { @Override - protected InternalGeoTileGrid 
createInternalGeoGrid( - String name, - int size, - List buckets, - Map metadata - ) { - return new InternalGeoTileGrid(name, size, buckets, metadata); + protected GeoTileGrid createInternalGeoGrid(String name, int size, List buckets, Map metadata) { + return new GeoTileGrid(name, size, buckets, metadata); } @Override diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java index c0d7e51047c6b..706c73e7416f5 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java @@ -10,8 +10,8 @@ import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; @@ -24,14 +24,14 @@ public static GeoBoundsAggregationBuilder geoBounds(String name) { } /** - * Create a new {@link InternalGeoHashGrid} aggregation with the given name. + * Create a new {@link GeoHashGrid} aggregation with the given name. */ public static GeoHashGridAggregationBuilder geohashGrid(String name) { return new GeoHashGridAggregationBuilder(name); } /** - * Create a new {@link InternalGeoTileGrid} aggregation with the given name. + * Create a new {@link GeoTileGrid} aggregation with the given name. */ public static GeoTileGridAggregationBuilder geotileGrid(String name) { return new GeoTileGridAggregationBuilder(name); diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java index 3473cf2d94b76..89debdf5abd95 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java @@ -8,7 +8,7 @@ package org.opensearch.geo.tests.common; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.BaseGeoGrid; import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; public class AggregationInspectionHelper { @@ -17,7 +17,7 @@ public static boolean hasValue(InternalGeoBounds agg) { return (agg.topLeft() == null && agg.bottomRight() == null) == false; } - public static boolean hasValue(InternalGeoGrid agg) { + public static boolean hasValue(BaseGeoGrid agg) { return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); } } From ef45809468ff381498c69a8b0af07e7443967836 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 08:44:54 -0500 Subject: [PATCH 034/122] [Remove] LegacyESVersion.V_7_0_* and V_7_1_* constants (#2768) * [Remove] LegacyESVersion.V_7_0_* and V_7_1_* constants Removes all usages of LegacyESVersion.V_7_0_ and LegacyESVersion.V_7_1 version constants. 
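The recurring pattern removed throughout this change is the read-side and write-side version gate around wire fields. A minimal before/after sketch, following the MultiSearchTemplateResponse hunk later in this patch and assuming the oldest supported wire version is now at or above 7.0.0:

    // Before: the field is only on the wire when the peer is >= 7.0.0
    MultiSearchTemplateResponse(StreamInput in) throws IOException {
        super(in);
        items = in.readArray(Item::new, Item[]::new);
        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
            tookInMillis = in.readVLong();
        } else {
            tookInMillis = -1L;
        }
    }

    // After: every supported peer speaks >= 7.0.0, so the field is read unconditionally
    MultiSearchTemplateResponse(StreamInput in) throws IOException {
        super(in);
        items = in.readArray(Item::new, Item[]::new);
        tookInMillis = in.readVLong();
    }

The same reasoning applies on the write side: out.writeVLong(tookInMillis) no longer needs an onOrAfter check, and the constants themselves can then be deleted.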
Signed-off-by: Nicholas Walter Knize * Rebase from main Signed-off-by: Nicholas Walter Knize * fix serialization issue with build flavor removal Signed-off-by: Nicholas Walter Knize * remove stale bwc test Signed-off-by: Nicholas Walter Knize * rebase and update Signed-off-by: Nicholas Walter Knize * cleanup Signed-off-by: Nicholas Walter Knize * fix failing mapper test Signed-off-by: Nicholas Walter Knize Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../common/CommonAnalysisModulePlugin.java | 34 +-- .../common/NGramTokenFilterFactory.java | 29 +- .../mustache/MultiSearchTemplateResponse.java | 11 +- ...GoogleCloudStorageRetryingInputStream.java | 3 +- .../s3/S3RetryingInputStream.java | 3 +- .../upgrades/FullClusterRestartIT.java | 9 +- .../upgrades/QueryBuilderBWCIT.java | 3 +- .../upgrades/JodaCompatibilityIT.java | 282 ------------------ .../org/opensearch/upgrades/MappingIT.java | 70 ----- .../org/opensearch/upgrades/RecoveryIT.java | 9 +- .../upgrades/SystemIndicesUpgradeIT.java | 13 +- .../src/main/java/org/opensearch/Build.java | 41 +-- .../java/org/opensearch/LegacyESVersion.java | 4 - .../org/opensearch/OpenSearchException.java | 2 +- .../src/main/java/org/opensearch/Version.java | 2 +- .../cluster/state/ClusterStateResponse.java | 8 - .../indices/alias/IndicesAliasesRequest.java | 8 +- .../indices/create/CreateIndexRequest.java | 7 - .../TransportGetFieldMappingsIndexAction.java | 4 +- .../mapping/put/PutMappingRequest.java | 6 - .../action/delete/DeleteRequest.java | 7 - ...TransportFieldCapabilitiesIndexAction.java | 3 +- .../org/opensearch/action/get/GetRequest.java | 7 - .../action/get/MultiGetRequest.java | 7 - .../opensearch/action/index/IndexRequest.java | 6 - .../opensearch/action/main/MainResponse.java | 7 - .../action/search/MultiSearchResponse.java | 11 +- .../action/search/SearchRequest.java | 8 +- .../termvectors/TermVectorsRequest.java | 8 - .../action/update/UpdateRequest.java | 29 -- .../cluster/coordination/Coordinator.java | 9 +- .../cluster/metadata/MappingMetadata.java | 7 - .../opensearch/cluster/metadata/Metadata.java | 34 +-- .../metadata/MetadataCreateIndexService.java | 27 +- .../cluster/routing/UnassignedInfo.java | 6 +- .../java/org/opensearch/common/Rounding.java | 19 +- .../breaker/CircuitBreakingException.java | 11 +- .../java/org/opensearch/common/joda/Joda.java | 13 - .../org/opensearch/common/lucene/Lucene.java | 56 ++-- .../java/org/opensearch/env/NodeMetadata.java | 6 +- .../opensearch/gateway/GatewayMetaState.java | 5 +- .../index/analysis/AnalysisRegistry.java | 11 +- .../analysis/ShingleTokenFilterFactory.java | 47 +-- .../index/mapper/DateFieldMapper.java | 7 +- .../index/mapper/DocumentMapper.java | 7 +- .../index/mapper/DocumentMapperParser.java | 2 +- .../mapper/LegacyGeoShapeFieldMapper.java | 7 +- .../index/mapper/MapperService.java | 6 +- .../index/mapper/RangeFieldMapper.java | 8 +- .../index/query/MultiMatchQueryBuilder.java | 7 - .../index/similarity/SimilarityProviders.java | 48 +-- .../index/similarity/SimilarityService.java | 60 +--- .../opensearch/indices/IndicesService.java | 9 +- .../indices/analysis/AnalysisModule.java | 17 +- .../indices/mapper/MapperRegistry.java | 14 +- .../org/opensearch/monitor/jvm/JvmInfo.java | 16 +- .../org/opensearch/script/ScriptStats.java | 6 +- .../org/opensearch/search/DocValueFormat.java | 44 +-- .../pipeline/InternalPercentilesBucket.java | 12 +- ...tilesBucketPipelineAggregationBuilder.java | 11 +- .../PercentilesBucketPipelineAggregator.java | 11 +- 
.../BaseMultiValuesSourceFieldConfig.java | 13 +- .../ValuesSourceAggregationBuilder.java | 14 +- .../search/builder/SearchSourceBuilder.java | 12 +- .../search/dfs/DfsSearchResult.java | 28 +- .../search/internal/ShardSearchRequest.java | 10 +- .../opensearch/search/slice/SliceBuilder.java | 12 +- .../opensearch/search/suggest/Suggest.java | 65 +--- .../completion/context/GeoContextMapping.java | 38 +-- .../search/suggest/term/TermSuggestion.java | 10 +- .../opensearch/snapshots/RestoreService.java | 10 - .../transport/RemoteConnectionInfo.java | 71 +---- .../test/java/org/opensearch/BuildTests.java | 13 +- .../ExceptionSerializationTests.java | 2 +- .../java/org/opensearch/VersionTests.java | 2 +- .../state/ClusterStateRequestTests.java | 15 +- .../action/search/SearchRequestTests.java | 7 +- .../coordination/JoinTaskExecutorTests.java | 20 +- .../MetadataIndexStateServiceTests.java | 90 ------ .../common/settings/SettingsTests.java | 3 +- .../index/query/RangeQueryBuilderTests.java | 2 +- .../similarity/SimilarityServiceTests.java | 8 +- .../indices/IndicesModuleTests.java | 28 +- .../indices/IndicesServiceTests.java | 13 +- .../search/query/QuerySearchResultTests.java | 33 -- .../test/rest/yaml/section/DoSection.java | 13 - .../AbstractFullClusterRestartTestCase.java | 5 +- 88 files changed, 241 insertions(+), 1481 deletions(-) delete mode 100644 qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java delete mode 100644 qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 018475c7e9ae6..e1b23eaa5ac20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) +- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) ### Fixed diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index 57865e15d523a..7cad01c5cc00a 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -485,19 +485,10 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1))); filters.add(PreConfiguredTokenFilter.openSearchVersion("edgeNGram", false, false, (reader, version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " - + "Please change the filter name to [edge_ngram] instead." - ); - } else { - deprecationLogger.deprecate( - "edgeNGram_deprecation", - "The [edgeNGram] token filter name is deprecated and will be removed in a future version. 
" - + "Please change the filter name to [edge_ngram] instead." - ); - } - return new EdgeNGramTokenFilter(reader, 1); + throw new IllegalArgumentException( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead." + ); })); filters.add( PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES)) @@ -524,19 +515,10 @@ public List getPreConfiguredTokenFilters() { ); filters.add(PreConfiguredTokenFilter.singleton("ngram", false, false, reader -> new NGramTokenFilter(reader, 1, 2, false))); filters.add(PreConfiguredTokenFilter.openSearchVersion("nGram", false, false, (reader, version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " - + "Please change the filter name to [ngram] instead." - ); - } else { - deprecationLogger.deprecate( - "nGram_deprecation", - "The [nGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [ngram] instead." - ); - } - return new NGramTokenFilter(reader, 1, 2, false); + throw new IllegalArgumentException( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [ngram] instead." + ); })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java index 218bb74b84667..a6adf680a454c 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java @@ -34,7 +34,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.NGramTokenFilter; -import org.opensearch.LegacyESVersion; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; @@ -54,25 +53,15 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { this.maxGram = settings.getAsInt("max_gram", 2); int ngramDiff = maxGram - minGram; if (ngramDiff > maxAllowedNgramDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" - + maxAllowedNgramDiff - + "] but was [" - + ngramDiff - + "]. This limit can be set by changing the [" - + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() - + "] index level setting." - ); - } else { - deprecationLogger.deprecate( - "ngram_big_difference", - "Deprecated big difference between max_gram and min_gram in NGram Tokenizer," - + "expected difference must be less than or equal to: [" - + maxAllowedNgramDiff - + "]" - ); - } + throw new IllegalArgumentException( + "The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" + + maxAllowedNgramDiff + + "] but was [" + + ngramDiff + + "]. 
This limit can be set by changing the [" + + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() + + "] index level setting." + ); } preserveOriginal = settings.getAsBoolean(PRESERVE_ORIG_KEY, false); } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java index 1802d03e20942..7c2c403fdd487 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java @@ -32,7 +32,6 @@ package org.opensearch.script.mustache; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.search.MultiSearchResponse; @@ -125,11 +124,7 @@ public String toString() { MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - tookInMillis = in.readVLong(); - } else { - tookInMillis = -1L; - } + tookInMillis = in.readVLong(); } MultiSearchTemplateResponse(Item[] items, long tookInMillis) { @@ -159,9 +154,7 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(items); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(tookInMillis); - } + out.writeVLong(tookInMillis); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java index 72d3e37466d09..5448799e7f81b 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java @@ -42,7 +42,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.SpecialPermission; import org.opensearch.common.SuppressForbidden; import org.opensearch.core.internal.io.IOUtils; @@ -61,7 +60,7 @@ /** * Wrapper around reads from GCS that will retry blob downloads that fail part-way through, resuming from where the failure occurred. * This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed if the SDK handles retries itself in the future. + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed if the SDK handles retries itself in the future. 
*/ class GoogleCloudStorageRetryingInputStream extends InputStream { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java index 388f5b8d74a2b..f751d63232f79 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java @@ -40,7 +40,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.core.internal.io.IOUtils; import java.io.IOException; @@ -52,7 +51,7 @@ /** * Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where * the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. * * See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue */ diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index 714d8a252579f..0ed51b9d8a011 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -329,9 +329,6 @@ public void testShrink() throws IOException { client().performRequest(updateSettingsRequest); Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - if (getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - shrinkIndexRequest.addParameter("copy_settings", "true"); - } shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); client().performRequest(shrinkIndexRequest); @@ -1253,7 +1250,7 @@ public void testPeerRecoveryRetentionLeases() throws Exception { settings.startObject("settings"); settings.field("number_of_shards", between(1, 5)); settings.field("number_of_replicas", between(0, 1)); - if (randomBoolean() || getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { + if (randomBoolean()) { // this is the default after v7.0.0, but is required before that settings.field("soft_deletes.enabled", true); } @@ -1436,10 +1433,6 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); getTasksIndex.addParameter("allow_no_indices", "false"); - if (getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - getTasksIndex.addParameter("include_type_name", "false"); - } - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java index de042cb2b7634..856dc45d42203 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java +++ 
b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java @@ -46,6 +46,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.ConstantScoreQueryBuilder; import org.opensearch.index.query.DisMaxQueryBuilder; @@ -157,7 +158,7 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) { } public void testQueryBuilderBWC() throws Exception { - final String type = getOldClusterVersion().before(LegacyESVersion.V_7_0_0) ? "doc" : "_doc"; + final String type = MapperService.SINGLE_MAPPING_NAME; String index = "queries"; if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java deleted file mode 100644 index 0ef1e3a5050af..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.upgrades; - -import org.apache.http.HttpStatus; -import org.apache.http.util.EntityUtils; -import org.opensearch.LegacyESVersion; -import org.opensearch.client.Node; -import org.opensearch.client.Request; -import org.opensearch.client.RequestOptions; -import org.opensearch.client.Response; -import org.opensearch.client.WarningsHandler; -import org.opensearch.common.Booleans; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.search.DocValueFormat; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.List; -import java.util.function.Consumer; - -import static org.opensearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; - -/** - * This is test is meant to verify that when upgrading from 6.x version to 7.7 or newer it is able to parse date fields with joda pattern. - * - * The test is indexing documents and searches with use of joda or java pattern. - * In order to make sure that serialization logic is used a search call is executed 3 times (using all nodes). 
- * It cannot be guaranteed that serialization logic will always be used as it might happen that - * all shards are allocated on the same node and client is connecting to it. - * Because of this warnings assertions have to be ignored. - * - * A special flag used when serializing {@link DocValueFormat.DateTime#writeTo DocValueFormat.DateTime::writeTo} - * is used to indicate that an index was created in 6.x and has a joda pattern. The same flag is read when - * {@link DocValueFormat.DateTime#DateTime(StreamInput)} deserializing. - * When upgrading from 7.0-7.6 to 7.7 there is no way to tell if a pattern was created in 6.x as this flag cannot be added. - * Hence a skip assume section in init() - * - * @see org.opensearch.search.DocValueFormat.DateTime - */ -public class JodaCompatibilityIT extends AbstractRollingTestCase { - - @BeforeClass - public static void init(){ - assumeTrue("upgrading from 7.0-7.6 will fail parsing joda formats", - UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)); - } - - public void testJodaBackedDocValueAndDateFields() throws Exception { - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = indexWithDateField("joda_time", "YYYY-MM-dd'T'HH:mm:ssZZ"); - createTestIndex.setOptions(ignoreWarnings()); - - Response resp = client().performRequest(createTestIndex); - assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); - - postNewDoc("joda_time", 1); - - break; - case MIXED: - int minute = Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 2 : 3; - postNewDoc("joda_time", minute); - - Request search = dateRangeSearch("joda_time"); - search.setOptions(ignoreWarnings()); - - performOnAllNodes(search, r -> assertEquals(HttpStatus.SC_OK, r.getStatusLine().getStatusCode())); - break; - case UPGRADED: - postNewDoc("joda_time", 4); - - search = searchWithAgg("joda_time"); - search.setOptions(ignoreWarnings()); - //making sure all nodes were used for search - performOnAllNodes(search, r -> assertResponseHasAllDocuments(r)); - break; - } - } - - public void testJavaBackedDocValueAndDateFields() throws Exception { - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = indexWithDateField("java_time", "8yyyy-MM-dd'T'HH:mm:ssXXX"); - Response resp = client().performRequest(createTestIndex); - assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); - - postNewDoc("java_time", 1); - - break; - case MIXED: - int minute = Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 
2 : 3; - postNewDoc("java_time", minute); - - Request search = dateRangeSearch("java_time"); - Response searchResp = client().performRequest(search); - assertEquals(HttpStatus.SC_OK, searchResp.getStatusLine().getStatusCode()); - break; - case UPGRADED: - postNewDoc("java_time", 4); - - search = searchWithAgg("java_time"); - //making sure all nodes were used for search - performOnAllNodes(search, r -> assertResponseHasAllDocuments(r)); - - break; - } - } - - private RequestOptions ignoreWarnings() { - RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - return options.build(); - } - - private void performOnAllNodes(Request search, Consumer consumer) throws IOException { - List nodes = client().getNodes(); - for (Node node : nodes) { - client().setNodes(Collections.singletonList(node)); - Response response = client().performRequest(search); - consumer.accept(response); - assertEquals(HttpStatus.SC_OK, response.getStatusLine().getStatusCode()); - } - client().setNodes(nodes); - } - - private void assertResponseHasAllDocuments(Response searchResp) { - assertEquals(HttpStatus.SC_OK, searchResp.getStatusLine().getStatusCode()); - try { - assertEquals(removeWhiteSpace("{" + - " \"_shards\": {" + - " \"total\": 3," + - " \"successful\": 3" + - " },"+ - " \"hits\": {" + - " \"total\": 4," + - " \"hits\": [" + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:01+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:02+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:03+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:04+01:00\"" + - " }" + - " }" + - " ]" + - " }" + - "}"), - EntityUtils.toString(searchResp.getEntity(), StandardCharsets.UTF_8)); - } catch (IOException e) { - throw new AssertionError("Exception during response parising", e); - } - } - - private String removeWhiteSpace(String input) { - return input.replaceAll("[\\n\\r\\t\\ ]", ""); - } - - private Request dateRangeSearch(String endpoint) { - Request search = new Request("GET", endpoint+"/_search"); - search.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - search.addParameter("filter_path", "hits.total,hits.hits._source.datetime,_shards.total,_shards.successful"); - search.setJsonEntity("" + - "{\n" + - " \"track_total_hits\": true,\n" + - " \"sort\": \"datetime\",\n" + - " \"query\": {\n" + - " \"range\": {\n" + - " \"datetime\": {\n" + - " \"gte\": \"2020-01-01T00:00:00+01:00\",\n" + - " \"lte\": \"2020-01-02T00:00:00+01:00\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - return search; - } - - private Request searchWithAgg(String endpoint) throws IOException { - Request search = new Request("GET", endpoint+"/_search"); - search.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - search.addParameter("filter_path", "hits.total,hits.hits._source.datetime,_shards.total,_shards.successful"); - - search.setJsonEntity("{\n" + - " \"track_total_hits\": true,\n" + - " \"sort\": \"datetime\",\n" + - " \"query\": {\n" + - " \"range\": {\n" + - " \"datetime\": {\n" + - " \"gte\": \"2020-01-01T00:00:00+01:00\",\n" + - " \"lte\": \"2020-01-02T00:00:00+01:00\"\n" + - " }\n" + - " }\n" + - " },\n" + - " \"aggs\" : {\n" + - " \"docs_per_year\" : {\n" + - " \"date_histogram\" : {\n" + - " \"field\" : \"date\",\n" + - " \"calendar_interval\" : \"year\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - return 
search; - } - private Request indexWithDateField(String indexName, String format) { - Request createTestIndex = new Request("PUT", indexName); - createTestIndex.addParameter("include_type_name", "false"); - createTestIndex.setJsonEntity("{\n" + - " \"settings\": {\n" + - " \"index.number_of_shards\": 3\n" + - " },\n" + - " \"mappings\": {\n" + - " \"properties\": {\n" + - " \"datetime\": {\n" + - " \"type\": \"date\",\n" + - " \"format\": \"" + format + "\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - ); - return createTestIndex; - } - - private void postNewDoc(String endpoint, int minute) throws IOException { - Request putDoc = new Request("POST", endpoint+"/_doc"); - putDoc.addParameter("refresh", "true"); - putDoc.addParameter("wait_for_active_shards", "all"); - putDoc.setJsonEntity("{\n" + - " \"datetime\": \"2020-01-01T00:00:0" + minute + "+01:00\"\n" + - "}" - ); - Response resp = client().performRequest(putDoc); - assertEquals(HttpStatus.SC_CREATED, resp.getStatusLine().getStatusCode()); - } -} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java deleted file mode 100644 index 07b1d67fde7ff..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.upgrades; - -import org.opensearch.LegacyESVersion; -import org.opensearch.client.Request; -import org.opensearch.client.Response; -import org.opensearch.common.xcontent.support.XContentMapValues; - -public class MappingIT extends AbstractRollingTestCase { - /** - * Create a mapping that explicitly disables the _all field (possible in 6x, see #37429) - * and check that it can be upgraded to 7x. - */ - public void testAllFieldDisable6x() throws Exception { - assumeTrue("_all", UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)); - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = new Request("PUT", "all-index"); - createTestIndex.addParameter("include_type_name", "false"); - createTestIndex.setJsonEntity( - "{ \"settings\": { \"index.number_of_shards\": 1 }, " + - "\"mappings\": {\"_all\": { \"enabled\": false }, \"properties\": { \"field\": { \"type\": \"text\" }}}}" - ); - createTestIndex.setOptions(expectWarnings("[_all] is deprecated in 6.0+ and will be removed in 7.0. 
As a replacement," + - " " + "you can use [copy_to] on mapping fields to create your own catch all field.")); - Response resp = client().performRequest(createTestIndex); - assertEquals(200, resp.getStatusLine().getStatusCode()); - break; - - default: - final Request request = new Request("GET", "all-index"); - Response response = client().performRequest(request); - assertEquals(200, response.getStatusLine().getStatusCode()); - Object enabled = XContentMapValues.extractValue("all-index.mappings._all.enabled", entityAsMap(response)); - assertNotNull(enabled); - assertEquals(false, enabled); - break; - } - } -} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index cbf91fa9d71e7..3d71f4a198aac 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -47,6 +47,7 @@ import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.RestStatus; import org.opensearch.test.rest.yaml.ObjectPath; import org.hamcrest.Matcher; @@ -244,6 +245,7 @@ private String getNodeId(Predicate versionPredicate) throws IOException if (versionPredicate.test(version)) { return id; } + return id; } return null; } @@ -270,6 +272,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")); break; case MIXED: + // todo: verify this test can be removed in 3.0.0 final String newNode = getNodeId(v -> v.equals(Version.CURRENT)); final String oldNode = getNodeId(v -> v.before(Version.CURRENT)); // remove the replica and guaranteed the primary is placed on the old node @@ -348,11 +351,7 @@ public void testRecovery() throws Exception { if (randomBoolean()) { indexDocs(index, i, 1); // update } else if (randomBoolean()) { - if (getNodeId(v -> v.onOrAfter(LegacyESVersion.V_7_0_0)) == null) { - client().performRequest(new Request("DELETE", index + "/test/" + i)); - } else { - client().performRequest(new Request("DELETE", index + "/_doc/" + i)); - } + client().performRequest(new Request("DELETE", index + "/" + MapperService.SINGLE_MAPPING_NAME + "/" + i)); } } } diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java index c50af0084b000..634dc0628f27a 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java @@ -59,13 +59,8 @@ public void testSystemIndicesUpgrades() throws Exception { Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)) { - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\", \"_type\" : \"_doc\"}}\n" + - "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); - } else { - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}\n" + - "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); - } + bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}\n" + + "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); client().performRequest(bulk); // start a async reindex job 
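For the bulk change above, the typeless action format is the relevant detail: each NDJSON line is a complete JSON object, with the action metadata on one line and the document source on the next, so no _type key (and therefore no UPGRADE_FROM_VERSION branch) is needed on 7.x and later clusters. A short sketch using the same field names as the test, with the shape of the standard _bulk API:

    Request bulk = new Request("POST", "/_bulk");
    bulk.addParameter("refresh", "true");
    // one complete JSON object per line: action metadata, then the document source
    bulk.setJsonEntity(
        "{\"index\": {\"_index\": \"test_index_old\"}}\n"
        + "{\"f1\": \"v1\", \"f2\": \"v2\"}\n");
    client().performRequest(bulk);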
@@ -91,10 +86,6 @@ public void testSystemIndicesUpgrades() throws Exception { // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); getTasksIndex.addParameter("allow_no_indices", "false"); - if (UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)) { - getTasksIndex.addParameter("include_type_name", "false"); - } - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); diff --git a/server/src/main/java/org/opensearch/Build.java b/server/src/main/java/org/opensearch/Build.java index 364b17ad4aa33..13c951b10cfe3 100644 --- a/server/src/main/java/org/opensearch/Build.java +++ b/server/src/main/java/org/opensearch/Build.java @@ -207,58 +207,27 @@ public String date() { } public static Build readBuild(StreamInput in) throws IOException { - final String distribution; - final Type type; // the following is new for opensearch: we write the distribution to support any "forks" - if (in.getVersion().onOrAfter(Version.V_1_0_0)) { - distribution = in.readString(); - } else { - distribution = "other"; - } - - // The following block is kept for existing BWS tests to pass. - // TODO - clean up this code when we remove all v6 bwc tests. - // TODO - clean this up when OSS flavor is removed in all of the code base - // (Integ test zip still write OSS as distribution) - // See issue: https://github.com/opendistro-for-elasticsearch/search/issues/159 - if (in.getVersion().before(Version.V_1_3_0)) { - String flavor = in.readString(); - } + final String distribution = in.readString(); // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - type = Type.fromDisplayName(in.readString(), false); + final Type type = Type.fromDisplayName(in.readString(), false); String hash = in.readString(); String date = in.readString(); boolean snapshot = in.readBoolean(); - - final String version; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - version = in.readString(); - } else { - version = in.getVersion().toString(); - } + final String version = in.readString(); return new Build(type, hash, date, snapshot, version, distribution); } public static void writeBuild(Build build, StreamOutput out) throws IOException { // the following is new for opensearch: we write the distribution name to support any "forks" of the code - if (out.getVersion().onOrAfter(Version.V_1_0_0)) { - out.writeString(build.distribution); - } + out.writeString(build.distribution); - // The following block is kept for existing BWS tests to pass. - // TODO - clean up this code when we remove all v6 bwc tests. 
- // TODO - clean this up when OSS flavor is removed in all of the code base - if (out.getVersion().before(Version.V_1_3_0)) { - out.writeString("oss"); - } final Type buildType = build.type(); out.writeString(buildType.displayName()); out.writeString(build.hash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeString(build.getQualifiedVersion()); - } + out.writeString(build.getQualifiedVersion()); } /** diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index d4ac3c7d2f8b1..1eb22a6bef3b5 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,10 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_0_0 = new LegacyESVersion(7000099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_0_1 = new LegacyESVersion(7000199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_1_0 = new LegacyESVersion(7010099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_1_1 = new LegacyESVersion(7010199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final LegacyESVersion V_7_2_0 = new LegacyESVersion(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final LegacyESVersion V_7_2_1 = new LegacyESVersion(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final LegacyESVersion V_7_3_0 = new LegacyESVersion(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 4b6ca173ec692..17ece23f819a2 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -1533,7 +1533,7 @@ private enum OpenSearchExceptionHandle { org.opensearch.cluster.coordination.CoordinationStateRejectedException.class, org.opensearch.cluster.coordination.CoordinationStateRejectedException::new, 150, - LegacyESVersion.V_7_0_0 + UNKNOWN_VERSION_ADDED ), SNAPSHOT_IN_PROGRESS_EXCEPTION( org.opensearch.snapshots.SnapshotInProgressException.class, diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 1bffe9ec98ec5..3387eee2dffc8 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -412,7 +412,7 @@ private Version computeMinIndexCompatVersion() { } else if (major == 7 || major == 1) { return LegacyESVersion.fromId(6000026); } else if (major == 2) { - return LegacyESVersion.V_7_0_0; + return LegacyESVersion.fromId(7000099); } else { bwcMajor = major - 1; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index 89cd112d30c79..d2d7d843e19db 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -32,14 +32,12 @@ package org.opensearch.action.admin.cluster.state; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import 
org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Objects; @@ -59,9 +57,6 @@ public ClusterStateResponse(StreamInput in) throws IOException { super(in); clusterName = new ClusterName(in); clusterState = in.readOptionalWriteable(innerIn -> ClusterState.readFrom(innerIn, null)); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - new ByteSizeValue(in); - } waitForTimedOut = in.readBoolean(); } @@ -98,9 +93,6 @@ public boolean isWaitForTimedOut() { public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeOptionalWriteable(clusterState); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - ByteSizeValue.ZERO.writeTo(out); - } out.writeBoolean(waitForTimedOut); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index 62f51aa3f3bff..eb2d2706a6531 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -284,9 +284,7 @@ public AliasActions(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { isHidden = in.readOptionalBoolean(); } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - originalAliases = in.readStringArray(); - } + originalAliases = in.readStringArray(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { mustExist = in.readOptionalBoolean(); } else { @@ -308,9 +306,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { out.writeOptionalBoolean(isHidden); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(originalAliases); - } + out.writeStringArray(originalAliases); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { out.writeOptionalBoolean(mustExist); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 95837d82be7ac..302c2aad64bb4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; @@ -126,9 +125,6 @@ public CreateIndexRequest(StreamInput in) throws IOException { for (int i = 0; i < aliasesSize; i++) { aliases.add(new Alias(in)); } - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // updateAllTypes - } waitForActiveShards = ActiveShardCount.readFrom(in); } @@ -505,9 +501,6 @@ public void writeTo(StreamOutput out) throws IOException { for (Alias alias : aliases) { alias.writeTo(out); } - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); // updateAllTypes - } waitForActiveShards.writeTo(out); } } diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 64f76db5e1549..6d238a385231f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.OpenSearchException; -import org.opensearch.Version; import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.single.shard.TransportSingleShardAction; @@ -120,8 +119,7 @@ protected ShardsIterator shards(ClusterState state, InternalRequest request) { protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) { assert shardId != null; IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - Version indexCreatedVersion = indexService.mapperService().getIndexSettings().getIndexVersionCreated(); - Predicate metadataFieldPredicate = (f) -> indicesService.isMetadataField(indexCreatedVersion, f); + Predicate metadataFieldPredicate = (f) -> indicesService.isMetadataField(f); Predicate fieldPredicate = metadataFieldPredicate.or(indicesService.getFieldFilter().apply(shardId.getIndexName())); DocumentMapper documentMapper = indexService.mapperService().documentMapper(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 85fd74f0762a5..a8eeedd4a3e4c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -117,9 +117,6 @@ public PutMappingRequest(StreamInput in) throws IOException { } } source = in.readString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // updateAllTypes - } concreteIndex = in.readOptionalWriteable(Index::new); origin = in.readOptionalString(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { @@ -349,9 +346,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(source); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); // updateAllTypes - } out.writeOptionalWriteable(concreteIndex); out.writeOptionalString(origin); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index ce723df0c383a..86880c0211c1d 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -33,7 +33,6 @@ package org.opensearch.action.delete; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.CompositeIndicesRequest; @@ -96,9 +95,6 @@ public 
DeleteRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); ifSeqNo = in.readZLong(); @@ -280,9 +276,6 @@ private void writeBody(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing()); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeLong(version); out.writeByte(versionType.getValue()); out.writeZLong(ifSeqNo); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index 99962741299ca..7d9ab4ff93f59 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -153,8 +153,7 @@ private FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesInd for (String field : fieldNames) { MappedFieldType ft = mapperService.fieldType(field); if (ft != null) { - if (indicesService.isMetadataField(mapperService.getIndexSettings().getIndexVersionCreated(), field) - || fieldPredicate.test(ft.name())) { + if (indicesService.isMetadataField(field) || fieldPredicate.test(ft.name())) { IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( field, ft.familyTypeName(), diff --git a/server/src/main/java/org/opensearch/action/get/GetRequest.java b/server/src/main/java/org/opensearch/action/get/GetRequest.java index 5f740ba789bb2..64148f070cc16 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.get; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.RealtimeRequest; @@ -89,9 +88,6 @@ public GetRequest() {} } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); - } preference = in.readOptionalString(); refresh = in.readBoolean(); storedFields = in.readOptionalStringArray(); @@ -260,9 +256,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); - } out.writeOptionalString(preference); out.writeBoolean(refresh); diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index 00df8657736ae..91f506dafafe1 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.get; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.ActionRequest; @@ -114,9 +113,6 @@ public Item(StreamInput in) throws IOException { } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - 
in.readOptionalString(); // _parent - } storedFields = in.readOptionalStringArray(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); @@ -211,9 +207,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeOptionalStringArray(storedFields); out.writeLong(version); out.writeByte(versionType.getValue()); diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index f863c4a11340e..381eca2dc716f 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -148,9 +148,6 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio } id = in.readOptionalString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } source = in.readBytesReference(); opType = OpType.fromId(in.readByte()); version = in.readLong(); @@ -669,9 +666,6 @@ private void writeBody(StreamOutput out) throws IOException { } out.writeOptionalString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeBytesReference(source); out.writeByte(opType.getId()); out.writeLong(version); diff --git a/server/src/main/java/org/opensearch/action/main/MainResponse.java b/server/src/main/java/org/opensearch/action/main/MainResponse.java index 691bbda512275..0fbfdab9ba294 100644 --- a/server/src/main/java/org/opensearch/action/main/MainResponse.java +++ b/server/src/main/java/org/opensearch/action/main/MainResponse.java @@ -33,7 +33,6 @@ package org.opensearch.action.main; import org.opensearch.Build; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterName; @@ -71,9 +70,6 @@ public class MainResponse extends ActionResponse implements ToXContentObject { clusterName = new ClusterName(in); clusterUuid = in.readString(); build = Build.readBuild(in); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); - } } public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build) { @@ -111,9 +107,6 @@ public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeString(clusterUuid); Build.writeBuild(build, out); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); - } } @Override diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java index 6c25a16a65c75..c4ba3becbc151 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionResponse; @@ -147,11 +146,7 @@ public MultiSearchResponse(StreamInput in) throws IOException { for (int i = 0; i < items.length; i++) { items[i] = new Item(in); } - if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - tookInMillis = in.readVLong(); - } else { - tookInMillis = 0L; - } + tookInMillis = in.readVLong(); } public MultiSearchResponse(Item[] items, long tookInMillis) { @@ -184,9 +179,7 @@ public void writeTo(StreamOutput out) throws IOException { for (Item item : items) { item.writeTo(out); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(tookInMillis); - } + out.writeVLong(tookInMillis); } @Override diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index da34dab6383d9..e4dd0d0b1a116 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -251,9 +251,7 @@ public SearchRequest(StreamInput in) throws IOException { absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; finalReduce = true; } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - ccsMinimizeRoundtrips = in.readBoolean(); - } + ccsMinimizeRoundtrips = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_1_1_0)) { cancelAfterTimeInterval = in.readOptionalTimeValue(); @@ -288,9 +286,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(absoluteStartMillis); out.writeBoolean(finalReduce); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(ccsMinimizeRoundtrips); - } + out.writeBoolean(ccsMinimizeRoundtrips); if (out.getVersion().onOrAfter(Version.V_1_1_0)) { out.writeOptionalTimeValue(cancelAfterTimeInterval); diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java index cf2d10d2f1db3..dcd5feda0004a 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.termvectors; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; @@ -189,10 +188,6 @@ public TermVectorsRequest() {} xContentType = in.readEnum(XContentType.class); } routing = in.readOptionalString(); - - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } preference = in.readOptionalString(); long flags = in.readVLong(); @@ -541,9 +536,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(xContentType); } out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeOptionalString(preference); long longFlags = 0; for (Flag flag : flagsEnum) { diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index d434f134f4321..abd3c31597c18 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -170,9 +170,6 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } if (in.readBoolean()) { script = 
new Script(in); } @@ -181,26 +178,11 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti if (in.readBoolean()) { doc = new IndexRequest(shardId, in); } - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - String[] fields = in.readOptionalStringArray(); - if (fields != null) { - throw new IllegalArgumentException("[fields] is no longer supported"); - } - } fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); if (in.readBoolean()) { upsertRequest = new IndexRequest(shardId, in); } docAsUpsert = in.readBoolean(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - long version = in.readLong(); - VersionType versionType = VersionType.readFromStream(in); - if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { - throw new UnsupportedOperationException( - "versioned update requests have been removed in 7.0. Use if_seq_no and if_primary_term" - ); - } - } ifSeqNo = in.readZLong(); ifPrimaryTerm = in.readVLong(); detectNoop = in.readBoolean(); @@ -893,10 +875,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } - boolean hasScript = script != null; out.writeBoolean(hasScript); if (hasScript) { @@ -917,9 +895,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { doc.writeTo(out); } } - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalStringArray(null); - } out.writeOptionalWriteable(fetchSourceContext); if (upsertRequest == null) { out.writeBoolean(false); @@ -935,10 +910,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { } } out.writeBoolean(docAsUpsert); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeLong(Versions.MATCH_ANY); - out.writeByte(VersionType.INTERNAL.getValue()); - } out.writeZLong(ifSeqNo); out.writeVLong(ifPrimaryTerm); out.writeBoolean(detectNoop); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 1c7e7cd0419e2..7ac716084793d 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterName; @@ -1771,14 +1770,8 @@ protected void sendApplyCommit( } } - // TODO: only here temporarily for BWC development, remove once complete - public static Settings.Builder addZen1Attribute(boolean isZen1Node, Settings.Builder builder) { - return builder.put("node.attr.zen1", isZen1Node); - } - // TODO: only here temporarily for BWC development, remove once complete public static boolean isZen1Node(DiscoveryNode discoveryNode) { - return discoveryNode.getVersion().before(LegacyESVersion.V_7_0_0) - || (Booleans.isTrue(discoveryNode.getAttributes().getOrDefault("zen1", "false"))); + return Booleans.isTrue(discoveryNode.getAttributes().getOrDefault("zen1", "false")); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java 
b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index 35ee222541771..223127783621e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; @@ -161,9 +160,6 @@ public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); // routing out.writeBoolean(routingRequired); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(false); // hasParentField - } } @Override @@ -190,9 +186,6 @@ public MappingMetadata(StreamInput in) throws IOException { source = CompressedXContent.readCompressedString(in); // routing routingRequired = in.readBoolean(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // hasParentField - } } public static Diff readDiffFrom(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index eb5e8bbc2d49b..fb8123f20e904 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -981,15 +981,9 @@ private static class MetadataDiff implements Diff { MetadataDiff(StreamInput in) throws IOException { clusterUUID = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - clusterUUIDCommitted = in.readBoolean(); - } + clusterUUIDCommitted = in.readBoolean(); version = in.readLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata = new CoordinationMetadata(in); - } else { - coordinationMetadata = CoordinationMetadata.EMPTY_METADATA; - } + coordinationMetadata = new CoordinationMetadata(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { @@ -1005,13 +999,9 @@ private static class MetadataDiff implements Diff { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(clusterUUID); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(clusterUUIDCommitted); - } + out.writeBoolean(clusterUUIDCommitted); out.writeLong(version); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata.writeTo(out); - } + coordinationMetadata.writeTo(out); Settings.writeSettingsToStream(transientSettings, out); Settings.writeSettingsToStream(persistentSettings, out); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { @@ -1043,12 +1033,8 @@ public static Metadata readFrom(StreamInput in) throws IOException { Builder builder = new Builder(); builder.version = in.readLong(); builder.clusterUUID = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.clusterUUIDCommitted = in.readBoolean(); - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.coordinationMetadata(new CoordinationMetadata(in)); - } + builder.clusterUUIDCommitted = in.readBoolean(); + builder.coordinationMetadata(new CoordinationMetadata(in)); builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { @@ -1074,12 +1060,8 @@ public static Metadata readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeString(clusterUUID); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(clusterUUIDCommitted); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata.writeTo(out); - } + out.writeBoolean(clusterUUIDCommitted); + coordinationMetadata.writeTo(out); writeSettingsToStream(transientSettings, out); writeSettingsToStream(persistentSettings, out); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index d78e5e872fd2b..36e25b5458b76 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.Version; @@ -1385,21 +1384,17 @@ static void prepareResizeIndexSettings( * the less default split operations are supported */ public static int calculateNumRoutingShards(int numShards, Version indexVersionCreated) { - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_0_0)) { - // only select this automatically for indices that are created on or after 7.0 this will prevent this new behaviour - // until we have a fully upgraded cluster. Additionally it will make integratin testing easier since mixed clusters - // will always have the behavior of the min node in the cluster. - // - // We use as a default number of routing shards the higher number that can be expressed - // as {@code numShards * 2^x`} that is less than or equal to the maximum number of shards: 1024. - int log2MaxNumShards = 10; // logBase2(1024) - int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(logBase2(numShards)) - int numSplits = log2MaxNumShards - log2NumShards; - numSplits = Math.max(1, numSplits); // Ensure the index can be split at least once - return numShards * 1 << numSplits; - } else { - return numShards; - } + // only select this automatically for indices that are created on or after 7.0 this will prevent this new behaviour + // until we have a fully upgraded cluster. Additionally it will make integratin testing easier since mixed clusters + // will always have the behavior of the min node in the cluster. + // + // We use as a default number of routing shards the higher number that can be expressed + // as {@code numShards * 2^x`} that is less than or equal to the maximum number of shards: 1024. 
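+        // A quick worked example of the formula described above, with an illustrative shard count:
+        // for numShards = 5, log2NumShards = 32 - Integer.numberOfLeadingZeros(5 - 1) = 3 (i.e. ceil(log2(5))),
+        // numSplits = max(1, 10 - 3) = 7, and the result is (5 * 1) << 7 = 640,
+        // the largest numShards * 2^x that does not exceed 1024.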
+ int log2MaxNumShards = 10; // logBase2(1024) + int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(logBase2(numShards)) + int numSplits = log2MaxNumShards - log2NumShards; + numSplits = Math.max(1, numSplits); // Ensure the index can be split at least once + return numShards * 1 << numSplits; } public static void validateTranslogRetentionSettings(Settings indexSettings) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index 49c18fe44cc04..489c6125f7d13 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -322,11 +322,7 @@ public UnassignedInfo(StreamInput in) throws IOException { } public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_0_0) && reason == Reason.INDEX_CLOSED) { - out.writeByte((byte) Reason.REINITIALIZED.ordinal()); - } else { - out.writeByte((byte) reason.ordinal()); - } + out.writeByte((byte) reason.ordinal()); out.writeLong(unassignedTimeMillis); // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs out.writeBoolean(delayed); diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java index c396f6c88fd57..7160cb1e6d233 100644 --- a/server/src/main/java/org/opensearch/common/Rounding.java +++ b/server/src/main/java/org/opensearch/common/Rounding.java @@ -459,20 +459,13 @@ static class TimeUnitRounding extends Rounding { } TimeUnitRounding(StreamInput in) throws IOException { - this( - DateTimeUnit.resolve(in.readByte()), - in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? in.readZoneId() : DateUtils.of(in.readString()) - ); + this(DateTimeUnit.resolve(in.readByte()), in.readZoneId()); } @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeByte(unit.getId()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeZoneId(timeZone); - } else { - out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); - } + out.writeZoneId(timeZone); } @Override @@ -924,17 +917,13 @@ static class TimeIntervalRounding extends Rounding { } TimeIntervalRounding(StreamInput in) throws IOException { - this(in.readVLong(), in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? 
in.readZoneId() : DateUtils.of(in.readString())); + this(in.readVLong(), in.readZoneId()); } @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeVLong(interval); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeZoneId(timeZone); - } else { - out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); - } + out.writeZoneId(timeZone); } @Override diff --git a/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java b/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java index ee9977bfa2eb0..fda089cf26942 100644 --- a/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java +++ b/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java @@ -31,7 +31,6 @@ package org.opensearch.common.breaker; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -55,11 +54,7 @@ public CircuitBreakingException(StreamInput in) throws IOException { super(in); byteLimit = in.readLong(); bytesWanted = in.readLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - durability = in.readEnum(CircuitBreaker.Durability.class); - } else { - durability = CircuitBreaker.Durability.PERMANENT; - } + durability = in.readEnum(CircuitBreaker.Durability.class); } public CircuitBreakingException(String message, CircuitBreaker.Durability durability) { @@ -78,9 +73,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeLong(byteLimit); out.writeLong(bytesWanted); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeEnum(durability); - } + out.writeEnum(durability); } public long getBytesWanted() { diff --git a/server/src/main/java/org/opensearch/common/joda/Joda.java b/server/src/main/java/org/opensearch/common/joda/Joda.java index 9ecb3f2236e7c..7a82b8ce49d21 100644 --- a/server/src/main/java/org/opensearch/common/joda/Joda.java +++ b/server/src/main/java/org/opensearch/common/joda/Joda.java @@ -32,8 +32,6 @@ package org.opensearch.common.joda; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.time.DateFormatter; @@ -388,17 +386,6 @@ public DateTimeField getField(Chronology chronology) { } }; - /** - * Checks if a pattern is Joda-style. - * Joda style patterns are not always compatible with java.time patterns. 
- * @param version - creation version of the index where pattern was used - * @param pattern - the pattern to check - * @return - true if pattern is joda style, otherwise false - */ - public static boolean isJodaPattern(Version version, String pattern) { - return version.before(LegacyESVersion.V_7_0_0) && pattern.startsWith("8") == false; - } - /** * parses epcoch timers * diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 2692a8fa2b914..7b69dff020bc4 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -97,7 +97,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; @@ -322,10 +321,7 @@ public static boolean exists(IndexSearcher searcher, Query query) throws IOExcep public static TotalHits readTotalHits(StreamInput in) throws IOException { long totalHits = in.readVLong(); - TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - totalHitsRelation = in.readEnum(TotalHits.Relation.class); - } + TotalHits.Relation totalHitsRelation = in.readEnum(TotalHits.Relation.class); return new TotalHits(totalHits, totalHitsRelation); } @@ -444,11 +440,7 @@ public static ScoreDoc readScoreDoc(StreamInput in) throws IOException { public static void writeTotalHits(StreamOutput out, TotalHits totalHits) throws IOException { out.writeVLong(totalHits.value); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeEnum(totalHits.relation); - } else if (totalHits.value > 0 && totalHits.relation != TotalHits.Relation.EQUAL_TO) { - throw new IllegalArgumentException("Cannot serialize approximate total hit counts to nodes that are on a version < 7.0.0"); - } + out.writeEnum(totalHits.relation); } public static void writeTopDocs(StreamOutput out, TopDocsAndMaxScore topDocs) throws IOException { @@ -648,20 +640,16 @@ public static void writeSortField(StreamOutput out, SortField sortField) throws } private static Number readExplanationValue(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - final int numberType = in.readByte(); - switch (numberType) { - case 0: - return in.readFloat(); - case 1: - return in.readDouble(); - case 2: - return in.readZLong(); - default: - throw new IOException("Unexpected number type: " + numberType); - } - } else { - return in.readFloat(); + final int numberType = in.readByte(); + switch (numberType) { + case 0: + return in.readFloat(); + case 1: + return in.readDouble(); + case 2: + return in.readZLong(); + default: + throw new IOException("Unexpected number type: " + numberType); } } @@ -680,19 +668,15 @@ public static Explanation readExplanation(StreamInput in) throws IOException { } private static void writeExplanationValue(StreamOutput out, Number value) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - if (value instanceof Float) { - out.writeByte((byte) 0); - out.writeFloat(value.floatValue()); - } else if (value instanceof Double) { - out.writeByte((byte) 1); - out.writeDouble(value.doubleValue()); - } else { - out.writeByte((byte) 2); - out.writeZLong(value.longValue()); - } - } else { + if 
(value instanceof Float) { + out.writeByte((byte) 0); out.writeFloat(value.floatValue()); + } else if (value instanceof Double) { + out.writeByte((byte) 1); + out.writeDouble(value.doubleValue()); + } else { + out.writeByte((byte) 2); + out.writeZLong(value.longValue()); } } diff --git a/server/src/main/java/org/opensearch/env/NodeMetadata.java b/server/src/main/java/org/opensearch/env/NodeMetadata.java index 3944ecfd72d4c..03e92424c4517 100644 --- a/server/src/main/java/org/opensearch/env/NodeMetadata.java +++ b/server/src/main/java/org/opensearch/env/NodeMetadata.java @@ -32,7 +32,6 @@ package org.opensearch.env; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.ObjectParser; @@ -93,7 +92,7 @@ public Version nodeVersion() { public NodeMetadata upgradeToCurrentVersion() { if (nodeVersion.equals(Version.V_EMPTY)) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; + assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 : "version is required in the node metadata from v4 onwards"; return new NodeMetadata(nodeId, Version.CURRENT); } @@ -127,8 +126,7 @@ public void setNodeVersionId(int nodeVersionId) { public NodeMetadata build() { final Version nodeVersion; if (this.nodeVersion == null) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 - : "version is required in the node metadata from v9 onwards"; + assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 : "version is required in the node metadata from v4 onwards"; nodeVersion = Version.V_EMPTY; } else { nodeVersion = this.nodeVersion; diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index f70fdea153893..48dd0ddf90413 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; @@ -136,8 +135,8 @@ public void start( long currentTerm = onDiskState.currentTerm; if (onDiskState.empty()) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 - : "legacy metadata loader is not needed anymore from v9 onwards"; + assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 + : "legacy metadata loader is not needed anymore from v4 onwards"; final Tuple legacyState = metaStateService.loadFullState(); if (legacyState.v1().isEmpty() == false) { metadata = legacyState.v2(); diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java index 8ec2b70001fc9..7a78d97edf360 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java @@ -35,7 +35,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.Version; import 
org.opensearch.cluster.metadata.IndexMetadata; @@ -213,12 +212,10 @@ public Analyzer getAnalyzer(String analyzer) throws IOException { } }); } else if ("standard_html_strip".equals(analyzer)) { - if (Version.CURRENT.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "[standard_html_strip] analyzer is not supported for new indices, " - + "use a custom analyzer using [standard] tokenizer and [html_strip] char_filter, plus [lowercase] filter" - ); - } + throw new IllegalArgumentException( + "[standard_html_strip] analyzer is not supported for new indices, " + + "use a custom analyzer using [standard] tokenizer and [html_strip] char_filter, plus [lowercase] filter" + ); } return analyzerProvider.get(environment, analyzer).get(); diff --git a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java index 701a9302fc164..e66ae20508dfe 100644 --- a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java @@ -35,8 +35,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.DisableGraphAttribute; import org.apache.lucene.analysis.shingle.ShingleFilter; -import org.opensearch.LegacyESVersion; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; @@ -48,8 +46,6 @@ */ public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory { - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ShingleTokenFilterFactory.class); - private final Factory factory; public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { @@ -61,27 +57,17 @@ public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment enviro int shingleDiff = maxShingleSize - minShingleSize + (outputUnigrams ? 1 : 0); if (shingleDiff > maxAllowedShingleDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "In Shingle TokenFilter the difference between max_shingle_size and min_shingle_size (and +1 if outputting unigrams)" - + " must be less than or equal to: [" - + maxAllowedShingleDiff - + "] but was [" - + shingleDiff - + "]. This limit" - + " can be set by changing the [" - + IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey() - + "] index level setting." - ); - } else { - deprecationLogger.deprecate( - "excessive_shingle_diff", - "Deprecated big difference between maxShingleSize and minShingleSize" - + " in Shingle TokenFilter, expected difference must be less than or equal to: [" - + maxAllowedShingleDiff - + "]" - ); - } + throw new IllegalArgumentException( + "In Shingle TokenFilter the difference between max_shingle_size and min_shingle_size (and +1 if outputting unigrams)" + + " must be less than or equal to: [" + + maxAllowedShingleDiff + + "] but was [" + + shingleDiff + + "]. This limit" + + " can be set by changing the [" + + IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey() + + "] index level setting." 
+ ); } Boolean outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false); @@ -105,16 +91,7 @@ public TokenStream create(TokenStream tokenStream) { @Override public TokenFilterFactory getSynonymFilter() { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms"); - } else { - DEPRECATION_LOGGER.deprecate( - name() + "_synonym_tokenfilters", - "Token filter " + name() + "] will not be usable to parse synonyms after v7.0" - ); - } - return this; - + throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms"); } public Factory getInnerFactory() { diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index 8e01c1f41f078..4b19fe4c5de79 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -45,7 +45,6 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.geo.ShapeRelation; -import org.opensearch.common.joda.Joda; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.time.DateFormatter; @@ -260,11 +259,7 @@ public Builder( private DateFormatter buildFormatter() { try { - if (Joda.isJodaPattern(indexCreatedVersion, format.getValue())) { - return Joda.forPattern(format.getValue()).withLocale(locale.getValue()); - } else { - return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); - } + return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Error parsing [format] on field [" + name() + "]: " + e.getMessage(), e); } diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java index 1e5b3b4a9c93e..23d58fa18b7e3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java @@ -40,7 +40,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchGenerationException; -import org.opensearch.Version; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; @@ -90,12 +89,8 @@ public Builder(RootObjectMapper.Builder builder, MapperService mapperService) { this.builderContext = new Mapper.BuilderContext(indexSettings, new ContentPath(1)); this.rootObjectMapper = builder.build(builderContext); - final String type = rootObjectMapper.name(); final DocumentMapper existingMapper = mapperService.documentMapper(); - final Version indexCreatedVersion = mapperService.getIndexSettings().getIndexVersionCreated(); - final Map metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers( - indexCreatedVersion - ); + final Map metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers(); for (Map.Entry entry : metadataMapperParsers.entrySet()) { final String name = entry.getKey(); final MetadataFieldMapper existingMetadataMapper = existingMapper == null diff --git 
a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java index 9fa088396a38b..237d69e3ad244 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java @@ -91,7 +91,7 @@ public DocumentMapperParser( this.scriptService = scriptService; this.typeParsers = mapperRegistry.getMapperParsers(); this.indexVersionCreated = indexSettings.getIndexVersionCreated(); - this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(indexVersionCreated); + this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(); } public Mapper.TypeParser.ParserContext parserContext() { diff --git a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java index 71d76c6a835c2..a2224e7214f4b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java @@ -41,7 +41,6 @@ import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.Explicit; @@ -577,11 +576,7 @@ public void doXContentBody(XContentBuilder builder, boolean includeDefaults, Par } else if (includeDefaults && fieldType().treeLevels() == 0) { // defaults only make sense if tree levels are not specified builder.field(DeprecatedParameters.Names.PRECISION.getPreferredName(), DistanceUnit.METERS.toString(50)); } - - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.field(DeprecatedParameters.Names.STRATEGY.getPreferredName(), fieldType().strategy().getStrategyName()); - } - + builder.field(DeprecatedParameters.Names.STRATEGY.getPreferredName(), fieldType().strategy().getStrategyName()); if (includeDefaults || fieldType().distanceErrorPct() != fieldType().defaultDistanceErrorPct) { builder.field(DeprecatedParameters.Names.DISTANCE_ERROR_PCT.getPreferredName(), fieldType().distanceErrorPct()); } diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index f0d0b77396b0e..af37ddc41b567 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -36,7 +36,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.opensearch.Assertions; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; @@ -228,8 +227,7 @@ public MapperService( this.mapperRegistry = mapperRegistry; this.idFieldDataEnabled = idFieldDataEnabled; - if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings()) - && indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { + if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())) { throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); } } @@ -674,7 
+672,7 @@ public void close() throws IOException { * this method considers all mapper plugins */ public boolean isMetadataField(String field) { - return mapperRegistry.isMetadataField(indexVersionCreated, field); + return mapperRegistry.isMetadataField(field); } /** diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java index 5257609e0cba9..faf9fd5182690 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java @@ -40,7 +40,6 @@ import org.opensearch.common.Explicit; import org.opensearch.common.collect.Tuple; import org.opensearch.common.geo.ShapeRelation; -import org.opensearch.common.joda.Joda; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.Setting; @@ -197,12 +196,7 @@ protected RangeFieldType setupFieldType(BuilderContext context) { // The builder context may not have index created version, falling back to indexCreatedVersion // property of this mapper builder. - DateFormatter dateTimeFormatter; - if (Joda.isJodaPattern(context.indexCreatedVersionOrDefault(indexCreatedVersion), format.getValue())) { - dateTimeFormatter = Joda.forPattern(format.getValue()).withLocale(locale.getValue()); - } else { - dateTimeFormatter = DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); - } + DateFormatter dateTimeFormatter = DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); return new RangeFieldType( buildFullName(context), index.getValue(), diff --git a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java index 4e7b6fb51291b..fe3bcd81e72be 100644 --- a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -254,9 +253,6 @@ public MultiMatchQueryBuilder(StreamInput in) throws IOException { maxExpansions = in.readVInt(); minimumShouldMatch = in.readOptionalString(); fuzzyRewrite = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalBoolean(); // unused use_dis_max flag - } tieBreaker = in.readOptionalFloat(); lenient = in.readOptionalBoolean(); cutoffFrequency = in.readOptionalFloat(); @@ -282,9 +278,6 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(maxExpansions); out.writeOptionalString(minimumShouldMatch); out.writeOptionalString(fuzzyRewrite); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalBoolean(null); - } out.writeOptionalFloat(tieBreaker); out.writeOptionalBoolean(lenient); out.writeOptionalFloat(cutoffFrequency); diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java index 1e3ec368df411..139b8fffbac3a 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java +++ 
b/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java @@ -63,7 +63,6 @@ import org.apache.lucene.search.similarities.NormalizationH2; import org.apache.lucene.search.similarities.NormalizationH3; import org.apache.lucene.search.similarities.NormalizationZ; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; @@ -157,22 +156,9 @@ private static BasicModel parseBasicModel(Version indexCreatedVersion, Settings if (model == null) { String replacement = LEGACY_BASIC_MODELS.get(basicModel); if (replacement != null) { - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "Basic model [" + basicModel + "] isn't supported anymore, " + "please use another model." - ); - } else { - deprecationLogger.deprecate( - basicModel + "_similarity_model_replaced", - "Basic model [" - + basicModel - + "] isn't supported anymore and has arbitrarily been replaced with [" - + replacement - + "]." - ); - model = BASIC_MODELS.get(replacement); - assert model != null; - } + throw new IllegalArgumentException( + "Basic model [" + basicModel + "] isn't supported anymore, " + "please use another model." + ); } } @@ -195,22 +181,9 @@ private static AfterEffect parseAfterEffect(Version indexCreatedVersion, Setting if (effect == null) { String replacement = LEGACY_AFTER_EFFECTS.get(afterEffect); if (replacement != null) { - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "After effect [" + afterEffect + "] isn't supported anymore, please use another effect." - ); - } else { - deprecationLogger.deprecate( - afterEffect + "_after_effect_replaced", - "After effect [" - + afterEffect - + "] isn't supported anymore and has arbitrarily been replaced with [" - + replacement - + "]." - ); - effect = AFTER_EFFECTS.get(replacement); - assert effect != null; - } + throw new IllegalArgumentException( + "After effect [" + afterEffect + "] isn't supported anymore, please use another effect." 
+ ); } } @@ -294,14 +267,7 @@ static void assertSettingsIsSubsetOf(String type, Version version, Settings sett unknownSettings.removeAll(Arrays.asList(supportedSettings)); unknownSettings.remove("type"); // used to figure out which sim this is if (unknownSettings.isEmpty() == false) { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); - } else { - deprecationLogger.deprecate( - "unknown_similarity_setting", - "Unknown settings for similarity of type [" + type + "]: " + unknownSettings - ); - } + throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); } } diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java index d575ec508acb6..c3fc7ffbb0fe5 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java @@ -39,12 +39,10 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.Similarity.SimScorer; import org.apache.lucene.util.BytesRef; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.TriFunction; import org.opensearch.common.logging.DeprecationLogger; @@ -76,25 +74,12 @@ public final class SimilarityService extends AbstractIndexComponent { static { Map>> defaults = new HashMap<>(); defaults.put(CLASSIC_SIMILARITY, version -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - return () -> { - throw new IllegalArgumentException( - "The [classic] similarity may not be used anymore. Please use the [BM25] " - + "similarity or build a custom [scripted] similarity instead." - ); - }; - } else { - final ClassicSimilarity similarity = SimilarityProviders.createClassicSimilarity(Settings.EMPTY, version); - return () -> { - deprecationLogger.deprecate( - "classic_similarity", - "The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " - + "instead." - ); - return similarity; - }; - } + return () -> { + throw new IllegalArgumentException( + "The [classic] similarity may not be used anymore. Please use the [BM25] " + + "similarity or build a custom [scripted] similarity instead." + ); + }; }); defaults.put("BM25", version -> { final LegacyBM25Similarity similarity = SimilarityProviders.createBM25Similarity(Settings.EMPTY, version); @@ -107,20 +92,10 @@ public final class SimilarityService extends AbstractIndexComponent { Map> builtIn = new HashMap<>(); builtIn.put(CLASSIC_SIMILARITY, (settings, version, script) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [classic] similarity may not be used anymore. Please use the [BM25] " - + "similarity or build a custom [scripted] similarity instead." 
- ); - } else { - deprecationLogger.deprecate( - "classic_similarity", - "The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " - + "instead." - ); - return SimilarityProviders.createClassicSimilarity(settings, version); - } + throw new IllegalArgumentException( + "The [classic] similarity may not be used anymore. Please use the [BM25] " + + "similarity or build a custom [scripted] similarity instead." + ); }); builtIn.put("BM25", (settings, version, scriptService) -> SimilarityProviders.createBM25Similarity(settings, version)); builtIn.put("boolean", (settings, version, scriptService) -> SimilarityProviders.createBooleanSimilarity(settings, version)); @@ -258,10 +233,7 @@ private static void validateScoresArePositive(Version indexCreatedVersion, Simil for (int freq = 1; freq <= 10; ++freq) { float score = scorer.score(freq, norm); if (score < 0) { - fail( - indexCreatedVersion, - "Similarities should not return negative scores:\n" + scorer.explain(Explanation.match(freq, "term freq"), norm) - ); + fail("Similarities should not return negative scores:\n" + scorer.explain(Explanation.match(freq, "term freq"), norm)); break; } } @@ -288,7 +260,6 @@ private static void validateScoresDoNotDecreaseWithFreq(Version indexCreatedVers float score = scorer.score(freq, norm); if (score < previousScore) { fail( - indexCreatedVersion, "Similarity scores should not decrease when term frequency increases:\n" + scorer.explain(Explanation.match(freq - 1, "term freq"), norm) + "\n" @@ -327,7 +298,6 @@ private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVers float score = scorer.score(1, norm); if (score > previousScore) { fail( - indexCreatedVersion, "Similarity scores should not increase when norm increases:\n" + scorer.explain(Explanation.match(1, "term freq"), norm - 1) + "\n" @@ -340,12 +310,8 @@ private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVers } } - private static void fail(Version indexCreatedVersion, String message) { - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException(message); - } else { - deprecationLogger.deprecate("similarity_failure", message); - } + private static void fail(String message) { + throw new IllegalArgumentException(message); } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 6808803ee0988..f2961c0f3b13d 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -40,10 +40,8 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ResourceAlreadyExistsException; -import org.opensearch.Version; import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -699,8 +697,7 @@ private synchronized IndexService createIndexService( IndexingOperationListener... 
indexingOperationListeners ) throws IOException { final IndexSettings idxSettings = new IndexSettings(indexMetadata, settings, indexScopedSettings); - if (idxSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0) - && EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.exists(idxSettings.getSettings())) { + if (EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.exists(idxSettings.getSettings())) { throw new IllegalArgumentException( "Setting [" + EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey() + "] was removed in version 7.0.0" ); @@ -1710,8 +1707,8 @@ public Function> getFieldFilter() { /** * Returns true if the provided field is a registered metadata field (including ones registered via plugins), false otherwise. */ - public boolean isMetadataField(Version indexCreatedVersion, String field) { - return mapperRegistry.isMetadataField(indexCreatedVersion, field); + public boolean isMetadataField(String field) { + return mapperRegistry.isMetadataField(field); } /** diff --git a/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java index cc87c982a684d..22be07dd90f94 100644 --- a/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java @@ -33,7 +33,6 @@ package org.opensearch.indices.analysis; import org.apache.lucene.analysis.LowerCaseFilter; -import org.apache.lucene.analysis.TokenStream; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -42,7 +41,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; -import org.opensearch.index.analysis.AbstractTokenFilterFactory; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.analysis.AnalyzerProvider; import org.opensearch.index.analysis.CharFilterFactory; @@ -152,20 +150,7 @@ private NamedRegistry> setupTokenFilters( tokenFilters.register("standard", new AnalysisProvider() { @Override public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - if (indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_0_0)) { - deprecationLogger.deprecate( - "standard_deprecation", - "The [standard] token filter name is deprecated and will be removed in a future version." 
- ); - } else { - throw new IllegalArgumentException("The [standard] token filter has been removed."); - } - return new AbstractTokenFilterFactory(indexSettings, name, settings) { - @Override - public TokenStream create(TokenStream tokenStream) { - return tokenStream; - } - }; + throw new IllegalArgumentException("The [standard] token filter has been removed."); } @Override diff --git a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java index 3a1d7b1ebb1e3..c26428309aec5 100644 --- a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java +++ b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java @@ -32,10 +32,8 @@ package org.opensearch.indices.mapper; -import org.opensearch.Version; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MetadataFieldMapper; -import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.plugins.MapperPlugin; import java.util.Collections; @@ -53,7 +51,6 @@ public final class MapperRegistry { private final Map mapperParsers; private final Map metadataMapperParsers; - private final Map metadataMapperParsersPre20; private final Function> fieldFilter; public MapperRegistry( @@ -63,9 +60,6 @@ public MapperRegistry( ) { this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers)); this.metadataMapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(metadataMapperParsers)); - Map tempPre20 = new LinkedHashMap<>(metadataMapperParsers); - tempPre20.remove(NestedPathFieldMapper.NAME); - this.metadataMapperParsersPre20 = Collections.unmodifiableMap(tempPre20); this.fieldFilter = fieldFilter; } @@ -81,15 +75,15 @@ public Map getMapperParsers() { * Return a map of the meta mappers that have been registered. The * returned map uses the name of the field as a key. */ - public Map getMetadataMapperParsers(Version indexCreatedVersion) { - return indexCreatedVersion.onOrAfter(Version.V_2_0_0) ? 
metadataMapperParsers : metadataMapperParsersPre20; + public Map getMetadataMapperParsers() { + return metadataMapperParsers; } /** * Returns true if the provided field is a registered metadata field, false otherwise */ - public boolean isMetadataField(Version indexCreatedVersion, String field) { - return getMetadataMapperParsers(indexCreatedVersion).containsKey(field); + public boolean isMetadataField(String field) { + return getMetadataMapperParsers().containsKey(field); } /** diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java index 819d67cb8621a..426551ab50f18 100644 --- a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java @@ -33,7 +33,6 @@ package org.opensearch.monitor.jvm; import org.apache.lucene.util.Constants; -import org.opensearch.LegacyESVersion; import org.opensearch.common.Booleans; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; @@ -305,13 +304,8 @@ public JvmInfo(StreamInput in) throws IOException { vmName = in.readString(); vmVersion = in.readString(); vmVendor = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - bundledJdk = in.readBoolean(); - usingBundledJdk = in.readOptionalBoolean(); - } else { - bundledJdk = false; - usingBundledJdk = null; - } + bundledJdk = in.readBoolean(); + usingBundledJdk = in.readOptionalBoolean(); startTime = in.readLong(); inputArguments = new String[in.readInt()]; for (int i = 0; i < inputArguments.length; i++) { @@ -341,10 +335,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(vmName); out.writeString(vmVersion); out.writeString(vmVendor); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(bundledJdk); - out.writeOptionalBoolean(usingBundledJdk); - } + out.writeBoolean(bundledJdk); + out.writeOptionalBoolean(usingBundledJdk); out.writeLong(startTime); out.writeInt(inputArguments.length); for (String inputArgument : inputArguments) { diff --git a/server/src/main/java/org/opensearch/script/ScriptStats.java b/server/src/main/java/org/opensearch/script/ScriptStats.java index 34d868f1d6046..9c8f1157cb718 100644 --- a/server/src/main/java/org/opensearch/script/ScriptStats.java +++ b/server/src/main/java/org/opensearch/script/ScriptStats.java @@ -89,7 +89,7 @@ public ScriptStats(ScriptContextStats context) { public ScriptStats(StreamInput in) throws IOException { compilations = in.readVLong(); cacheEvictions = in.readVLong(); - compilationLimitTriggered = in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? in.readVLong() : 0; + compilationLimitTriggered = in.readVLong(); contextStats = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? 
in.readList(ScriptContextStats::new) : Collections.emptyList(); } @@ -97,9 +97,7 @@ public ScriptStats(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeVLong(compilations); out.writeVLong(cacheEvictions); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(compilationLimitTriggered); - } + out.writeVLong(compilationLimitTriggered); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { out.writeList(contextStats); } diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index 84c46e400543a..4b592303ee253 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -34,7 +34,6 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -44,7 +43,6 @@ import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateMathParser; -import org.opensearch.common.time.DateUtils; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.search.aggregations.bucket.GeoTileUtils; @@ -224,34 +222,12 @@ public DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resolu public DateTime(StreamInput in) throws IOException { String datePattern = in.readString(); - String zoneId = in.readString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - this.timeZone = DateUtils.of(zoneId); - this.resolution = DateFieldMapper.Resolution.MILLISECONDS; - } else { - this.timeZone = ZoneId.of(zoneId); - this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt()); - } - final boolean isJoda; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - // if stream is from 7.7 Node it will have a flag indicating if format is joda - isJoda = in.readBoolean(); - } else { - /* - When received a stream from 6.0-6.latest Node it can be java if starts with 8 otherwise joda. - - If a stream is from [7.0 - 7.7) the boolean indicating that this is joda is not present. - This means that if an index was created in 6.x using joda pattern and then cluster was upgraded to - 7.x but earlier then 7.0, there is no information that can tell that the index is using joda style pattern. - It will be assumed that clusters upgrading from [7.0 - 7.7) are using java style patterns. - */ - isJoda = Joda.isJodaPattern(in.getVersion(), datePattern); - } + this.timeZone = ZoneId.of(zoneId); + this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt()); + final boolean isJoda = in.readBoolean(); this.formatter = isJoda ? 
Joda.forPattern(datePattern) : DateFormatter.forPattern(datePattern); - this.parser = formatter.toDateMathParser(); - } @Override @@ -262,16 +238,10 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(formatter.pattern()); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); - } else { - out.writeString(timeZone.getId()); - out.writeVInt(resolution.ordinal()); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - // in order not to loose information if the formatter is a joda we send a flag - out.writeBoolean(formatter instanceof JodaDateFormatter);// todo pg consider refactor to isJoda method.. - } + out.writeString(timeZone.getId()); + out.writeVInt(resolution.ordinal()); + // in order not to loose information if the formatter is a joda we send a flag + out.writeBoolean(formatter instanceof JodaDateFormatter);// todo pg consider refactor to isJoda method.. } public DateMathParser getDateMathParser() { diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java index 40c7a5791454c..7c30656505663 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.XContentBuilder; @@ -99,11 +98,7 @@ public InternalPercentilesBucket(StreamInput in) throws IOException { format = in.readNamedWriteable(DocValueFormat.class); percentiles = in.readDoubleArray(); percents = in.readDoubleArray(); - - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - keyed = in.readBoolean(); - } - + keyed = in.readBoolean(); computeLookup(); } @@ -112,10 +107,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); out.writeDoubleArray(percentiles); out.writeDoubleArray(percents); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(keyed); - } + out.writeBoolean(keyed); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java index bef97bbbaa83a..8e68e62b04766 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java @@ -34,7 +34,6 @@ import com.carrotsearch.hppc.DoubleArrayList; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -70,19 +69,13 @@ public PercentilesBucketPipelineAggregationBuilder(String name, String bucketsPa public PercentilesBucketPipelineAggregationBuilder(StreamInput in) throws IOException { super(in, NAME); percents = in.readDoubleArray(); - - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - 
keyed = in.readBoolean(); - } + keyed = in.readBoolean(); } @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDoubleArray(percents); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(keyed); - } + out.writeBoolean(keyed); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java index bd838fe23da8b..7fad7e233c424 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; @@ -76,19 +75,13 @@ public class PercentilesBucketPipelineAggregator extends BucketMetricsPipelineAg public PercentilesBucketPipelineAggregator(StreamInput in) throws IOException { super(in); percents = in.readDoubleArray(); - - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - keyed = in.readBoolean(); - } + keyed = in.readBoolean(); } @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeDoubleArray(percents); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(keyed); - } + out.writeBoolean(keyed); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java index b7e157b6050fc..ae76fd0a3aa3f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java @@ -15,7 +15,6 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.time.DateUtils; import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; @@ -86,11 +85,7 @@ public BaseMultiValuesSourceFieldConfig(StreamInput in) throws IOException { } this.missing = in.readGenericValue(); this.script = in.readOptionalWriteable(Script::new); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); - } else { - this.timeZone = in.readOptionalZoneId(); - } + this.timeZone = in.readOptionalZoneId(); } @Override @@ -102,11 +97,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeGenericValue(missing); out.writeOptionalWriteable(script); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); - } else { - out.writeOptionalZoneId(timeZone); - } + out.writeOptionalZoneId(timeZone); doWriteTo(out); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 
52afc6435d562..b492d9cadb975 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -31,12 +31,10 @@ package org.opensearch.search.aggregations.support; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.time.DateUtils; import org.opensearch.common.xcontent.AbstractObjectParser; import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.XContentBuilder; @@ -233,11 +231,7 @@ private void read(StreamInput in) throws IOException { } format = in.readOptionalString(); missing = in.readGenericValue(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); - } else { - timeZone = in.readOptionalZoneId(); - } + timeZone = in.readOptionalZoneId(); } @Override @@ -259,11 +253,7 @@ protected final void doWriteTo(StreamOutput out) throws IOException { } out.writeOptionalString(format); out.writeGenericValue(missing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); - } else { - out.writeOptionalZoneId(timeZone); - } + out.writeOptionalZoneId(timeZone); innerWriteTo(out); } diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index d3b131cf9f792..565932f1bca13 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -260,11 +260,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); sliceBuilder = in.readOptionalWriteable(SliceBuilder::new); collapse = in.readOptionalWriteable(CollapseBuilder::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - trackTotalHitsUpTo = in.readOptionalInt(); - } else { - trackTotalHitsUpTo = in.readBoolean() ? TRACK_TOTAL_HITS_ACCURATE : TRACK_TOTAL_HITS_DISABLED; - } + trackTotalHitsUpTo = in.readOptionalInt(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { if (in.readBoolean()) { fetchFields = in.readList(FieldAndFormat::new); @@ -326,11 +322,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(searchAfterBuilder); out.writeOptionalWriteable(sliceBuilder); out.writeOptionalWriteable(collapse); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeOptionalInt(trackTotalHitsUpTo); - } else { - out.writeBoolean(trackTotalHitsUpTo == null ? 
true : trackTotalHitsUpTo > SearchContext.TRACK_TOTAL_HITS_DISABLED); - } + out.writeOptionalInt(trackTotalHitsUpTo); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { out.writeBoolean(fetchFields != null); if (fetchFields != null) { diff --git a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java index 90cc547f62a95..4a82b8eba653f 100644 --- a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java @@ -147,16 +147,10 @@ public static void writeFieldStats(StreamOutput out, ObjectObjectHashMap= 0; out.writeVLong(statistics.maxDoc()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - // stats are always positive numbers - out.writeVLong(statistics.docCount()); - out.writeVLong(statistics.sumTotalTermFreq()); - out.writeVLong(statistics.sumDocFreq()); - } else { - out.writeVLong(addOne(statistics.docCount())); - out.writeVLong(addOne(statistics.sumTotalTermFreq())); - out.writeVLong(addOne(statistics.sumDocFreq())); - } + // stats are always positive numbers + out.writeVLong(statistics.docCount()); + out.writeVLong(statistics.sumTotalTermFreq()); + out.writeVLong(statistics.sumDocFreq()); } } @@ -188,16 +182,10 @@ static ObjectObjectHashMap readFieldStats(StreamIn final long docCount; final long sumTotalTermFreq; final long sumDocFreq; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - // stats are always positive numbers - docCount = in.readVLong(); - sumTotalTermFreq = in.readVLong(); - sumDocFreq = in.readVLong(); - } else { - docCount = subOne(in.readVLong()); - sumTotalTermFreq = subOne(in.readVLong()); - sumDocFreq = subOne(in.readVLong()); - } + // stats are always positive numbers + docCount = in.readVLong(); + sumTotalTermFreq = in.readVLong(); + sumDocFreq = in.readVLong(); CollectionStatistics stats = new CollectionStatistics(field, maxDoc, docCount, sumTotalTermFreq, sumDocFreq); fieldStatistics.put(field, stats); } diff --git a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java index 828c2f8c78d69..006a0627c337d 100644 --- a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java @@ -258,11 +258,7 @@ public ShardSearchRequest(StreamInput in) throws IOException { outboundNetworkTime = in.readVLong(); } clusterAlias = in.readOptionalString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - allowPartialSearchResults = in.readBoolean(); - } else { - allowPartialSearchResults = false; - } + allowPartialSearchResults = in.readBoolean(); indexRoutings = in.readStringArray(); preference = in.readOptionalString(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { @@ -336,9 +332,7 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce out.writeVLong(outboundNetworkTime); } out.writeOptionalString(clusterAlias); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(allowPartialSearchResults); - } + out.writeBoolean(allowPartialSearchResults); if (asKey == false) { out.writeStringArray(indexRoutings); out.writeOptionalString(preference); diff --git a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java index 
21e8a5646b9a5..1fd94eaddb2dd 100644 --- a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java @@ -35,7 +35,6 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; @@ -260,16 +259,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, String field = this.field; boolean useTermQuery = false; if ("_uid".equals(field)) { - // on new indices, the _id acts as a _uid - field = IdFieldMapper.NAME; - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); - } - DEPRECATION_LOG.deprecate( - "slice_on_uid", - "Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead" - ); - useTermQuery = true; + throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); } else if (IdFieldMapper.NAME.equals(field)) { useTermQuery = true; } else if (type.hasDocValues() == false) { diff --git a/server/src/main/java/org/opensearch/search/suggest/Suggest.java b/server/src/main/java/org/opensearch/search/suggest/Suggest.java index 90cc382ee4126..0aa881e2a3c9e 100644 --- a/server/src/main/java/org/opensearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/opensearch/search/suggest/Suggest.java @@ -33,7 +33,6 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.common.CheckedFunction; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -53,8 +52,6 @@ import org.opensearch.search.suggest.Suggest.Suggestion.Entry; import org.opensearch.search.suggest.Suggest.Suggestion.Entry.Option; import org.opensearch.search.suggest.completion.CompletionSuggestion; -import org.opensearch.search.suggest.phrase.PhraseSuggestion; -import org.opensearch.search.suggest.term.TermSuggestion; import java.io.IOException; import java.util.ArrayList; @@ -101,36 +98,11 @@ public Suggest(List>> suggestions) } public Suggest(StreamInput in) throws IOException { - // in older versions, Suggestion types were serialized as Streamable - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - final int size = in.readVInt(); - suggestions = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - Suggestion> suggestion; - final int type = in.readVInt(); - switch (type) { - case TermSuggestion.TYPE: - suggestion = new TermSuggestion(in); - break; - case CompletionSuggestion.TYPE: - suggestion = new CompletionSuggestion(in); - break; - case PhraseSuggestion.TYPE: - suggestion = new PhraseSuggestion(in); - break; - default: - throw new IllegalArgumentException("Unknown suggestion type with ordinal " + type); - } - suggestions.add(suggestion); - } - } else { - int suggestionCount = in.readVInt(); - suggestions = new ArrayList<>(suggestionCount); - for (int i = 0; i < suggestionCount; i++) { - suggestions.add(in.readNamedWriteable(Suggestion.class)); - } + int suggestionCount = in.readVInt(); + suggestions = new ArrayList<>(suggestionCount); + for (int i = 0; i < suggestionCount; i++) { + 
suggestions.add(in.readNamedWriteable(Suggestion.class)); } - hasScoreDocs = filter(CompletionSuggestion.class).stream().anyMatch(CompletionSuggestion::hasScoreDocs); } @@ -169,18 +141,9 @@ public boolean hasScoreDocs() { @Override public void writeTo(StreamOutput out) throws IOException { - // in older versions, Suggestion types were serialized as Streamable - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeVInt(suggestions.size()); - for (Suggestion command : suggestions) { - out.writeVInt(command.getWriteableType()); - command.writeTo(out); - } - } else { - out.writeVInt(suggestions.size()); - for (Suggestion> suggestion : suggestions) { - out.writeNamedWriteable(suggestion); - } + out.writeVInt(suggestions.size()); + for (Suggestion> suggestion : suggestions) { + out.writeNamedWriteable(suggestion); } } @@ -284,13 +247,6 @@ public Suggestion(String name, int size) { public Suggestion(StreamInput in) throws IOException { name = in.readString(); size = in.readVInt(); - - // this is a hack to work around slightly different serialization order of earlier versions of TermSuggestion - if (in.getVersion().before(LegacyESVersion.V_7_0_0) && this instanceof TermSuggestion) { - TermSuggestion t = (TermSuggestion) this; - t.setSort(SortBy.readFromStream(in)); - } - int entriesCount = in.readVInt(); entries.clear(); for (int i = 0; i < entriesCount; i++) { @@ -398,13 +354,6 @@ public void trim() { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeVInt(size); - - // this is a hack to work around slightly different serialization order in older versions of TermSuggestion - if (out.getVersion().before(LegacyESVersion.V_7_0_0) && this instanceof TermSuggestion) { - TermSuggestion termSuggestion = (TermSuggestion) this; - termSuggestion.getSort().writeTo(out); - } - out.writeVInt(entries.size()); for (Entry entry : entries) { entry.writeTo(out); diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java index e3b809dc57b83..bf9598a3110ad 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java @@ -37,7 +37,6 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.geo.GeoPoint; @@ -312,37 +311,14 @@ public void validateReferences(Version indexVersionCreated, Function sortComparator() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - sort.writeTo(out); - } + sort.writeTo(out); } @Override diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 417498467622a..ca5078c4d1c56 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -391,16 +391,6 @@ public void restoreSnapshot(final RestoreSnapshotRequest request, final ActionLi @Override public ClusterState execute(ClusterState currentState) { RestoreInProgress restoreInProgress = 
currentState.custom(RestoreInProgress.TYPE, RestoreInProgress.EMPTY); - if (currentState.getNodes().getMinNodeVersion().before(LegacyESVersion.V_7_0_0)) { - // Check if another restore process is already running - cannot run two restore processes at the - // same time in versions prior to 7.0 - if (restoreInProgress.isEmpty() == false) { - throw new ConcurrentSnapshotExecutionException( - snapshot, - "Restore process is already running in this cluster" - ); - } - } // Check if the snapshot to restore is currently being deleted SnapshotDeletionsInProgress deletionsInProgress = currentState.custom( SnapshotDeletionsInProgress.TYPE, diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java index ac88eae624813..92c0d482a848f 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java @@ -36,20 +36,14 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.ToXContentFragment; import org.opensearch.common.xcontent.XContentBuilder; import java.io.IOException; -import java.net.InetAddress; -import java.net.UnknownHostException; import java.util.Arrays; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyList; /** * This class encapsulates all remote cluster information to be rendered on @@ -79,26 +73,7 @@ public RemoteConnectionInfo(StreamInput input) throws IOException { clusterAlias = input.readString(); skipUnavailable = input.readBoolean(); } else { - List seedNodes; - if (input.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - seedNodes = Arrays.asList(input.readStringArray()); - } else { - // versions prior to 7.0.0 sent the resolved transport address of the seed nodes - final List transportAddresses = input.readList(TransportAddress::new); - seedNodes = transportAddresses.stream() - .map(a -> a.address().getHostString() + ":" + a.address().getPort()) - .collect(Collectors.toList()); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * We just throw any HTTP addresses received here on the floor - * because we don't need to do anything with them. 
- */ - input.readList(TransportAddress::new); - } - + List seedNodes = Arrays.asList(input.readStringArray()); int connectionsPerCluster = input.readVInt(); initialConnectionTimeout = input.readTimeValue(); int numNodesConnected = input.readVInt(); @@ -137,52 +112,12 @@ public void writeTo(StreamOutput out) throws IOException { } else { if (modeInfo.modeType() == RemoteConnectionStrategy.ConnectionStrategy.SNIFF) { SniffConnectionStrategy.SniffModeInfo sniffInfo = (SniffConnectionStrategy.SniffModeInfo) this.modeInfo; - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(sniffInfo.seedNodes.toArray(new String[0])); - } else { - // versions prior to 7.0.0 received the resolved transport address of the seed nodes - out.writeList(sniffInfo.seedNodes.stream().map(s -> { - final String host = RemoteConnectionStrategy.parseHost(s); - final int port = RemoteConnectionStrategy.parsePort(s); - try { - return new TransportAddress(InetAddress.getByAddress(host, TransportAddress.META_ADDRESS.getAddress()), port); - } catch (final UnknownHostException e) { - throw new AssertionError(e); - } - }).collect(Collectors.toList())); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * When sending this request to a node that expects HTTP addresses - * here we pretend that we didn't find any. This *should* be fine - * because, after all, we haven't been using this information for - * a while. - */ - out.writeList(emptyList()); - } + out.writeStringArray(sniffInfo.seedNodes.toArray(new String[0])); out.writeVInt(sniffInfo.maxConnectionsPerCluster); out.writeTimeValue(initialConnectionTimeout); out.writeVInt(sniffInfo.numNodesConnected); } else { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(new String[0]); - } else { - // versions prior to 7.0.0 received the resolved transport address of the seed nodes - out.writeList(emptyList()); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * When sending this request to a node that expects HTTP addresses - * here we pretend that we didn't find any. This *should* be fine - * because, after all, we haven't been using this information for - * a while. 
- */ - out.writeList(emptyList()); - } + out.writeStringArray(new String[0]); out.writeVInt(0); out.writeTimeValue(initialConnectionTimeout); out.writeVInt(0); diff --git a/server/src/test/java/org/opensearch/BuildTests.java b/server/src/test/java/org/opensearch/BuildTests.java index eeb6890699fdc..6e6a91419b762 100644 --- a/server/src/test/java/org/opensearch/BuildTests.java +++ b/server/src/test/java/org/opensearch/BuildTests.java @@ -299,26 +299,17 @@ public void testSerializationBWC() throws IOException { new Build(Build.Type.DOCKER, randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6), "other") ); - final List versions = Version.getDeclaredVersions(LegacyESVersion.class); - final Version post70Version = randomFrom( - versions.stream().filter(v -> v.onOrAfter(LegacyESVersion.V_7_0_0)).collect(Collectors.toList()) - ); + final List versions = Version.getDeclaredVersions(Version.class); + final Version post10OpenSearchVersion = randomFrom( versions.stream().filter(v -> v.onOrAfter(Version.V_1_0_0)).collect(Collectors.toList()) ); - - final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version); final WriteableBuild post10OpenSearch = copyWriteable( dockerBuild, writableRegistry(), WriteableBuild::new, post10OpenSearchVersion ); - - assertThat(post70.build.type(), equalTo(dockerBuild.build.type())); - - assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); - assertThat(post70.build.getDistribution(), equalTo(dockerBuild.build.getDistribution())); assertThat(post10OpenSearch.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); assertThat(post10OpenSearch.build.getDistribution(), equalTo(dockerBuild.build.getDistribution())); } diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index ff2bb77531486..0ca4cdd780f94 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -393,7 +393,7 @@ public void testSearchContextMissingException() throws IOException { public void testCircuitBreakingException() throws IOException { CircuitBreakingException ex = serialize( new CircuitBreakingException("Too large", 0, 100, CircuitBreaker.Durability.TRANSIENT), - LegacyESVersion.V_7_0_0 + Version.V_2_0_0 ); assertEquals("Too large", ex.getMessage()); assertEquals(100, ex.getByteLimit()); diff --git a/server/src/test/java/org/opensearch/VersionTests.java b/server/src/test/java/org/opensearch/VersionTests.java index 5b3213ded1c02..70bcf343e4c1e 100644 --- a/server/src/test/java/org/opensearch/VersionTests.java +++ b/server/src/test/java/org/opensearch/VersionTests.java @@ -415,7 +415,7 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); - assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.V_7_0_0)); + assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.fromId(7000099))); assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.fromId(6050099))); int currentMajorID = Version.computeID(Version.CURRENT.major, 0, 0, 99); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java 
b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java index 7127e0001592f..672e5ace8b5ae 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.state; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -64,14 +63,12 @@ public void testSerialization() throws Exception { Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT ); - // TODO: change version to V_6_6_0 after backporting: - if (testVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - if (randomBoolean()) { - clusterStateRequest.waitForMetadataVersion(randomLongBetween(1, Long.MAX_VALUE)); - } - if (randomBoolean()) { - clusterStateRequest.waitForTimeout(new TimeValue(randomNonNegativeLong())); - } + + if (randomBoolean()) { + clusterStateRequest.waitForMetadataVersion(randomLongBetween(1, Long.MAX_VALUE)); + } + if (randomBoolean()) { + clusterStateRequest.waitForTimeout(new TimeValue(randomNonNegativeLong())); } BytesStreamOutput output = new BytesStreamOutput(); diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index 19544af63944c..3ffa6d6910548 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; @@ -107,11 +106,7 @@ public void testRandomVersionSerialization() throws IOException { SearchRequest searchRequest = createSearchRequest(); Version version = VersionUtils.randomVersion(random()); SearchRequest deserializedRequest = copyWriteable(searchRequest, namedWriteableRegistry, SearchRequest::new, version); - if (version.before(LegacyESVersion.V_7_0_0)) { - assertTrue(deserializedRequest.isCcsMinimizeRoundtrips()); - } else { - assertEquals(searchRequest.isCcsMinimizeRoundtrips(), deserializedRequest.isCcsMinimizeRoundtrips()); - } + assertEquals(searchRequest.isCcsMinimizeRoundtrips(), deserializedRequest.isCcsMinimizeRoundtrips()); assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias()); assertEquals(searchRequest.getAbsoluteStartMillis(), deserializedRequest.getAbsoluteStartMillis()); assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce()); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 5aa582a5e73f6..52922bc6a0e83 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -111,18 +111,16 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - if (maxNodeVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - final Version tooLow = 
LegacyESVersion.fromString("6.7.0"); - expectThrows(IllegalStateException.class, () -> { - if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); - } else { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); - } - }); - } + final Version tooLow = LegacyESVersion.fromString("6.7.0"); + expectThrows(IllegalStateException.class, () -> { + if (randomBoolean()) { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); + } else { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); + } + }); - if (minNodeVersion.onOrAfter(LegacyESVersion.V_7_0_0) && minNodeVersion.before(Version.V_3_0_0)) { + if (minNodeVersion.before(Version.V_3_0_0)) { Version oldMajor = minNodeVersion.minimumCompatibilityVersion(); expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java index fb01a493ff7c3..72b22e0efc09b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.close.CloseIndexResponse; @@ -44,9 +43,6 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.node.DiscoveryNodeRole; -import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RoutingTable; @@ -97,7 +93,6 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -181,91 +176,6 @@ public void testCloseRoutingTableWithSnapshottedIndex() { assertThat(updatedState.blocks().hasIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID), is(true)); } - public void testCloseRoutingTableRemovesRoutingTable() { - final Set nonBlockedIndices = new HashSet<>(); - final Map blockedIndices = new HashMap<>(); - final Map results = new HashMap<>(); - final ClusterBlock closingBlock = MetadataIndexStateService.createIndexClosingBlock(); - - ClusterState state = ClusterState.builder(new ClusterName("testCloseRoutingTableRemovesRoutingTable")).build(); - for (int i = 0; i < randomIntBetween(1, 25); i++) { - final String indexName = "index-" + i; - - if (randomBoolean()) { - state = addOpenedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state); - nonBlockedIndices.add(state.metadata().index(indexName).getIndex()); - } else { - state = addBlockedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state, closingBlock); - final Index index = state.metadata().index(indexName).getIndex(); - blockedIndices.put(index, closingBlock); - if (randomBoolean()) { - 
results.put(index, new CloseIndexResponse.IndexResult(index)); - } else { - results.put(index, new CloseIndexResponse.IndexResult(index, new Exception("test"))); - } - } - } - - state = ClusterState.builder(state) - .nodes( - DiscoveryNodes.builder(state.nodes()) - .add( - new DiscoveryNode( - "old_node", - buildNewFakeTransportAddress(), - emptyMap(), - new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES), - LegacyESVersion.V_7_0_0 - ) - ) - .add( - new DiscoveryNode( - "new_node", - buildNewFakeTransportAddress(), - emptyMap(), - new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES), - LegacyESVersion.V_7_2_0 - ) - ) - ) - .build(); - - state = MetadataIndexStateService.closeRoutingTable(state, blockedIndices, results).v1(); - assertThat(state.metadata().indices().size(), equalTo(nonBlockedIndices.size() + blockedIndices.size())); - - for (Index nonBlockedIndex : nonBlockedIndices) { - assertIsOpened(nonBlockedIndex.getName(), state); - assertThat(state.blocks().hasIndexBlockWithId(nonBlockedIndex.getName(), INDEX_CLOSED_BLOCK_ID), is(false)); - } - for (Index blockedIndex : blockedIndices.keySet()) { - if (results.get(blockedIndex).hasFailures() == false) { - IndexMetadata indexMetadata = state.metadata().index(blockedIndex); - assertThat(indexMetadata.getState(), is(IndexMetadata.State.CLOSE)); - Settings indexSettings = indexMetadata.getSettings(); - assertThat(indexSettings.hasValue(MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()), is(false)); - assertThat(state.blocks().hasIndexBlock(blockedIndex.getName(), MetadataIndexStateService.INDEX_CLOSED_BLOCK), is(true)); - assertThat( - "Index must have only 1 block with [id=" + MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID + "]", - state.blocks() - .indices() - .getOrDefault(blockedIndex.getName(), emptySet()) - .stream() - .filter(clusterBlock -> clusterBlock.id() == MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID) - .count(), - equalTo(1L) - ); - assertThat( - "Index routing table should have been removed when closing the index on mixed cluster version", - state.routingTable().index(blockedIndex), - nullValue() - ); - } else { - assertIsOpened(blockedIndex.getName(), state); - assertThat(state.blocks().hasIndexBlock(blockedIndex.getName(), closingBlock), is(true)); - } - } - } - public void testAddIndexClosedBlocks() { final ClusterState initialState = ClusterState.builder(new ClusterName("testAddIndexClosedBlocks")).build(); { diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java index 6848cd2bbc773..a1b6cd763476a 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java @@ -32,7 +32,6 @@ package org.opensearch.common.settings; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.Strings; @@ -634,7 +633,7 @@ public void testMissingValue() throws Exception { public void testReadWriteArray() throws IOException { BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(randomFrom(Version.CURRENT, LegacyESVersion.V_7_0_0)); + output.setVersion(randomFrom(Version.CURRENT, Version.V_2_0_0)); Settings settings = Settings.builder().putList("foo.bar", "0", "1", "2", "3").put("foo.bar.baz", "baz").build(); Settings.writeSettingsToStream(settings, output); StreamInput in = 
StreamInput.wrap(BytesReference.toBytes(output.bytes())); diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java index 0f451fda7b9fb..22c10844028a9 100644 --- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java @@ -94,7 +94,7 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { if (createShardContext().getMapperService().fieldType(DATE_FIELD_NAME) != null) { if (randomBoolean()) { // drawing a truly random zoneId here can rarely fail under the following conditons: - // - index versionCreated before V_7_0_0 + // - index versionCreated before legacy V_7_0_0 // - no "forced" date parser through a format parameter // - one of the SystemV* time zones that Jodas DateTimeZone parser doesn't know about // thats why we exlude it here (see #58431) diff --git a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java index eb666f1206c26..1d7b749433c65 100644 --- a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java @@ -37,7 +37,7 @@ import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.Similarity; -import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.test.OpenSearchTestCase; @@ -97,7 +97,7 @@ public float score(float freq, long norm) { }; IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, negativeScoresSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, negativeScoresSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarities should not return negative scores")); @@ -122,7 +122,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, decreasingScoresWithFreqSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, decreasingScoresWithFreqSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not decrease when term frequency increases")); @@ -147,7 +147,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, increasingScoresWithNormSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, increasingScoresWithNormSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not increase when norm increases")); } diff --git a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java index ec7ab06ac86a6..9c8ad3917c23f 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java @@ -32,7 +32,6 @@ package org.opensearch.indices; -import org.opensearch.Version; import org.opensearch.index.mapper.DocCountFieldMapper; import 
org.opensearch.index.mapper.DataStreamFieldMapper; import org.opensearch.index.mapper.FieldNamesFieldMapper; @@ -51,7 +50,6 @@ import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.plugins.MapperPlugin; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.VersionUtils; import java.util.ArrayList; import java.util.Arrays; @@ -105,11 +103,9 @@ public Map getMetadataMappers() { public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); - assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); - Map metadataMapperParsers = module.getMapperRegistry() - .getMetadataMapperParsers(version); + assertFalse(module.getMapperRegistry().getMetadataMapperParsers().isEmpty()); + Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); assertEquals(EXPECTED_METADATA_FIELDS.length, metadataMapperParsers.size()); int i = 0; for (String field : metadataMapperParsers.keySet()) { @@ -117,12 +113,7 @@ public void testBuiltinMappers() { } } { - Version version = VersionUtils.randomVersionBetween( - random(), - Version.V_1_0_0, - VersionUtils.getPreviousVersion(Version.V_2_0_0) - ); - assertEquals(EXPECTED_METADATA_FIELDS.length - 1, module.getMapperRegistry().getMetadataMapperParsers(version).size()); + assertEquals(EXPECTED_METADATA_FIELDS.length, module.getMapperRegistry().getMetadataMapperParsers().size()); } } @@ -132,11 +123,10 @@ public void testBuiltinWithPlugins() { MapperRegistry registry = module.getMapperRegistry(); assertThat(registry.getMapperParsers().size(), greaterThan(noPluginsModule.getMapperRegistry().getMapperParsers().size())); assertThat( - registry.getMetadataMapperParsers(Version.CURRENT).size(), - greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(Version.CURRENT).size()) + registry.getMetadataMapperParsers().size(), + greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers().size()) ); - Map metadataMapperParsers = module.getMapperRegistry() - .getMetadataMapperParsers(Version.CURRENT); + Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); Iterator iterator = metadataMapperParsers.keySet().iterator(); assertEquals(IgnoredFieldMapper.NAME, iterator.next()); String last = null; @@ -213,15 +203,13 @@ public Map getMetadataMappers() { public void testFieldNamesIsLast() { IndicesModule module = new IndicesModule(Collections.emptyList()); - Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); - List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); + List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers().keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } public void testFieldNamesIsLastWithPlugins() { IndicesModule module = new IndicesModule(fakePlugins); - Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); - List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); + List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers().keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } diff --git 
a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index e481384c3d6f3..c39af60650657 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -66,7 +66,6 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -79,7 +78,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.IndexSettingsModule; -import org.opensearch.test.VersionUtils; import org.opensearch.test.hamcrest.RegexMatcher; import java.io.IOException; @@ -565,16 +563,9 @@ public void testStatsByShardDoesNotDieFromExpectedExceptions() { public void testIsMetadataField() { IndicesService indicesService = getIndicesService(); - final Version randVersion = VersionUtils.randomIndexCompatibleVersion(random()); - assertFalse(indicesService.isMetadataField(randVersion, randomAlphaOfLengthBetween(10, 15))); + assertFalse(indicesService.isMetadataField(randomAlphaOfLengthBetween(10, 15))); for (String builtIn : IndicesModule.getBuiltInMetadataFields()) { - if (NestedPathFieldMapper.NAME.equals(builtIn) && randVersion.before(Version.V_2_0_0)) { - continue; // nested field mapper does not exist prior to 2.0 - } - assertTrue( - "Expected " + builtIn + " to be a metadata field for version " + randVersion, - indicesService.isMetadataField(randVersion, builtIn) - ); + assertTrue(indicesService.isMetadataField(builtIn)); } } diff --git a/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java index 6b89eb92065b1..1cdc2f166224f 100644 --- a/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java +++ b/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java @@ -35,16 +35,13 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; import org.opensearch.action.OriginalIndicesTests; import org.opensearch.action.search.SearchRequest; import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.ShardId; @@ -52,7 +49,6 @@ import org.opensearch.search.SearchModule; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.Aggregations; -import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.InternalAggregationsTests; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.ShardSearchContextId; @@ -60,9 +56,6 @@ import org.opensearch.search.suggest.SuggestTests; import 
org.opensearch.test.OpenSearchTestCase; -import java.io.IOException; -import java.util.Base64; - import static java.util.Collections.emptyList; public class QuerySearchResultTests extends OpenSearchTestCase { @@ -127,32 +120,6 @@ public void testSerialization() throws Exception { assertEquals(querySearchResult.terminatedEarly(), deserialized.terminatedEarly()); } - public void testReadFromPre_7_1_0() throws IOException { - String message = "AAAAAAAAAGQAAAEAAAB/wAAAAAEBBnN0ZXJtcwVJblhNRgoDBVNhdWpvAAVrS3l3cwVHSVVZaAAFZXRUbEUFZGN0WVoABXhzYnVrAAEDAfoN" - + "A3JhdwUBAAJRAAAAAAAAA30DBnN0ZXJtcwVNdVVFRwoAAAEDAfoNA3JhdwUBAAdDAAAAAAAAA30AAApQVkFhaUxSdHh5TAAAAAAAAAN9AAAKTVRUeUxnd1hyd" - + "y0AAAAAAAADfQAACnZRQXZ3cWp0SmwPAAAAAAAAA30AAApmYXNyUUhNVWZBCwAAAAAAAAN9AAAKT3FIQ2RMZ1JZUwUAAAAAAAADfQAACm9jT05aZmZ4ZmUmAA" - + "AAAAAAA30AAApvb0tJTkdvbHdzBnN0ZXJtcwVtRmlmZAoAAAEDAfoNA3JhdwUBAARXAAAAAAAAA30AAApZd3BwQlpBZEhpMQAAAAAAAAN9AAAKREZ3UVpTSXh" - + "DSE4AAAAAAAADfQAAClVMZW1YZGtkSHUUAAAAAAAAA30AAApBUVdKVk1kTlF1BnN0ZXJtcwVxbkJGVgoAAAEDAfoNA3JhdwUBAAYJAAAAAAAAA30AAApBS2NL" - + "U1ZVS25EIQAAAAAAAAN9AAAKWGpCbXZBZmduRhsAAAAAAAADfQAACk54TkJEV3pLRmI7AAAAAAAAA30AAApydkdaZnJycXhWSAAAAAAAAAN9AAAKSURVZ3JhQ" - + "lFHSy4AAAAAAAADfQAACmJmZ0x5YlFlVksAClRJZHJlSkpVc1Y4AAAAAAAAA30DBnN0ZXJtcwVNdVVFRwoAAAEDAfoNA3JhdwUBAAdDAAAAAAAAA30AAApQVk" - + "FhaUxSdHh5TAAAAAAAAAN9AAAKTVRUeUxnd1hydy0AAAAAAAADfQAACnZRQXZ3cWp0SmwPAAAAAAAAA30AAApmYXNyUUhNVWZBCwAAAAAAAAN9AAAKT3FIQ2R" - + "MZ1JZUwUAAAAAAAADfQAACm9jT05aZmZ4ZmUmAAAAAAAAA30AAApvb0tJTkdvbHdzBnN0ZXJtcwVtRmlmZAoAAAEDAfoNA3JhdwUBAARXAAAAAAAAA30AAApZ" - + "d3BwQlpBZEhpMQAAAAAAAAN9AAAKREZ3UVpTSXhDSE4AAAAAAAADfQAAClVMZW1YZGtkSHUUAAAAAAAAA30AAApBUVdKVk1kTlF1BnN0ZXJtcwVxbkJGVgoAA" - + "AEDAfoNA3JhdwUBAAYJAAAAAAAAA30AAApBS2NLU1ZVS25EIQAAAAAAAAN9AAAKWGpCbXZBZmduRhsAAAAAAAADfQAACk54TkJEV3pLRmI7AAAAAAAAA30AAA" - + "pydkdaZnJycXhWSAAAAAAAAAN9AAAKSURVZ3JhQlFHSy4AAAAAAAADfQAACmJmZ0x5YlFlVksACm5rdExLUHp3cGgBCm1heF9idWNrZXQFbmFtZTEBB2J1Y2t" - + "ldDH/A3JhdwEBCm1heF9idWNrZXQFbmFtZTEBB2J1Y2tldDH/A3JhdwEAAAIAAf////8AAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; - byte[] bytes = Base64.getDecoder().decode(message); - try (NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) { - in.setVersion(LegacyESVersion.V_7_0_0); - QuerySearchResult querySearchResult = new QuerySearchResult(in); - assertEquals(100, querySearchResult.getContextId().getId()); - assertTrue(querySearchResult.hasAggs()); - InternalAggregations aggs = querySearchResult.consumeAggs().expand(); - assertEquals(1, aggs.asList().size()); - // We deserialize and throw away top level pipeline aggs - } - } - public void testNullResponse() throws Exception { QuerySearchResult querySearchResult = QuerySearchResult.nullInstance(); QuerySearchResult deserialized = copyWriteable(querySearchResult, namedWriteableRegistry, QuerySearchResult::new, Version.CURRENT); diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java index f71c67ce456bc..22f4fcc1fde3f 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.HasAttributeNodeSelector; import 
org.opensearch.client.Node; @@ -367,18 +366,6 @@ void checkWarningHeaders(final List warningHeaders, final Version cluste final boolean matches = matcher.matches(); if (matches) { final String message = HeaderWarning.extractWarningValueFromWarningHeader(header, true); - if (clusterManagerVersion.before(LegacyESVersion.V_7_0_0) - && message.equals( - "the default number of shards will change from [5] to [1] in 7.0.0; " - + "if you wish to continue using the default of [5] shards, " - + "you must manage this on the create index request or with an index template" - )) { - /* - * This warning header will come back in the vast majority of our tests that create an index when running against an - * older cluster-manager. Rather than rewrite our tests to assert this warning header, we assume that it is expected. - */ - continue; - } if (message.startsWith("[types removal]")) { // We skip warnings related to types deprecation because they are *everywhere*. continue; diff --git a/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java index d79e1730e16f6..9fb693efa9f8b 100644 --- a/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java +++ b/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -32,7 +32,6 @@ package org.opensearch.upgrades; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Booleans; import org.opensearch.common.xcontent.support.XContentMapValues; @@ -52,10 +51,10 @@ public static boolean isRunningAgainstOldCluster() { /** * @return true if test is running against an old cluster before that last major, in this case - * when System.getProperty("tests.is_old_cluster" == true) and oldClusterVersion is before {@link LegacyESVersion#V_7_0_0} + * when System.getProperty("tests.is_old_cluster" == true) and oldClusterVersion is before {@link Version#V_2_0_0} */ protected final boolean isRunningAgainstAncientCluster() { - return isRunningAgainstOldCluster() && oldClusterVersion.before(LegacyESVersion.V_7_0_0); + return isRunningAgainstOldCluster() && oldClusterVersion.before(Version.V_2_0_0); } public static Version getOldClusterVersion() { From 588db38c38b3166d6d4ad31bc4fa516e4304ac42 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 7 Oct 2022 09:47:09 -0400 Subject: [PATCH 035/122] Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks (#4696) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../org/opensearch/gradle/PublishPlugin.java | 5 +++-- .../opensearch/gradle/pluginzip/Publish.java | 17 ++++++++++++----- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1b23eaa5ac20..2f7893881fdf1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -124,6 +124,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) +- Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks 
([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) ### Security diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java index 2bdef8e4cd244..be12fdd99c1df 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java @@ -92,7 +92,7 @@ public String call() throws Exception { return String.format( "%s/distributions/%s-%s.pom", project.getBuildDir(), - getArchivesBaseName(project), + pomTask.getName().toLowerCase().contains("zip") ? project.getName() : getArchivesBaseName(project), project.getVersion() ); } @@ -130,7 +130,6 @@ public String call() throws Exception { publication.getPom().withXml(PublishPlugin::addScmInfo); if (!publication.getName().toLowerCase().contains("zip")) { - // have to defer this until archivesBaseName is set project.afterEvaluate(p -> publication.setArtifactId(getArchivesBaseName(project))); @@ -139,6 +138,8 @@ public String call() throws Exception { publication.artifact(project.getTasks().getByName("sourcesJar")); publication.artifact(project.getTasks().getByName("javadocJar")); } + } else { + project.afterEvaluate(p -> publication.setArtifactId(project.getName())); } generatePomTask.configure( diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index 6dc7d660922b2..6b581fcaa7774 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -13,12 +13,14 @@ import org.gradle.api.publish.maven.MavenPublication; import java.nio.file.Path; +import java.util.Set; +import java.util.stream.Collectors; + import org.gradle.api.Task; import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; public class Publish implements Plugin { - // public final static String PLUGIN_ZIP_PUBLISH_POM_TASK = "generatePomFileForPluginZipPublication"; public final static String PUBLICATION_NAME = "pluginZip"; public final static String STAGING_REPO = "zipStaging"; public final static String LOCAL_STAGING_REPO_PATH = "/build/local-staging-repo"; @@ -67,10 +69,15 @@ public void apply(Project project) { if (validatePluginZipPom != null) { validatePluginZipPom.dependsOn("generatePomFileForNebulaPublication"); } - Task publishPluginZipPublicationToZipStagingRepository = project.getTasks() - .findByName("publishPluginZipPublicationToZipStagingRepository"); - if (publishPluginZipPublicationToZipStagingRepository != null) { - publishPluginZipPublicationToZipStagingRepository.dependsOn("generatePomFileForNebulaPublication"); + + // There are number of tasks prefixed by 'publishPluginZipPublication', f.e.: + // publishPluginZipPublicationToZipStagingRepository, publishPluginZipPublicationToMavenLocal + final Set publishPluginZipPublicationToTasks = project.getTasks() + .stream() + .filter(t -> t.getName().startsWith("publishPluginZipPublicationTo")) + .collect(Collectors.toSet()); + if (!publishPluginZipPublicationToTasks.isEmpty()) { + publishPluginZipPublicationToTasks.forEach(t -> t.dependsOn("generatePomFileForNebulaPublication")); } } else { project.getLogger() From ef50f78d27e78c9b367d3c44f2f46450ca2749ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Oct 2022 08:24:05 -0700 Subject: [PATCH 036/122] Bump gson from 2.9.0 to 2.9.1 in 
/test/fixtures/hdfs-fixture (#4066) * Bump gson from 2.9.0 to 2.9.1 in /test/fixtures/hdfs-fixture Bumps [gson](https://github.com/google/gson) from 2.9.0 to 2.9.1. - [Release notes](https://github.com/google/gson/releases) - [Changelog](https://github.com/google/gson/blob/master/CHANGELOG.md) - [Commits](https://github.com/google/gson/compare/gson-parent-2.9.0...gson-parent-2.9.1) --- updated-dependencies: - dependency-name: com.google.code.gson:gson dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 4 +++- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f7893881fdf1..621e64717338a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] +### Dependencies +- Bumps `gson` from 2.9.0 to 2.9.1 ### Added @@ -156,4 +158,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 40e3a1dc0587d..2e541572b7385 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -40,7 +40,7 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" - api 'com.google.code.gson:gson:2.9.0' + api 'com.google.code.gson:gson:2.9.1' api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" From 109319e0724d5dfa46f5554a7ef819560eaa1fac Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 11:25:39 -0500 Subject: [PATCH 037/122] Always auto release the flood stage block (#4703) * Always auto release the flood stage block Removes support for using a system property to disable the automatic release of the write block applied when a node exceeds the flood-stage watermark. 
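
As an illustration of the fail-fast pattern this change adopts, the following is a minimal, hypothetical Java sketch (not the actual OpenSearch class): a removed system property is rejected eagerly at class-initialization time instead of being silently honored.

    // Hypothetical sketch only: reject a removed system property at class load time
    // so operators notice immediately that the flag no longer has any effect.
    public final class RemovedSettingGuard {
        private static final String KEY = "opensearch.disk.auto_release_flood_stage_block";

        static {
            if (System.getProperty(KEY) != null) {
                throw new IllegalArgumentException(
                    "system property [" + KEY + "] has been removed and is not supported anymore"
                );
            }
        }

        private RemovedSettingGuard() {}
    }
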
Signed-off-by: Nicholas Walter Knize * update IAE message Signed-off-by: Nicholas Walter Knize Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../allocation/DiskThresholdMonitor.java | 37 ++----------------- .../allocation/DiskThresholdSettings.java | 18 +++------ .../DiskThresholdSettingsTests.java | 1 - 4 files changed, 11 insertions(+), 46 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 621e64717338a..e35d8c3eda0b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -84,6 +84,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) - Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) +- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) ### Fixed diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java index a89271261ed14..0a6cfd8c04977 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.client.Client; @@ -54,7 +53,6 @@ import org.opensearch.common.Priority; import org.opensearch.common.Strings; import org.opensearch.common.collect.ImmutableOpenMap; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; @@ -88,7 +86,6 @@ public class DiskThresholdMonitor { private final RerouteService rerouteService; private final AtomicLong lastRunTimeMillis = new AtomicLong(Long.MIN_VALUE); private final AtomicBoolean checkInProgress = new AtomicBoolean(); - private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(logger.getName()); /** * The IDs of the nodes that were over the low threshold in the last check (and maybe over another threshold too). 
Tracked so that we @@ -121,14 +118,6 @@ public DiskThresholdMonitor( this.rerouteService = rerouteService; this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); this.client = client; - if (diskThresholdSettings.isAutoReleaseIndexEnabled() == false) { - deprecationLogger.deprecate( - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY.replace(".", "_"), - "[{}] will be removed in version {}", - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY, - LegacyESVersion.V_7_4_0.major + 1 - ); - } } private void checkFinished() { @@ -371,23 +360,7 @@ public void onNewInfo(ClusterInfo info) { .collect(Collectors.toSet()); if (indicesToAutoRelease.isEmpty() == false) { - if (diskThresholdSettings.isAutoReleaseIndexEnabled()) { - logger.info("releasing read-only-allow-delete block on indices: [{}]", indicesToAutoRelease); - updateIndicesReadOnly(indicesToAutoRelease, listener, false); - } else { - deprecationLogger.deprecate( - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY.replace(".", "_"), - "[{}] will be removed in version {}", - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY, - LegacyESVersion.V_7_4_0.major + 1 - ); - logger.debug( - "[{}] disabled, not releasing read-only-allow-delete block on indices: [{}]", - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY, - indicesToAutoRelease - ); - listener.onResponse(null); - } + updateIndicesReadOnly(indicesToAutoRelease, listener, false); } else { logger.trace("no auto-release required"); listener.onResponse(null); @@ -421,11 +394,9 @@ private void markNodesMissingUsageIneligibleForRelease( ) { for (RoutingNode routingNode : routingNodes) { if (usages.containsKey(routingNode.nodeId()) == false) { - if (routingNode != null) { - for (ShardRouting routing : routingNode) { - String indexName = routing.index().getName(); - indicesToMarkIneligibleForAutoRelease.add(indexName); - } + for (ShardRouting routing : routingNode) { + String indexName = routing.index().getName(); + indicesToMarkIneligibleForAutoRelease.add(indexName); } } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java index 0ce0b1bd7b688..56a1ccad112c5 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.routing.allocation; import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -108,18 +109,15 @@ public class DiskThresholdSettings { private volatile TimeValue rerouteInterval; private volatile Double freeDiskThresholdFloodStage; private volatile ByteSizeValue freeBytesThresholdFloodStage; - private static final boolean autoReleaseIndexEnabled; - public static final String AUTO_RELEASE_INDEX_ENABLED_KEY = "opensearch.disk.auto_release_flood_stage_block"; static { + assert Version.CURRENT.major == Version.V_2_0_0.major + 1; // this check is unnecessary in v4 + final String AUTO_RELEASE_INDEX_ENABLED_KEY = "opensearch.disk.auto_release_flood_stage_block"; + final String property = System.getProperty(AUTO_RELEASE_INDEX_ENABLED_KEY); - if (property == null) { - autoReleaseIndexEnabled = true; - } else if (Boolean.FALSE.toString().equals(property)) { - 
autoReleaseIndexEnabled = false; - } else { + if (property != null) { throw new IllegalArgumentException( - AUTO_RELEASE_INDEX_ENABLED_KEY + " may only be unset or set to [false] but was [" + property + "]" + "system property [" + AUTO_RELEASE_INDEX_ENABLED_KEY + "] has been removed in 3.0.0 and is not supported anymore" ); } } @@ -371,10 +369,6 @@ public ByteSizeValue getFreeBytesThresholdFloodStage() { return freeBytesThresholdFloodStage; } - public boolean isAutoReleaseIndexEnabled() { - return autoReleaseIndexEnabled; - } - public boolean includeRelocations() { return includeRelocations; } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java index 363484777fe66..5184ca7fe887d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java @@ -60,7 +60,6 @@ public void testDefaults() { assertTrue(diskThresholdSettings.includeRelocations()); assertEquals(zeroBytes, diskThresholdSettings.getFreeBytesThresholdFloodStage()); assertEquals(5.0D, diskThresholdSettings.getFreeDiskThresholdFloodStage(), 0.0D); - assertTrue(diskThresholdSettings.isAutoReleaseIndexEnabled()); } public void testUpdate() { From 2e4b27b243d8bd2c515f66cf86c6d1d6a601307f Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Fri, 7 Oct 2022 22:26:38 +0530 Subject: [PATCH 038/122] Controlling discovery for decommissioned nodes (#4590) * Controlling discovery for decommissioned nodes Signed-off-by: Rishab Nahata --- CHANGELOG.md | 1 + .../cluster/coordination/Coordinator.java | 28 +++++++- .../cluster/coordination/JoinHelper.java | 16 ++++- .../coordination/JoinTaskExecutor.java | 32 ++++----- .../decommission/DecommissionService.java | 38 ++++++++-- .../common/settings/ClusterSettings.java | 1 + .../org/opensearch/discovery/PeerFinder.java | 29 +++++++- .../coordination/CoordinatorTests.java | 43 +++++++++++ .../cluster/coordination/JoinHelperTests.java | 9 ++- .../coordination/JoinTaskExecutorTests.java | 45 ++++++++++++ .../cluster/coordination/NodeJoinTests.java | 71 +++++++++++++++++++ .../opensearch/discovery/PeerFinderTests.java | 46 ++++++++++++ 12 files changed, 329 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e35d8c3eda0b3..04b62c11a6865 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -76,6 +76,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) - Improve Gradle pre-commit checks to pre-empt Jenkins build ([#4660](https://github.com/opensearch-project/OpenSearch/pull/4660)) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) +- Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 7ac716084793d..fbb345ea3a441 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ 
b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -105,6 +105,7 @@ import java.util.stream.StreamSupport; import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID; +import static org.opensearch.cluster.decommission.DecommissionService.nodeCommissioned; import static org.opensearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; @@ -138,6 +139,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final Settings settings; private final boolean singleNodeDiscovery; + private volatile boolean localNodeCommissioned; private final ElectionStrategy electionStrategy; private final TransportService transportService; private final ClusterManagerService clusterManagerService; @@ -218,7 +220,8 @@ public Coordinator( this::joinLeaderInTerm, this.onJoinValidators, rerouteService, - nodeHealthService + nodeHealthService, + this::onNodeCommissionStatusChange ); this.persistedStateSupplier = persistedStateSupplier; this.noClusterManagerBlockService = new NoClusterManagerBlockService(settings, clusterSettings); @@ -281,6 +284,7 @@ public Coordinator( joinHelper::logLastFailedJoinAttempt ); this.nodeHealthService = nodeHealthService; + this.localNodeCommissioned = true; } private ClusterFormationState getClusterFormationState() { @@ -596,6 +600,9 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinRequest.getSourceNode().getVersion(), stateForJoinValidation.getNodes().getMinNodeVersion() ); + // we are checking source node commission status here to reject any join request coming from a decommissioned node + // even before executing the join task to fail fast + JoinTaskExecutor.ensureNodeCommissioned(joinRequest.getSourceNode(), stateForJoinValidation.metadata()); } sendValidateJoinRequest(stateForJoinValidation, joinRequest, joinCallback); } else { @@ -1424,6 +1431,17 @@ protected void onFoundPeersUpdated() { } } + // package-visible for testing + synchronized void onNodeCommissionStatusChange(boolean localNodeCommissioned) { + this.localNodeCommissioned = localNodeCommissioned; + peerFinder.onNodeCommissionStatusChange(localNodeCommissioned); + } + + // package-visible for testing + boolean localNodeCommissioned() { + return localNodeCommissioned; + } + private void startElectionScheduler() { assert electionScheduler == null : electionScheduler; @@ -1450,6 +1468,14 @@ public void run() { return; } + // if either the localNodeCommissioned flag or the last accepted state thinks it should skip pre voting, we will + // acknowledge it + if (nodeCommissioned(lastAcceptedState.nodes().getLocalNode(), lastAcceptedState.metadata()) == false + || localNodeCommissioned == false) { + logger.debug("skip prevoting as local node is decommissioned"); + return; + } + if (prevotingRound != null) { prevotingRound.close(); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 656e6d220720f..a66152b8016ee 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.ClusterStateTaskListener; import org.opensearch.cluster.NotClusterManagerException; import 
org.opensearch.cluster.coordination.Coordinator.Mode; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; @@ -57,6 +58,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; +import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; @@ -78,6 +80,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -118,6 +121,7 @@ public class JoinHelper { private final AtomicReference lastFailedJoinAttempt = new AtomicReference<>(); private final Supplier joinTaskExecutorGenerator; + private final Consumer nodeCommissioned; JoinHelper( Settings settings, @@ -130,12 +134,14 @@ public class JoinHelper { Function joinLeaderInTerm, Collection> joinValidators, RerouteService rerouteService, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + Consumer nodeCommissioned ) { this.clusterManagerService = clusterManagerService; this.transportService = transportService; this.nodeHealthService = nodeHealthService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); + this.nodeCommissioned = nodeCommissioned; this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor(settings, allocationService, logger, rerouteService, transportService) { private final long term = currentTermSupplier.getAsLong(); @@ -342,6 +348,7 @@ public void handleResponse(Empty response) { pendingOutgoingJoins.remove(dedupKey); logger.debug("successfully joined {} with {}", destination, joinRequest); lastFailedJoinAttempt.set(null); + nodeCommissioned.accept(true); onCompletion.run(); } @@ -352,6 +359,13 @@ public void handleException(TransportException exp) { FailedJoinAttempt attempt = new FailedJoinAttempt(destination, joinRequest, exp); attempt.logNow(); lastFailedJoinAttempt.set(attempt); + if (exp instanceof RemoteTransportException && (exp.getCause() instanceof NodeDecommissionedException)) { + logger.info( + "local node is decommissioned [{}]. 
Will not be able to join the cluster", + exp.getCause().getMessage() + ); + nodeCommissioned.accept(false); + } onCompletion.run(); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 814aa17255931..ac237db85ee5b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -39,9 +39,6 @@ import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.decommission.DecommissionAttribute; -import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; -import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -64,6 +61,7 @@ import java.util.function.BiConsumer; import java.util.stream.Collectors; +import static org.opensearch.cluster.decommission.DecommissionService.nodeCommissioned; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; /** @@ -196,6 +194,9 @@ public ClusterTasksResult execute(ClusterState currentState, List jo // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getVersion(), currentState.getMetadata()); + // we have added the same check in handleJoinRequest method and adding it here as this method + // would guarantee that a decommissioned node would never be able to join the cluster and ensures correctness + ensureNodeCommissioned(node, currentState.metadata()); nodesBuilder.add(node); nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); @@ -203,7 +204,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo if (node.isClusterManagerNode()) { joiniedNodeNameIds.put(node.getName(), node.getId()); } - } catch (IllegalArgumentException | IllegalStateException e) { + } catch (IllegalArgumentException | IllegalStateException | NodeDecommissionedException e) { results.failure(joinTask, e); continue; } @@ -477,22 +478,13 @@ public static void ensureMajorVersionBarrier(Version joiningNodeVersion, Version } public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata) { - DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.decommissionAttributeMetadata(); - if (decommissionAttributeMetadata != null) { - DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); - DecommissionStatus status = decommissionAttributeMetadata.status(); - if (decommissionAttribute != null && status != null) { - // We will let the node join the cluster if the current status is in FAILED state - if (node.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()) - && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { - throw new NodeDecommissionedException( - "node [{}] has decommissioned attribute [{}] with current status of decommissioning [{}]", - node.toString(), - decommissionAttribute.toString(), - 
status.status() - ); - } - } + if (nodeCommissioned(node, metadata) == false) { + throw new NodeDecommissionedException( + "node [{}] has decommissioned attribute [{}] with current status of decommissioning [{}]", + node.toString(), + metadata.decommissionAttributeMetadata().decommissionAttribute().toString(), + metadata.decommissionAttributeMetadata().status().status() + ); } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index fcab411f073ba..5def2733b5ded 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -389,10 +389,6 @@ private Set filterNodesWithDecommissionAttribute( return nodesWithDecommissionAttribute; } - private static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { - return discoveryNode.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()); - } - private static void validateAwarenessAttribute( final DecommissionAttribute decommissionAttribute, List awarenessAttributes, @@ -531,4 +527,38 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }); } + + /** + * Utility method to check if the node has decommissioned attribute + * + * @param discoveryNode node to check on + * @param decommissionAttribute attribute to be checked with + * @return true or false based on whether node has decommissioned attribute + */ + public static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { + String nodeAttributeValue = discoveryNode.getAttributes().get(decommissionAttribute.attributeName()); + return nodeAttributeValue != null && nodeAttributeValue.equals(decommissionAttribute.attributeValue()); + } + + /** + * Utility method to check if the node is commissioned or not + * + * @param discoveryNode node to check on + * @param metadata metadata present current which will be used to check the commissioning status of the node + * @return if the node is commissioned or not + */ + public static boolean nodeCommissioned(DiscoveryNode discoveryNode, Metadata metadata) { + DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.decommissionAttributeMetadata(); + if (decommissionAttributeMetadata != null) { + DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); + DecommissionStatus status = decommissionAttributeMetadata.status(); + if (decommissionAttribute != null && status != null) { + if (nodeHasDecommissionedAttribute(discoveryNode, decommissionAttribute) + && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { + return false; + } + } + } + return true; + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 1665614c18496..54579031aac08 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -534,6 +534,7 @@ public void apply(Settings value, Settings current, Settings previous) { PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, 
EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING, + PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING, PeerFinder.DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING, ClusterFormationFailureHelper.DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING, ElectionSchedulerFactory.ELECTION_INITIAL_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index a601a6fbe4d82..e8b6c72c512a2 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -84,6 +84,14 @@ public abstract class PeerFinder { Setting.Property.NodeScope ); + // the time between attempts to find all peers when node is in decommissioned state, default set to 2 minutes + public static final Setting DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING = Setting.timeSetting( + "discovery.find_peers_interval_during_decommission", + TimeValue.timeValueSeconds(120L), + TimeValue.timeValueMillis(1000), + Setting.Property.NodeScope + ); + public static final Setting DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING = Setting.timeSetting( "discovery.request_peers_timeout", TimeValue.timeValueMillis(3000), @@ -91,7 +99,8 @@ public abstract class PeerFinder { Setting.Property.NodeScope ); - private final TimeValue findPeersInterval; + private final Settings settings; + private TimeValue findPeersInterval; private final TimeValue requestPeersTimeout; private final Object mutex = new Object(); @@ -112,6 +121,7 @@ public PeerFinder( TransportAddressConnector transportAddressConnector, ConfiguredHostsResolver configuredHostsResolver ) { + this.settings = settings; findPeersInterval = DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings); requestPeersTimeout = DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING.get(settings); this.transportService = transportService; @@ -128,6 +138,23 @@ public PeerFinder( ); } + public synchronized void onNodeCommissionStatusChange(boolean localNodeCommissioned) { + findPeersInterval = localNodeCommissioned + ? 
DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings) + : DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING.get(settings); + logger.info( + "setting findPeersInterval to [{}] as node commission status = [{}] for local node [{}]", + findPeersInterval, + localNodeCommissioned, + transportService.getLocalNode() + ); + } + + // package private for tests + TimeValue getFindPeersInterval() { + return findPeersInterval; + } + public void activate(final DiscoveryNodes lastAcceptedNodes) { logger.trace("activating with {}", lastAcceptedNodes); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index d96c972bc6021..74c5d0fcccbed 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -87,6 +87,7 @@ import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING; import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES; import static org.opensearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; +import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING; import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; @@ -1780,6 +1781,48 @@ public void testImproveConfigurationPerformsVotingConfigExclusionStateCheck() { } } + public void testLocalNodeAlwaysCommissionedWithoutDecommissionedException() { + try (Cluster cluster = new Cluster(randomIntBetween(1, 5))) { + cluster.runRandomly(); + cluster.stabilise(); + for (ClusterNode node : cluster.clusterNodes) { + assertTrue(node.coordinator.localNodeCommissioned()); + } + } + } + + public void testClusterStabilisesForPreviouslyDecommissionedNode() { + try (Cluster cluster = new Cluster(randomIntBetween(1, 5))) { + cluster.runRandomly(); + cluster.stabilise(); + for (ClusterNode node : cluster.clusterNodes) { + assertTrue(node.coordinator.localNodeCommissioned()); + } + final ClusterNode leader = cluster.getAnyLeader(); + + ClusterNode decommissionedNode = cluster.new ClusterNode( + nextNodeIndex.getAndIncrement(), true, leader.nodeSettings, () -> new StatusInfo(HEALTHY, "healthy-info") + ); + decommissionedNode.coordinator.onNodeCommissionStatusChange(false); + cluster.clusterNodes.add(decommissionedNode); + + assertFalse(decommissionedNode.coordinator.localNodeCommissioned()); + + cluster.stabilise( + // Interval is updated to decommissioned find peer interval + defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING) + // One message delay to send a join + + DEFAULT_DELAY_VARIABILITY + // Commit a new cluster state with the new node(s). 
Might be split into multiple commits, and each might need a + // followup reconfiguration + + 3 * 2 * DEFAULT_CLUSTER_STATE_UPDATE_DELAY + ); + + // once cluster stabilises the node joins and would be commissioned + assertTrue(decommissionedNode.coordinator.localNodeCommissioned()); + } + } + private ClusterState buildNewClusterStateWithVotingConfigExclusion( ClusterState currentState, Set newVotingConfigExclusion diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index a3c945cdbac3a..7b21042b2ed4a 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -90,7 +90,8 @@ public void testJoinDeduplication() { startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - () -> new StatusInfo(HEALTHY, "info") + () -> new StatusInfo(HEALTHY, "info"), + nodeCommissioned -> {} ); transportService.start(); @@ -230,7 +231,8 @@ private void assertJoinValidationRejectsMismatchedClusterUUID(String actionName, startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - null + null, + nodeCommissioned -> {} ); // registers request handler transportService.start(); transportService.acceptIncomingRequests(); @@ -284,7 +286,8 @@ public void testJoinFailureOnUnhealthyNodes() { startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - () -> nodeHealthServiceStatus.get() + () -> nodeHealthServiceStatus.get(), + nodeCommissioned -> {} ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 52922bc6a0e83..66a3b00f2979d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -261,6 +261,51 @@ public void testJoinClusterWithDifferentDecommission() { JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata); } + public void testJoinFailedForDecommissionedNode() throws Exception { + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone1"); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.SUCCESSFUL + ); + final ClusterState clusterManagerClusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .metadata(Metadata.builder().decommissionAttributeMetadata(decommissionAttributeMetadata)) + .build(); + + final DiscoveryNode 
decommissionedNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone1"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + String decommissionedNodeID = decommissionedNode.getId(); + + final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( + clusterManagerClusterState, + List.of(new JoinTaskExecutor.Task(decommissionedNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertFalse(taskResult.isSuccess()); + assertTrue(taskResult.getFailure() instanceof NodeDecommissionedException); + assertFalse(result.resultingState.getNodes().nodeExists(decommissionedNodeID)); + } + public void testJoinClusterWithDecommissionFailed() { Settings.builder().build(); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-1"); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index c77baba5fe167..18a7b892a424c 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -39,6 +39,10 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; @@ -775,6 +779,60 @@ public void testJoinElectedLeaderWithDeprecatedMasterRole() { assertTrue(clusterStateHasNode(node1)); } + public void testJoinFailsWhenDecommissioned() { + DiscoveryNode node0 = newNode(0, true); + DiscoveryNode node1 = newNode(1, true); + long initialTerm = randomLongBetween(1, 10); + long initialVersion = randomLongBetween(1, 10); + setupFakeClusterManagerServiceAndCoordinator( + initialTerm, + initialStateWithDecommissionedAttribute( + initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), + new DecommissionAttribute("zone", "zone1") + ), + () -> new StatusInfo(HEALTHY, "healthy-info") + ); + assertFalse(isLocalNodeElectedMaster()); + long newTerm = initialTerm + randomLongBetween(1, 10); + joinNodeAndRun(new JoinRequest(node0, newTerm, Optional.of(new Join(node0, node0, newTerm, initialTerm, initialVersion)))); + assertTrue(isLocalNodeElectedMaster()); + assertFalse(clusterStateHasNode(node1)); + joinNodeAndRun(new JoinRequest(node1, newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion)))); + assertTrue(isLocalNodeElectedMaster()); + assertTrue(clusterStateHasNode(node1)); + DiscoveryNode decommissionedNode = new DiscoveryNode( + "data_2", + 2 + "", + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone1"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + long anotherTerm = newTerm + randomLongBetween(1, 10); + + assertThat( + expectThrows( + NodeDecommissionedException.class, + () -> 
joinNodeAndRun(new JoinRequest(decommissionedNode, anotherTerm, Optional.empty())) + ).getMessage(), + containsString("with current status of decommissioning") + ); + assertFalse(clusterStateHasNode(decommissionedNode)); + + DiscoveryNode node3 = new DiscoveryNode( + "data_3", + 3 + "", + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone2"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + long termForNode3 = anotherTerm + randomLongBetween(1, 10); + + joinNodeAndRun(new JoinRequest(node3, termForNode3, Optional.empty())); + assertTrue(clusterStateHasNode(node3)); + } + private boolean isLocalNodeElectedMaster() { return MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); } @@ -782,4 +840,17 @@ private boolean isLocalNodeElectedMaster() { private boolean clusterStateHasNode(DiscoveryNode node) { return node.equals(MasterServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); } + + private static ClusterState initialStateWithDecommissionedAttribute( + ClusterState clusterState, + DecommissionAttribute decommissionAttribute + ) { + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.SUCCESSFUL + ); + return ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.metadata()).decommissionAttributeMetadata(decommissionAttributeMetadata)) + .build(); + } } diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index 5e7dede0309c6..7e7bb2f0a2911 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -807,6 +807,42 @@ public void testReconnectsToDisconnectedNodes() { assertFoundPeers(rebootedOtherNode); } + public void testConnectionAttemptDuringDecommissioning() { + boolean localNodeCommissioned = randomBoolean(); + peerFinder.onNodeCommissionStatusChange(localNodeCommissioned); + + long findPeersInterval = peerFinder.getFindPeersInterval().millis(); + + final DiscoveryNode otherNode = newDiscoveryNode("node-1"); + providedAddresses.add(otherNode.getAddress()); + transportAddressConnector.addReachableNode(otherNode); + + peerFinder.activate(lastAcceptedNodes); + runAllRunnableTasks(); + assertFoundPeers(otherNode); + + transportAddressConnector.reachableNodes.clear(); + final DiscoveryNode newNode = new DiscoveryNode("new-node", otherNode.getAddress(), Version.CURRENT); + transportAddressConnector.addReachableNode(newNode); + + connectedNodes.remove(otherNode); + disconnectedNodes.add(otherNode); + + // peer discovery will be delayed now + if (localNodeCommissioned == false) { + deterministicTaskQueue.advanceTime(); + runAllRunnableTasks(); + assertPeersNotDiscovered(newNode); + } + + final long expectedTime = CONNECTION_TIMEOUT_MILLIS + findPeersInterval; + while (deterministicTaskQueue.getCurrentTimeMillis() < expectedTime) { + deterministicTaskQueue.advanceTime(); + runAllRunnableTasks(); + } + assertFoundPeers(newNode); + } + private void respondToRequests(Function responseFactory) { final CapturedRequest[] capturedRequests = capturingTransport.getCapturedRequestsAndClear(); for (final CapturedRequest capturedRequest : capturedRequests) { @@ -828,6 +864,16 @@ private void assertFoundPeers(DiscoveryNode... 
expectedNodesArray) { assertNotifiedOfAllUpdates(); } + private void assertPeersNotDiscovered(DiscoveryNode... undiscoveredNodesArray) { + final Set undiscoveredNodes = Arrays.stream(undiscoveredNodesArray).collect(Collectors.toSet()); + final List actualNodesList = StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false) + .collect(Collectors.toList()); + final HashSet actualNodesSet = new HashSet<>(actualNodesList); + Set intersection = new HashSet<>(actualNodesSet); + intersection.retainAll(undiscoveredNodes); + assertEquals(intersection.size(), 0); + } + private void assertNotifiedOfAllUpdates() { final Stream actualNodes = StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false); final Stream notifiedNodes = StreamSupport.stream(foundPeersFromNotification.spliterator(), false); From fe3994ccdf62735907bc4834f93c71d97636d150 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 15:41:16 -0500 Subject: [PATCH 039/122] [Remove] LegacyESVersion.V_7_2_* and V_7_3_* constants (#4702) Removes all usages of LegacyESVersion.V_7_2_ and LegacyESVersion.V_7_3 version constants along with related ancient APIs. Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 3 +- .../common/CommonAnalysisModulePlugin.java | 45 ++++----- .../upgrades/FullClusterRestartIT.java | 8 +- .../org/opensearch/upgrades/RecoveryIT.java | 59 ++++-------- .../gateway/ReplicaShardAllocatorIT.java | 3 +- .../indices/IndicesLifecycleListenerIT.java | 11 +-- .../java/org/opensearch/LegacyESVersion.java | 5 - .../org/opensearch/OpenSearchException.java | 2 +- .../cluster/health/ClusterHealthRequest.java | 11 +-- .../TransportNodesHotThreadsAction.java | 4 +- .../node/info/TransportNodesInfoAction.java | 4 +- ...nsportNodesReloadSecureSettingsAction.java | 4 +- .../node/stats/TransportNodesStatsAction.java | 4 +- .../node/usage/TransportNodesUsageAction.java | 4 +- .../create/CreateSnapshotRequest.java | 9 +- .../status/TransportNodesSnapshotsStatus.java | 4 +- .../stats/TransportClusterStatsAction.java | 4 +- .../indices/alias/IndicesAliasesRequest.java | 12 +-- .../admin/indices/analyze/AnalyzeAction.java | 63 ++---------- .../indices/close/CloseIndexRequest.java | 11 +-- .../indices/close/CloseIndexResponse.java | 18 +--- ...TransportVerifyShardBeforeCloseAction.java | 11 +-- .../find/NodeFindDanglingIndexRequest.java | 4 +- .../list/NodeListDanglingIndicesRequest.java | 4 +- .../admin/indices/stats/CommonStatsFlags.java | 9 +- .../fieldcaps/FieldCapabilitiesRequest.java | 10 +- .../fieldcaps/FieldCapabilitiesResponse.java | 11 +-- .../action/search/GetAllPitNodeRequest.java | 4 +- .../action/support/nodes/BaseNodeRequest.java | 69 ------------- .../support/nodes/TransportNodesAction.java | 2 +- .../cluster/SnapshotsInProgress.java | 11 +-- .../cluster/metadata/IndexMetadata.java | 24 ++--- .../opensearch/cluster/metadata/Metadata.java | 19 +--- .../metadata/MetadataIndexStateService.java | 20 +--- .../cluster/node/DiscoveryNode.java | 96 ++++++------------- .../TransportNodesListGatewayMetaState.java | 4 +- ...ransportNodesListGatewayStartedShards.java | 4 +- .../index/engine/ReadOnlyEngine.java | 26 ++--- .../org/opensearch/index/get/GetResult.java | 25 +---- .../index/mapper/TextFieldMapper.java | 3 +- .../index/query/IntervalsSourceProvider.java | 11 +-- .../index/refresh/RefreshStats.java | 13 +-- .../index/seqno/ReplicationTracker.java | 2 +- .../recovery/RecoveryCleanFilesRequest.java | 12 +-- .../RecoveryTranslogOperationsRequest.java | 5 +- 
.../TransportNodesListShardStoreMetadata.java | 4 +- .../AutoDateHistogramAggregationBuilder.java | 25 +---- .../bucket/histogram/DateIntervalWrapper.java | 34 +------ .../metrics/InternalGeoCentroid.java | 17 +--- .../search/query/QuerySearchResult.java | 30 ------ .../search/sort/FieldSortBuilder.java | 9 +- .../opensearch/snapshots/SnapshotInfo.java | 15 +-- .../node/tasks/CancellableTasksTests.java | 4 +- .../node/tasks/ResourceAwareTasksTests.java | 4 +- .../node/tasks/TaskManagerTestCase.java | 6 +- .../cluster/node/tasks/TestTaskPlugin.java | 3 +- .../node/tasks/TransportTasksActionTests.java | 4 +- .../indices/close/CloseIndexRequestTests.java | 16 +--- .../nodes/TransportNodesActionTests.java | 3 +- .../termvectors/TermVectorsUnitTests.java | 4 +- 60 files changed, 212 insertions(+), 653 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 04b62c11a6865..7ee30dc84e9fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) - Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) +- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) ### Fixed @@ -160,4 +161,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index 7cad01c5cc00a..b0850d0b9144d 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -563,18 +563,22 @@ public List getPreConfiguredTokenFilters() { ) ) ); - filters.add(PreConfiguredTokenFilter.openSearchVersion("word_delimiter_graph", false, false, (input, version) -> { - boolean adjustOffsets = version.onOrAfter(LegacyESVersion.V_7_3_0); - return new WordDelimiterGraphFilter( - input, - adjustOffsets, - WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, - WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS - | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS - | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, - null - ); - })); + filters.add( + PreConfiguredTokenFilter.openSearchVersion( + "word_delimiter_graph", + false, + false, + (input, version) -> new WordDelimiterGraphFilter( + input, + true, + WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, + 
WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS + | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS + | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, + null + ) + ) + ); return filters; } @@ -588,12 +592,12 @@ public List getPreConfiguredTokenizers() { tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new)); - tokenizers.add(PreConfiguredTokenizer.openSearchVersion("edge_ngram", (version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_3_0)) { - return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - } - return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); - })); + tokenizers.add( + PreConfiguredTokenizer.openSearchVersion( + "edge_ngram", + (version) -> new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE) + ) + ); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1))); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new)); // TODO deprecate and remove in API @@ -619,10 +623,7 @@ public List getPreConfiguredTokenizers() { + "Please change the tokenizer name to [edge_ngram] instead." ); } - if (version.onOrAfter(LegacyESVersion.V_7_3_0)) { - return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - } - return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); })); tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index 0ed51b9d8a011..a3db7532c3a10 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -1003,12 +1003,8 @@ public void testClosedIndices() throws Exception { closeIndex(index); } - if (getOldClusterVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - ensureGreenLongWait(index); - assertClosedIndex(index, true); - } else { - assertClosedIndex(index, false); - } + ensureGreenLongWait(index); + assertClosedIndex(index, true); if (isRunningAgainstOldCluster() == false) { openIndex(index); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index 3d71f4a198aac..dbb18aec19fca 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -245,7 +245,6 @@ private String getNodeId(Predicate versionPredicate) throws IOException if (versionPredicate.test(version)) { return id; } - return id; } return null; } @@ -457,15 +456,10 @@ public void testRecoveryClosedIndex() throws Exception { closeIndex(indexName); } - final Version indexVersionCreated = 
indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index was created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - } else { - assertClosedIndex(indexName, false); - } + // index was created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); } /** @@ -491,14 +485,10 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { closeIndex(indexName); } - if (minimumNodeVersion.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index is created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - } else { - assertClosedIndex(indexName, false); - } + // index is created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); } /** @@ -525,27 +515,20 @@ public void testClosedIndexNoopRecovery() throws Exception { closeIndex(indexName); } - final Version indexVersionCreated = indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index was created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - if (minimumNodeVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - switch (CLUSTER_TYPE) { - case OLD: break; - case MIXED: - assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME + "-0")); - break; - case UPGRADED: - assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME)); - break; - } - } - } else { - assertClosedIndex(indexName, false); - } + // index was created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); + switch (CLUSTER_TYPE) { + case OLD: break; + case MIXED: + assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME + "-0")); + break; + case UPGRADED: + assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME)); + break; + } } /** * Returns the version in which the given index has been created diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java index db46fb4424848..6d05ecd0b56b0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java @@ -32,7 +32,6 @@ package org.opensearch.gateway; -import org.opensearch.LegacyESVersion; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -512,7 +511,7 @@ public void testPeerRecoveryForClosedIndices() throws Exception { } /** - * If the recovery source is on an old node (before

{@link LegacyESVersion#V_7_2_0}) then the recovery target + * If the recovery source is on an old node (before {@code LegacyESVersion#V_7_2_0}
) then the recovery target * won't have the safe commit after phase1 because the recovery source does not send the global checkpoint in the clean_files * step. And if the recovery fails and retries, then the recovery stage might not transition properly. This test simulates * this behavior by changing the global checkpoint in phase1 to unassigned. diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java index 72f28e94528ba..17a7d4c84b6fe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java @@ -32,9 +32,7 @@ package org.opensearch.indices; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; -import org.opensearch.Version; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -246,13 +244,8 @@ public void testIndexStateShardChanged() throws Throwable { assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1), equalTo(6)); assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1), equalTo(1)); - if (Version.CURRENT.onOrAfter(LegacyESVersion.V_7_2_0)) { - assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); - assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); - } else { - assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED); - assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED); - } + assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); + assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); } private static void assertShardStatesMatch( diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index 1eb22a6bef3b5..283a6581a64bb 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,11 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_2_0 = new LegacyESVersion(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_2_1 = new LegacyESVersion(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_3_0 = new LegacyESVersion(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_3_1 = new LegacyESVersion(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_3_2 = new LegacyESVersion(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final LegacyESVersion V_7_4_0 = new LegacyESVersion(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final LegacyESVersion V_7_4_1 = new LegacyESVersion(7040199, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final LegacyESVersion V_7_4_2 = new LegacyESVersion(7040299, org.apache.lucene.util.Version.LUCENE_8_2_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java 
b/server/src/main/java/org/opensearch/OpenSearchException.java index 17ece23f819a2..e9f0d831d2977 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -601,7 +601,7 @@ public static void generateFailureXContent(XContentBuilder builder, Params param } t = t.getCause(); } - builder.field(ERROR, ExceptionsHelper.summaryMessage(t)); + builder.field(ERROR, ExceptionsHelper.summaryMessage(e)); return; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java index 1dedf481dec56..84a7616fe6b06 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.health; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; @@ -90,11 +89,7 @@ public ClusterHealthRequest(StreamInput in) throws IOException { waitForEvents = Priority.readFrom(in); } waitForNoInitializingShards = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indicesOptions = IndicesOptions.readIndicesOptions(in); - } else { - indicesOptions = IndicesOptions.lenientExpandOpen(); - } + indicesOptions = IndicesOptions.readIndicesOptions(in); } @Override @@ -122,9 +117,7 @@ public void writeTo(StreamOutput out) throws IOException { Priority.writeTo(waitForEvents, out); } out.writeBoolean(waitForNoInitializingShards); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indicesOptions.writeIndicesOptions(out); - } + indicesOptions.writeIndicesOptions(out); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index e8429580ec887..4c71993251f4f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -35,7 +35,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -43,6 +42,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.monitor.jvm.HotThreads; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -117,7 +117,7 @@ protected NodeHotThreads nodeOperation(NodeRequest request) { * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodesHotThreadsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index 7bcf83ba28111..ee7b287b878e7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 +41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -126,7 +126,7 @@ protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) { * * @opensearch.internal */ - public static class NodeInfoRequest extends BaseNodeRequest { + public static class NodeInfoRequest extends TransportRequest { NodesInfoRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index d7ad4357fa046..920c66bc5c543 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -39,7 +39,6 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; @@ -54,6 +53,7 @@ import org.opensearch.plugins.ReloadablePlugin; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -188,7 +188,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodesReloadSecureSettingsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 644c7f02d45f0..5d5d54c8fe7ed 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 
+41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -127,7 +127,7 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { * * @opensearch.internal */ - public static class NodeStatsRequest extends BaseNodeRequest { + public static class NodeStatsRequest extends TransportRequest { NodesStatsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java index c7612f7e15838..dbd3673149efe 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 +41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.aggregations.support.AggregationUsageService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.usage.UsageService; @@ -117,7 +117,7 @@ protected NodeUsage nodeOperation(NodeUsageRequest nodeUsageRequest) { * * @opensearch.internal */ - public static class NodeUsageRequest extends BaseNodeRequest { + public static class NodeUsageRequest extends TransportRequest { NodesUsageRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index d78a4c95246b4..cb64718ed5843 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -60,7 +60,6 @@ import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; import static org.opensearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; -import static org.opensearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED; /** * Create snapshot request @@ -124,9 +123,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException { includeGlobalState = in.readBoolean(); waitForCompletion = in.readBoolean(); partial = in.readBoolean(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } + userMetadata = in.readMap(); } @Override @@ -140,9 +137,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(includeGlobalState); out.writeBoolean(waitForCompletion); out.writeBoolean(partial); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - } + out.writeMap(userMetadata); } @Override diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 86d0499a23f9e..e9bf564afaf32 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -36,7 +36,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -51,6 +50,7 @@ import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotShardsService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -207,7 +207,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) th * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { private final List snapshots; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index a13932e137ab0..401813a6174fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -40,7 +40,6 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -57,6 +56,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.transport.Transports; @@ -216,7 +216,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq * * @opensearch.internal */ - public static class ClusterStatsNodeRequest extends BaseNodeRequest { + public static class ClusterStatsNodeRequest extends TransportRequest { ClusterStatsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index eb2d2706a6531..bb28623430f2d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -88,11 +88,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest tokens, DetailAnalyzeResponse detail) { } public Response(StreamInput in) throws IOException { - if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); - tokens = tokenArray != null ? Arrays.asList(tokenArray) : null; - } else { - int size = in.readVInt(); - if (size > 0) { - tokens = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - tokens.add(new AnalyzeToken(in)); - } - } else { - tokens = null; - } - } + AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); + tokens = tokenArray != null ? Arrays.asList(tokenArray) : null; detail = in.readOptionalWriteable(DetailAnalyzeResponse::new); } @@ -371,22 +358,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - AnalyzeToken[] tokenArray = null; - if (tokens != null) { - tokenArray = tokens.toArray(new AnalyzeToken[0]); - } - out.writeOptionalArray(tokenArray); - } else { - if (tokens != null) { - out.writeVInt(tokens.size()); - for (AnalyzeToken token : tokens) { - token.writeTo(out); - } - } else { - out.writeVInt(0); - } + AnalyzeToken[] tokenArray = null; + if (tokens != null) { + tokenArray = tokens.toArray(new AnalyzeToken[0]); } + out.writeOptionalArray(tokenArray); out.writeOptionalWriteable(detail); } @@ -766,19 +742,7 @@ public AnalyzeTokenList(String name, AnalyzeToken[] tokens) { AnalyzeTokenList(StreamInput in) throws IOException { name = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); - } else { - int size = in.readVInt(); - if (size > 0) { - tokens = new AnalyzeToken[size]; - for (int i = 0; i < size; i++) { - tokens[i] = new AnalyzeToken(in); - } - } else { - tokens = null; - } - } + tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); } public String getName() { @@ -811,18 +775,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeOptionalArray(tokens); - } else { - if (tokens != null) { - out.writeVInt(tokens.length); - for (AnalyzeToken token : tokens) { - token.writeTo(out); - } - } else { - out.writeVInt(0); - } - } + out.writeOptionalArray(tokens); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java index b16cabfda4d67..1095cec447442 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.close; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; @@ -61,11 +60,7 @@ public CloseIndexRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - waitForActiveShards = ActiveShardCount.readFrom(in); - } else { - waitForActiveShards = ActiveShardCount.NONE; - } + waitForActiveShards = 
ActiveShardCount.readFrom(in); } public CloseIndexRequest() {} @@ -143,8 +138,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - waitForActiveShards.writeTo(out); - } + waitForActiveShards.writeTo(out); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java index 1fc9017359a8c..0388ea47bfc69 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.close; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; @@ -49,7 +48,6 @@ import java.util.List; import java.util.Objects; -import static java.util.Collections.emptyList; import static java.util.Collections.unmodifiableList; /** @@ -62,12 +60,8 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { private final List indices; CloseIndexResponse(StreamInput in) throws IOException { - super(in, in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - indices = unmodifiableList(in.readList(IndexResult::new)); - } else { - indices = unmodifiableList(emptyList()); - } + super(in, true); + indices = unmodifiableList(in.readList(IndexResult::new)); } public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged, final List indices) { @@ -82,12 +76,8 @@ public List getIndices() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - writeShardsAcknowledged(out); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeList(indices); - } + writeShardsAcknowledged(out); + out.writeList(indices); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index fe39e2a254301..691b2c7c95730 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.support.ActionFilters; @@ -205,11 +204,7 @@ public static class ShardRequest extends ReplicationRequest { ShardRequest(StreamInput in) throws IOException { super(in); clusterBlock = new ClusterBlock(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - phase1 = in.readBoolean(); - } else { - phase1 = false; - } + phase1 = in.readBoolean(); } public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final boolean phase1, final TaskId parentTaskId) { @@ -228,9 +223,7 @@ public String 
toString() { public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); clusterBlock.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeBoolean(phase1); - } + out.writeBoolean(phase1); } public ClusterBlock clusterBlock() { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java index 6026dd10c607b..6885de74e4479 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java @@ -34,16 +34,16 @@ import java.io.IOException; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; /** * Used when querying every node in the cluster for a specific dangling index. * * @opensearch.internal */ -public class NodeFindDanglingIndexRequest extends BaseNodeRequest { +public class NodeFindDanglingIndexRequest extends TransportRequest { private final String indexUUID; public NodeFindDanglingIndexRequest(String indexUUID) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java index 9b737fff8316e..696daf75942fb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java @@ -32,9 +32,9 @@ package org.opensearch.action.admin.indices.dangling.list; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; import java.io.IOException; @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class NodeListDanglingIndicesRequest extends BaseNodeRequest { +public class NodeListDanglingIndicesRequest extends TransportRequest { /** * Filter the response by index UUID. Leave as null to find all indices. 
*/ diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 9a24d8a42dc9d..fd3d6daa9c393 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.stats; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -87,9 +86,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { fieldDataFields = in.readStringArray(); completionDataFields = in.readStringArray(); includeSegmentFileSizes = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - includeUnloadedSegments = in.readBoolean(); - } + includeUnloadedSegments = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_1_2_0)) { includeAllShardIndexingPressureTrackers = in.readBoolean(); includeOnlyTopIndexingPressureMetrics = in.readBoolean(); @@ -111,9 +108,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArrayNullable(fieldDataFields); out.writeStringArrayNullable(completionDataFields); out.writeBoolean(includeSegmentFileSizes); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeBoolean(includeUnloadedSegments); - } + out.writeBoolean(includeUnloadedSegments); if (out.getVersion().onOrAfter(Version.V_1_2_0)) { out.writeBoolean(includeAllShardIndexingPressureTrackers); out.writeBoolean(includeOnlyTopIndexingPressureMetrics); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java index 688568ba9a6d6..50d60560bdfe4 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -72,11 +72,7 @@ public FieldCapabilitiesRequest(StreamInput in) throws IOException { indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); mergeResults = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - includeUnmapped = in.readBoolean(); - } else { - includeUnmapped = false; - } + includeUnmapped = in.readBoolean(); indexFilter = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readOptionalNamedWriteable(QueryBuilder.class) : null; nowInMillis = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? 
in.readOptionalLong() : null; } @@ -109,9 +105,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); out.writeBoolean(mergeResults); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeBoolean(includeUnmapped); - } + out.writeBoolean(includeUnmapped); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { out.writeOptionalNamedWriteable(indexFilter); out.writeOptionalLong(nowInMillis); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java index e5f644987182c..847cca25ceb35 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; @@ -87,11 +86,7 @@ private FieldCapabilitiesResponse( public FieldCapabilitiesResponse(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indices = in.readStringArray(); - } else { - indices = Strings.EMPTY_ARRAY; - } + indices = in.readStringArray(); this.responseMap = in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField); indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); } @@ -138,9 +133,7 @@ private static Map readField(StreamInput in) throws I @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeStringArray(indices); - } + out.writeStringArray(indices); out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); out.writeList(indexResponses); } diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java index c90f75e3c0aed..de0c0dd9bbfc3 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java @@ -9,16 +9,16 @@ package org.opensearch.action.search; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; import java.io.IOException; /** * Inner node get all pits request */ -public class GetAllPitNodeRequest extends BaseNodeRequest { +public class GetAllPitNodeRequest extends TransportRequest { public GetAllPitNodeRequest() { super(); diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java deleted file mode 100644 index b5ff1d60ff75b..0000000000000 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.support.nodes; - -import org.opensearch.LegacyESVersion; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportRequest; - -import java.io.IOException; - -/** - * Base class for node transport requests - * - * @opensearch.internal - * - * @deprecated this class is deprecated and classes will extend TransportRequest directly - */ -// TODO: this class can be removed in main once 7.x is bumped to 7.4.0 -@Deprecated -public abstract class BaseNodeRequest extends TransportRequest { - - public BaseNodeRequest() {} - - public BaseNodeRequest(StreamInput in) throws IOException { - super(in); - if (in.getVersion().before(LegacyESVersion.V_7_3_0)) { - in.readString(); // previously nodeId - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - if (out.getVersion().before(LegacyESVersion.V_7_3_0)) { - out.writeString(""); // previously nodeId - } - } -} diff --git a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java index 18fcdfad0bcc4..a12e9b753599d 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java @@ -70,7 +70,7 @@ public abstract class TransportNodesAction< NodesRequest extends BaseNodesRequest, NodesResponse extends BaseNodesResponse, - NodeRequest extends BaseNodeRequest, + NodeRequest extends TransportRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction { protected final ThreadPool threadPool; diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 0ff373b6116de..4cbf0cfe70adb 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -67,7 +67,6 @@ import java.util.stream.Collectors; import static org.opensearch.snapshots.SnapshotInfo.DATA_STREAMS_IN_SNAPSHOT; -import static org.opensearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED; /** * Meta data about snapshots that are currently executing @@ -296,11 +295,7 @@ private Entry(StreamInput in) throws IOException { shards = in.readImmutableMap(ShardId::new, ShardSnapshotStatus::readFrom); repositoryStateId = in.readLong(); failure = in.readOptionalString(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } else { - userMetadata = null; - } + 
userMetadata = in.readMap(); if (in.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { version = Version.readVersion(in); } else if (in.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { @@ -736,9 +731,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(shards); out.writeLong(repositoryStateId); out.writeOptionalString(failure); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - } + out.writeMap(userMetadata); if (out.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { Version.writeVersion(version, out); } else if (out.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index cd1c92a8b109f..b6ca8c52cd818 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -1016,11 +1016,7 @@ private static class IndexMetadataDiff implements Diff { version = in.readLong(); mappingVersion = in.readVLong(); settingsVersion = in.readVLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - aliasesVersion = in.readVLong(); - } else { - aliasesVersion = 1; - } + aliasesVersion = in.readVLong(); state = State.fromId(in.readByte()); settings = Settings.readSettingsFromStream(in); primaryTerms = in.readVLongArray(); @@ -1051,9 +1047,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(aliasesVersion); - } + out.writeVLong(aliasesVersion); out.writeByte(state.id); Settings.writeSettingsToStream(settings, out); out.writeVLongArray(primaryTerms); @@ -1093,11 +1087,7 @@ public static IndexMetadata readFrom(StreamInput in) throws IOException { builder.version(in.readLong()); builder.mappingVersion(in.readVLong()); builder.settingsVersion(in.readVLong()); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - builder.aliasesVersion(in.readVLong()); - } else { - builder.aliasesVersion(1); - } + builder.aliasesVersion(in.readVLong()); builder.setRoutingNumShards(in.readInt()); builder.state(State.fromId(in.readByte())); builder.settings(readSettingsFromStream(in)); @@ -1140,9 +1130,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(aliasesVersion); - } + out.writeVLong(aliasesVersion); out.writeInt(routingNumShards); out.writeByte(state.id()); writeSettingsToStream(settings, out); @@ -1821,8 +1809,8 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti if (Assertions.ENABLED) { assert settingsVersion : "settings version should be present for indices"; } - if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(LegacyESVersion.V_7_2_0)) { - assert aliasesVersion : "aliases version should be present for indices created on or after 7.2.0"; + if (Assertions.ENABLED) { + assert aliasesVersion : "aliases version should be present for indices"; } return builder.build(); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java 
index fb8123f20e904..8e73a72d43219 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -39,7 +39,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.CollectionUtil; -import org.opensearch.LegacyESVersion; import org.opensearch.action.AliasesRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.FeatureAware; @@ -986,11 +985,7 @@ private static class MetadataDiff implements Diff { coordinationMetadata = new CoordinationMetadata(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); - } else { - hashesOfConsistentSettings = DiffableStringMap.DiffableStringMapDiff.EMPTY; - } + hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), INDEX_METADATA_DIFF_VALUE_READER); templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), TEMPLATES_DIFF_VALUE_READER); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); @@ -1004,9 +999,7 @@ public void writeTo(StreamOutput out) throws IOException { coordinationMetadata.writeTo(out); Settings.writeSettingsToStream(transientSettings, out); Settings.writeSettingsToStream(persistentSettings, out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - hashesOfConsistentSettings.writeTo(out); - } + hashesOfConsistentSettings.writeTo(out); indices.writeTo(out); templates.writeTo(out); customs.writeTo(out); @@ -1037,9 +1030,7 @@ public static Metadata readFrom(StreamInput in) throws IOException { builder.coordinationMetadata(new CoordinationMetadata(in)); builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in)); - } + builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in)); int size = in.readVInt(); for (int i = 0; i < size; i++) { builder.put(IndexMetadata.readFrom(in), false); @@ -1064,9 +1055,7 @@ public void writeTo(StreamOutput out) throws IOException { coordinationMetadata.writeTo(out); writeSettingsToStream(transientSettings, out); writeSettingsToStream(persistentSettings, out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - hashesOfConsistentSettings.writeTo(out); - } + hashesOfConsistentSettings.writeTo(out); out.writeVInt(indices.size()); for (IndexMetadata indexMetadata : this) { indexMetadata.writeTo(out); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java index 4f1000e3407fd..6de69c5a6f8f4 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import 
org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; @@ -840,10 +839,6 @@ static Tuple> closeRoutingTable( final Map verifyResult ) { - // Remove the index routing table of closed indices if the cluster is in a mixed version - // that does not support the replication of closed indices - final boolean removeRoutingTable = currentState.nodes().getMinNodeVersion().before(LegacyESVersion.V_7_2_0); - final Metadata.Builder metadata = Metadata.builder(currentState.metadata()); final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); final RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); @@ -916,16 +911,11 @@ static Tuple> closeRoutingTable( blocks.removeIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID); blocks.addIndexBlock(index.getName(), INDEX_CLOSED_BLOCK); final IndexMetadata.Builder updatedMetadata = IndexMetadata.builder(indexMetadata).state(IndexMetadata.State.CLOSE); - if (removeRoutingTable) { - metadata.put(updatedMetadata); - routingTable.remove(index.getName()); - } else { - metadata.put( - updatedMetadata.settingsVersion(indexMetadata.getSettingsVersion() + 1) - .settings(Settings.builder().put(indexMetadata.getSettings()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)) - ); - routingTable.addAsFromOpenToClose(metadata.getSafe(index)); - } + metadata.put( + updatedMetadata.settingsVersion(indexMetadata.getSettingsVersion() + 1) + .settings(Settings.builder().put(indexMetadata.getSettings()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)) + ); + routingTable.addAsFromOpenToClose(metadata.getSafe(index)); logger.debug("closing index {} succeeded", index); closedIndices.add(index.getName()); diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 199d79070c050..016c0aac44be6 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -49,7 +49,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -323,50 +322,30 @@ public DiscoveryNode(StreamInput in) throws IOException { } int rolesSize = in.readVInt(); final Set roles = new HashSet<>(rolesSize); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - for (int i = 0; i < rolesSize; i++) { - final String roleName = in.readString(); - final String roleNameAbbreviation = in.readString(); - final boolean canContainData; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - canContainData = in.readBoolean(); - } else { - canContainData = roleName.equals(DiscoveryNodeRole.DATA_ROLE.roleName()); - } - final DiscoveryNodeRole role = roleMap.get(roleName); - if (role == null) { - if (in.getVersion().onOrAfter(Version.V_2_1_0)) { - roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData)); - } else { - roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData)); - } - } else { - assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]"; - assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation [" - + roleName - + "] does not match role [" - + role.roleNameAbbreviation() - + "]"; - roles.add(role); - } 
+ for (int i = 0; i < rolesSize; i++) { + final String roleName = in.readString(); + final String roleNameAbbreviation = in.readString(); + final boolean canContainData; + if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { + canContainData = in.readBoolean(); + } else { + canContainData = roleName.equals(DiscoveryNodeRole.DATA_ROLE.roleName()); } - } else { - // an old node will only send us legacy roles since pluggable roles is a new concept - for (int i = 0; i < rolesSize; i++) { - final LegacyRole legacyRole = in.readEnum(LegacyRole.class); - switch (legacyRole) { - case MASTER: - roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); - break; - case DATA: - roles.add(DiscoveryNodeRole.DATA_ROLE); - break; - case INGEST: - roles.add(DiscoveryNodeRole.INGEST_ROLE); - break; - default: - throw new AssertionError(legacyRole.roleName()); + final DiscoveryNodeRole role = roleMap.get(roleName); + if (role == null) { + if (in.getVersion().onOrAfter(Version.V_2_1_0)) { + roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData)); + } else { + roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData)); } + } else { + assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]"; + assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation [" + + roleName + + "] does not match role [" + + role.roleNameAbbreviation() + + "]"; + roles.add(role); } } this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(roles)); @@ -386,30 +365,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(entry.getKey()); out.writeString(entry.getValue()); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeVInt(roles.size()); - for (final DiscoveryNodeRole role : roles) { - final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(out.getVersion()); - out.writeString(compatibleRole.roleName()); - out.writeString(compatibleRole.roleNameAbbreviation()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - out.writeBoolean(compatibleRole.canContainData()); - } - } - } else { - // an old node will only understand legacy roles since pluggable roles is a new concept - final List rolesToWrite = roles.stream() - .filter(DiscoveryNodeRole.LEGACY_ROLES::contains) - .collect(Collectors.toList()); - out.writeVInt(rolesToWrite.size()); - for (final DiscoveryNodeRole role : rolesToWrite) { - if (role.isClusterManager()) { - out.writeEnum(LegacyRole.MASTER); - } else if (role.equals(DiscoveryNodeRole.DATA_ROLE)) { - out.writeEnum(LegacyRole.DATA); - } else if (role.equals(DiscoveryNodeRole.INGEST_ROLE)) { - out.writeEnum(LegacyRole.INGEST); - } + out.writeVInt(roles.size()); + for (final DiscoveryNodeRole role : roles) { + final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(out.getVersion()); + out.writeString(compatibleRole.roleName()); + out.writeString(compatibleRole.roleNameAbbreviation()); + if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { + out.writeBoolean(compatibleRole.canContainData()); } } if (out.getVersion().before(Version.V_1_0_0) && version.onOrAfter(Version.V_1_0_0)) { diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java index be914c6a40a83..ba1490a7929bd 100644 --- 
a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java @@ -37,7 +37,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -52,6 +51,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.unit.TimeValue; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -168,7 +168,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodeRequest() {} NodeRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index c43f539243d7a..5b79ca5970e63 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -40,7 +40,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -65,6 +64,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -307,7 +307,7 @@ protected void writeNodesTo(StreamOutput out, List nod * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { private final ShardId shardId; @Nullable diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index f426768119c1d..dceb26bc33aa7 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -39,8 +39,6 @@ import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -174,25 +172,21 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStat if (requireCompleteHistory == false) { return; } - // Before 8.0 the global checkpoint is not known and up to date when the engine 
is created after + // Before 3.0 the global checkpoint is not known and up to date when the engine is created after // peer recovery, so we only check the max seq no / global checkpoint coherency when the global // checkpoint is different from the unassigned sequence number value. // In addition to that we only execute the check if the index the engine belongs to has been // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction // that guarantee that all operations have been flushed to Lucene. - final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated(); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0) - || (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO)) { - assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint()); - if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) { - throw new IllegalStateException( - "Maximum sequence number [" - + seqNoStats.getMaxSeqNo() - + "] from last commit does not match global checkpoint [" - + seqNoStats.getGlobalCheckpoint() - + "]" - ); - } + assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint()); + if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) { + throw new IllegalStateException( + "Maximum sequence number [" + + seqNoStats.getMaxSeqNo() + + "] from last commit does not match global checkpoint [" + + seqNoStats.getGlobalCheckpoint() + + "]" + ); } } diff --git a/server/src/main/java/org/opensearch/index/get/GetResult.java b/server/src/main/java/org/opensearch/index/get/GetResult.java index e2f23353f250e..5da4f8d5c7833 100644 --- a/server/src/main/java/org/opensearch/index/get/GetResult.java +++ b/server/src/main/java/org/opensearch/index/get/GetResult.java @@ -32,7 +32,6 @@ package org.opensearch.index.get; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.Strings; @@ -105,20 +104,8 @@ public GetResult(StreamInput in) throws IOException { if (source.length() == 0) { source = null; } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - documentFields = readFields(in); - metaFields = readFields(in); - } else { - Map fields = readFields(in); - documentFields = new HashMap<>(); - metaFields = new HashMap<>(); - fields.forEach( - (fieldName, docField) -> (MapperService.META_FIELDS_BEFORE_7DOT8.contains(fieldName) ? 
metaFields : documentFields).put( - fieldName, - docField - ) - ); - } + documentFields = readFields(in); + metaFields = readFields(in); } else { metaFields = Collections.emptyMap(); documentFields = Collections.emptyMap(); @@ -446,12 +433,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(exists); if (exists) { out.writeBytesReference(source); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - writeFields(out, documentFields); - writeFields(out, metaFields); - } else { - writeFields(out, this.getFields()); - } + writeFields(out, documentFields); + writeFields(out, metaFields); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java index 82bd8ebb4d362..15aae1774213a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java @@ -69,7 +69,6 @@ import org.apache.lucene.util.automaton.Automata; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.collect.Iterators; import org.opensearch.common.lucene.Lucene; @@ -422,7 +421,7 @@ private PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fi * or a multi-field). This way search will continue to work on old indices and new indices * will use the expected full name. */ - String fullName = indexCreatedVersion.before(LegacyESVersion.V_7_2_1) ? name() : buildFullName(context); + String fullName = buildFullName(context); // Copy the index options of the main field to allow phrase queries on // the prefix field. 
FieldType pft = new FieldType(fieldType); diff --git a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java index cc69a44c9c80c..e1cb0cdff4ebd 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java @@ -41,7 +41,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.CompiledAutomaton; import org.apache.lucene.util.automaton.RegExp; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -166,11 +165,7 @@ public Match(StreamInput in) throws IOException { } this.analyzer = in.readOptionalString(); this.filter = in.readOptionalWriteable(IntervalFilter::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - this.useField = in.readOptionalString(); - } else { - this.useField = null; - } + this.useField = in.readOptionalString(); } @Override @@ -234,9 +229,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalString(analyzer); out.writeOptionalWriteable(filter); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeOptionalString(useField); - } + out.writeOptionalString(useField); } @Override diff --git a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java index 8a54e5105c61e..fc874608fb3b1 100644 --- a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java +++ b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java @@ -32,7 +32,6 @@ package org.opensearch.index.refresh; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -68,10 +67,8 @@ public RefreshStats() {} public RefreshStats(StreamInput in) throws IOException { total = in.readVLong(); totalTimeInMillis = in.readVLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - externalTotal = in.readVLong(); - externalTotalTimeInMillis = in.readVLong(); - } + externalTotal = in.readVLong(); + externalTotalTimeInMillis = in.readVLong(); listeners = in.readVInt(); } @@ -79,10 +76,8 @@ public RefreshStats(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeVLong(total); out.writeVLong(totalTimeInMillis); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(externalTotal); - out.writeVLong(externalTotalTimeInMillis); - } + out.writeVLong(externalTotal); + out.writeVLong(externalTotalTimeInMillis); out.writeVInt(listeners); } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 55d95381923b3..63995ae121abb 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1495,7 +1495,7 @@ public synchronized void activateWithPrimaryContext(PrimaryContext primaryContex assert primaryMode == false; if (primaryContext.checkpoints.containsKey(shardAllocationId) == false) { // can happen if the old primary was on an old version - assert 
indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_3_0); + assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.fromId(7000099)); throw new IllegalStateException("primary context [" + primaryContext + "] does not contain " + shardAllocationId); } final Runnable runAfter = getClusterManagerUpdateOperationFromCurrentState(); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java index 6c597fcd086c4..d346ec5c975f4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java @@ -32,10 +32,8 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; @@ -76,11 +74,7 @@ public RecoveryCleanFilesRequest( shardId = new ShardId(in); snapshotFiles = new Store.MetadataSnapshot(in); totalTranslogOps = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - globalCheckpoint = in.readZLong(); - } else { - globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + globalCheckpoint = in.readZLong(); } @Override @@ -90,9 +84,7 @@ public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); snapshotFiles.writeTo(out); out.writeVInt(totalTranslogOps); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeZLong(globalCheckpoint); - } + out.writeZLong(globalCheckpoint); } public Store.MetadataSnapshot sourceMetaSnapshot() { diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java index eacfb87ecc732..32560bc211669 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.index.seqno.RetentionLeases; @@ -139,8 +138,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeZLong(maxSeenAutoIdTimestampOnPrimary); out.writeZLong(maxSeqNoOfUpdatesOrDeletesOnPrimary); retentionLeases.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(mappingVersionOnPrimary); - } + out.writeVLong(mappingVersionOnPrimary); } } diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index b49cdcd127962..0e452fcbbc7a9 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -39,7 +39,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; 
import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -68,6 +67,7 @@ import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -410,7 +410,7 @@ protected void writeNodesTo(StreamOutput out, List nodes * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { private final ShardId shardId; @Nullable diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 6b6d33717bab4..f7e3de4eb988f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.histogram; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Rounding; import org.opensearch.common.io.stream.StreamInput; @@ -150,17 +149,13 @@ public AutoDateHistogramAggregationBuilder(String name) { public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in); numBuckets = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - minimumIntervalExpression = in.readOptionalString(); - } + minimumIntervalExpression = in.readOptionalString(); } @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(numBuckets); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeOptionalString(minimumIntervalExpression); - } + out.writeOptionalString(minimumIntervalExpression); } protected AutoDateHistogramAggregationBuilder( @@ -321,17 +316,7 @@ public RoundingInfo(StreamInput in) throws IOException { roughEstimateDurationMillis = in.readVLong(); innerIntervals = in.readIntArray(); unitAbbreviation = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - dateTimeUnit = in.readString(); - } else { - /* - * This *should* be safe because we only deserialize RoundingInfo - * when reading result and results don't actually use this at all. - * We just set it to something non-null to line up with the normal - * ctor. "seconds" is the smallest unit anyway. 
- */ - dateTimeUnit = "second"; - } + dateTimeUnit = in.readString(); } @Override @@ -340,9 +325,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(roughEstimateDurationMillis); out.writeIntArray(innerIntervals); out.writeString(unitAbbreviation); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeString(dateTimeUnit); - } + out.writeString(dateTimeUnit); } public int getMaximumInnerInterval() { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index d378bb2ec1bd2..f8a4c4ffd9cba 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.histogram; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Rounding; import org.opensearch.common.Rounding.DateTimeUnit; @@ -143,21 +142,8 @@ public static void declareIntervalFields(Object public DateIntervalWrapper() {} public DateIntervalWrapper(StreamInput in) throws IOException { - if (in.getVersion().before(LegacyESVersion.V_7_2_0)) { - long interval = in.readLong(); - DateHistogramInterval histoInterval = in.readOptionalWriteable(DateHistogramInterval::new); - - if (histoInterval != null) { - dateHistogramInterval = histoInterval; - intervalType = IntervalTypeEnum.LEGACY_DATE_HISTO; - } else { - dateHistogramInterval = new DateHistogramInterval(interval + "ms"); - intervalType = IntervalTypeEnum.LEGACY_INTERVAL; - } - } else { - dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); - intervalType = IntervalTypeEnum.fromStream(in); - } + dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); + intervalType = IntervalTypeEnum.fromStream(in); } public IntervalTypeEnum getIntervalType() { @@ -402,20 +388,8 @@ public boolean isEmpty() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_2_0)) { - if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL)) { - out.writeLong( - TimeValue.parseTimeValue(dateHistogramInterval.toString(), DateHistogramAggregationBuilder.NAME + ".innerWriteTo") - .getMillis() - ); - } else { - out.writeLong(0L); - } - out.writeOptionalWriteable(dateHistogramInterval); - } else { - out.writeOptionalWriteable(dateHistogramInterval); - intervalType.writeTo(out); - } + out.writeOptionalWriteable(dateHistogramInterval); + intervalType.writeTo(out); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java index 96e7541de25d9..35449a28c9087 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java @@ -33,7 +33,6 @@ package org.opensearch.search.aggregations.metrics; import org.apache.lucene.geo.GeoEncodingUtils; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.io.stream.StreamInput; @@ -84,13 +83,7 @@ public InternalGeoCentroid(StreamInput in) throws 
IOException { super(in); count = in.readVLong(); if (in.readBoolean()) { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - centroid = new GeoPoint(in.readDouble(), in.readDouble()); - } else { - final long hash = in.readLong(); - centroid = new GeoPoint(decodeLatitude(hash), decodeLongitude(hash)); - } - + centroid = new GeoPoint(in.readDouble(), in.readDouble()); } else { centroid = null; } @@ -101,12 +94,8 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVLong(count); if (centroid != null) { out.writeBoolean(true); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeDouble(centroid.lat()); - out.writeDouble(centroid.lon()); - } else { - out.writeLong(encodeLatLon(centroid.lat(), centroid.lon())); - } + out.writeDouble(centroid.lat()); + out.writeDouble(centroid.lon()); } else { out.writeBoolean(false); } diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java index dceacd57d623e..31fdc5c9d9e9d 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java @@ -45,7 +45,6 @@ import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregations; -import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.profile.NetworkTime; @@ -54,7 +53,6 @@ import java.io.IOException; -import static java.util.Collections.emptyList; import static org.opensearch.common.lucene.Lucene.readTopDocs; import static org.opensearch.common.lucene.Lucene.writeTopDocs; @@ -361,10 +359,6 @@ public void readFromWithId(ShardSearchContextId id, StreamInput in) throws IOExc if (hasAggs = in.readBoolean()) { aggregations = DelayableWriteable.referencing(InternalAggregations.readFrom(in)); } - if (in.getVersion().before(LegacyESVersion.V_7_2_0)) { - // The list of PipelineAggregators is sent by old versions. We don't need it anyway. - in.readNamedWriteableList(PipelineAggregator.class); - } } else { if (hasAggs = in.readBoolean()) { aggregations = DelayableWriteable.delayed(InternalAggregations::readFrom, in); @@ -410,35 +404,11 @@ public void writeToNoId(StreamOutput out) throws IOException { writeTopDocs(out, topDocsAndMaxScore); if (aggregations == null) { out.writeBoolean(false); - if (out.getVersion().before(LegacyESVersion.V_7_2_0)) { - /* - * Earlier versions expect sibling pipeline aggs separately - * as they used to be set to QuerySearchResult directly, while - * later versions expect them in InternalAggregations. Note - * that despite serializing sibling pipeline aggs as part of - * InternalAggregations is supported since 6.7.0, the shards - * set sibling pipeline aggs to InternalAggregations only from - * 7.1 on. - */ - out.writeNamedWriteableList(emptyList()); - } } else { out.writeBoolean(true); if (out.getVersion().before(LegacyESVersion.V_7_7_0)) { InternalAggregations aggs = aggregations.expand(); aggs.writeTo(out); - if (out.getVersion().before(LegacyESVersion.V_7_2_0)) { - /* - * Earlier versions expect sibling pipeline aggs separately - * as they used to be set to QuerySearchResult directly, while - * later versions expect them in InternalAggregations. 
Note - * that despite serializing sibling pipeline aggs as part of - * InternalAggregations is supported since 6.7.0, the shards - * set sibling pipeline aggs to InternalAggregations only from - * 7.1 on. - */ - out.writeNamedWriteableList(aggs.getTopLevelPipelineAggregators()); - } } else { aggregations.writeTo(out); } diff --git a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java index b4a93ec9869e6..395ebaf2523e7 100644 --- a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java @@ -38,7 +38,6 @@ import org.apache.lucene.index.PointValues; import org.apache.lucene.index.Terms; import org.apache.lucene.search.SortField; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -161,9 +160,7 @@ public FieldSortBuilder(StreamInput in) throws IOException { sortMode = in.readOptionalWriteable(SortMode::readFromStream); unmappedType = in.readOptionalString(); nestedSort = in.readOptionalWriteable(NestedSortBuilder::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - numericType = in.readOptionalString(); - } + numericType = in.readOptionalString(); } @Override @@ -176,9 +173,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(sortMode); out.writeOptionalString(unmappedType); out.writeOptionalWriteable(nestedSort); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeOptionalString(numericType); - } + out.writeOptionalString(numericType); } /** Returns the document field this sort should be based on. */ diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index d7ebba721a52c..38d9df0e960e0 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -73,7 +73,6 @@ public final class SnapshotInfo implements Comparable, ToXContent, public static final String CONTEXT_MODE_PARAM = "context_mode"; public static final String CONTEXT_MODE_SNAPSHOT = "SNAPSHOT"; - public static final Version METADATA_FIELD_INTRODUCED = LegacyESVersion.V_7_3_0; private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_optional_time"); private static final String SNAPSHOT = "snapshot"; private static final String UUID = "uuid"; @@ -401,11 +400,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { shardFailures = Collections.unmodifiableList(in.readList(SnapshotShardFailure::new)); version = in.readBoolean() ? 
Version.readVersion(in) : null; includeGlobalState = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } else { - userMetadata = null; - } + userMetadata = in.readMap(); if (in.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { dataStreams = in.readStringList(); } else { @@ -840,11 +835,9 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeBoolean(false); } out.writeOptionalBoolean(includeGlobalState); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - out.writeStringCollection(dataStreams); - } + out.writeMap(userMetadata); + if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { + out.writeStringCollection(dataStreams); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index 5b2b4f361083b..ffd3a66ad1d48 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -40,7 +40,6 @@ import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.support.ActionTestUtils; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.replication.ClusterStateCreationUtils; import org.opensearch.cluster.node.DiscoveryNode; @@ -55,6 +54,7 @@ import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskManager; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; @@ -78,7 +78,7 @@ public class CancellableTasksTests extends TaskManagerTestCase { - public static class CancellableNodeRequest extends BaseNodeRequest { + public static class CancellableNodeRequest extends TransportRequest { protected String requestName; public CancellableNodeRequest() { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java index 5d947a743385f..d49dd14492327 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java @@ -16,7 +16,6 @@ import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.support.ActionTestUtils; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SuppressForbidden; @@ -32,6 +31,7 @@ import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.test.tasks.MockTaskManagerListener; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import 
java.io.IOException; @@ -56,7 +56,7 @@ public class ResourceAwareTasksTests extends TaskManagerTestCase { private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); - public static class ResourceAwareNodeRequest extends BaseNodeRequest { + public static class ResourceAwareNodeRequest extends TransportRequest { protected String requestName; public ResourceAwareNodeRequest() { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 68cf69e30f8a6..8b0c2187d05af 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -37,7 +37,6 @@ import org.opensearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; import org.opensearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -66,6 +65,7 @@ import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; import org.junit.After; @@ -156,8 +156,8 @@ public int failureCount() { /** * Simulates node-based task that can be used to block node tasks so they are guaranteed to be registered by task manager */ - abstract class AbstractTestNodesAction, NodeRequest extends BaseNodeRequest> extends - TransportNodesAction { + abstract class AbstractTestNodesAction, NodeRequest extends TransportRequest> + extends TransportNodesAction { AbstractTestNodesAction( String actionName, diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 3bb1957c69fb4..aa0e9511f86ce 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -41,7 +41,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -182,7 +181,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { protected final String requestName; protected final boolean shouldBlock; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 7590bf88eeca0..97a045872477d 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -44,7 +44,6 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.tasks.BaseTasksRequest; import org.opensearch.action.support.tasks.BaseTasksResponse; @@ -67,6 +66,7 @@ import org.opensearch.tasks.TaskInfo; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -91,7 +91,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { protected String requestName; public NodeRequest(StreamInput in) throws IOException { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java index 4a90a23cbd2f0..07246f144d95b 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java @@ -82,11 +82,7 @@ public void testBwcSerialization() throws Exception { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0) || request.indicesOptions().expandWildcardsHidden()) { assertEquals(request.indicesOptions(), indicesOptions); } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - assertEquals(request.waitForActiveShards(), ActiveShardCount.readFrom(in)); - } else { - assertEquals(0, in.available()); - } + assertEquals(request.waitForActiveShards(), ActiveShardCount.readFrom(in)); } } } @@ -100,9 +96,7 @@ public void testBwcSerialization() throws Exception { out.writeTimeValue(sample.timeout()); out.writeStringArray(sample.indices()); sample.indicesOptions().writeIndicesOptions(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - sample.waitForActiveShards().writeTo(out); - } + sample.waitForActiveShards().writeTo(out); final CloseIndexRequest deserializedRequest; try (StreamInput in = out.bytes().streamInput()) { @@ -119,11 +113,7 @@ public void testBwcSerialization() throws Exception { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0) || sample.indicesOptions().expandWildcardsHidden()) { assertEquals(sample.indicesOptions(), deserializedRequest.indicesOptions()); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - assertEquals(sample.waitForActiveShards(), deserializedRequest.waitForActiveShards()); - } else { - assertEquals(ActiveShardCount.NONE, deserializedRequest.waitForActiveShards()); - } + assertEquals(sample.waitForActiveShards(), deserializedRequest.waitForActiveShards()); } } } diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java index 86657a98d9a1d..76142efc60b7d 100644 --- a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java +++ 
b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java @@ -50,6 +50,7 @@ import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; @@ -378,7 +379,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) thro } } - private static class TestNodeRequest extends BaseNodeRequest { + private static class TestNodeRequest extends TransportRequest { TestNodeRequest() {} TestNodeRequest(StreamInput in) throws IOException { diff --git a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java index 089dfcaf65517..bf33d7a45f4fb 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java @@ -286,7 +286,7 @@ public void testStreamRequestLegacyVersion() throws IOException { // write using older version which contains types ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setVersion(LegacyESVersion.V_7_2_0); + out.setVersion(LegacyESVersion.fromId(7000099)); request.writeTo(out); // First check the type on the stream was written as "_doc" by manually parsing the stream until the type @@ -302,7 +302,7 @@ public void testStreamRequestLegacyVersion() throws IOException { // now read the stream as normal to check it is parsed correct if received from an older node opensearchInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); opensearchBuffer = new InputStreamStreamInput(opensearchInBuffer); - opensearchBuffer.setVersion(LegacyESVersion.V_7_2_0); + opensearchBuffer.setVersion(LegacyESVersion.fromId(7000099)); TermVectorsRequest req2 = new TermVectorsRequest(opensearchBuffer); assertThat(request.offsets(), equalTo(req2.offsets())); From 77cff55bd431c0e2f8fdea5a40c83f8e8bddeca1 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 7 Oct 2022 16:29:39 -0500 Subject: [PATCH 040/122] [Remove] LegacyESVersion.V_7_4_* and V_7_5_* constants (#4704) Removes all usages of LegacyESVersion.V_7_4_ and LegacyESVersion.V_7_5 version constants along with ancient API logic. 
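The cleanup is mostly mechanical: wire-format branches gated on the removed constants collapse
into the unconditional path, because peers older than those versions are no longer supported on
the wire. A representative sketch of the pattern (the password field name here is illustrative
only, not a literal hunk from this patch):

    // before: write gated on a legacy wire version
    if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) {
        out.writeOptionalBytesReference(password);
    }

    // after: the guard is removed and the write is unconditional
    out.writeOptionalBytesReference(password);

The read side collapses the same way, dropping the else-branch defaults that only existed for
older peers.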
Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../org/opensearch/upgrades/IndexingIT.java | 13 +- .../DedicatedClusterSnapshotRestoreIT.java | 40 -- .../java/org/opensearch/LegacyESVersion.java | 6 - .../org/opensearch/OpenSearchException.java | 9 +- .../NodesReloadSecureSettingsRequest.java | 18 +- .../TransportCleanupRepositoryAction.java | 16 +- .../create/TransportCreateSnapshotAction.java | 14 +- .../snapshots/status/SnapshotStatus.java | 18 +- .../shards/IndicesShardStoresResponse.java | 15 +- .../admin/indices/shrink/ResizeRequest.java | 4 - .../opensearch/action/index/IndexRequest.java | 18 +- .../ingest/SimulateDocumentBaseResult.java | 29 +- .../cluster/RepositoryCleanupInProgress.java | 2 +- .../cluster/routing/UnassignedInfo.java | 11 +- .../org/opensearch/index/IndexSettings.java | 4 +- .../query/VectorGeoShapeQueryProcessor.java | 2 +- .../ScriptScoreQueryBuilder.java | 13 +- .../index/seqno/ReplicationTracker.java | 10 +- .../opensearch/index/shard/IndexShard.java | 3 +- .../RecoveryFinalizeRecoveryRequest.java | 12 +- ...ryPrepareForTranslogOperationsRequest.java | 7 - .../recovery/RecoverySourceHandler.java | 4 +- .../indices/recovery/RecoveryState.java | 9 +- .../TransportNodesListShardStoreMetadata.java | 10 +- .../repositories/FilterRepository.java | 6 - .../opensearch/repositories/Repository.java | 14 - .../blobstore/BlobStoreRepository.java | 16 - .../repositories/blobstore/package-info.java | 16 +- .../rest/action/document/RestIndexAction.java | 3 +- .../CompositeValuesSourceParserHelper.java | 10 - .../MovFnPipelineAggregationBuilder.java | 11 +- .../pipeline/MovFnPipelineAggregator.java | 11 +- .../snapshots/SnapshotsService.java | 368 +----------------- .../GeoShapeQueryBuilderGeoShapeTests.java | 20 +- .../RepositoriesServiceTests.java | 5 - .../index/shard/RestoreOnlyRepository.java | 4 - .../test/rest/OpenSearchRestTestCase.java | 4 +- 38 files changed, 64 insertions(+), 712 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ee30dc84e9fd..37e16aecff69a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) - Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) +- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) ### Fixed diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index f34e5f7bc121a..888fa886c3c5e 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -181,18 +181,7 @@ public void testAutoIdWithOpTypeCreate() throws IOException { } } - if (minNodeVersion.before(LegacyESVersion.V_7_5_0)) { - ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(bulk)); - assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); - assertThat(e.getMessage(), - // if request goes to 7.5+ node - either(containsString("optype create not supported for indexing requests without explicit id until")) - // if request goes to < 7.5 node 
- .or(containsString("an id must be provided if version type or value are set") - )); - } else { - client().performRequest(bulk); - } + client().performRequest(bulk); break; case UPGRADED: client().performRequest(bulk); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index b4287f201489b..c0fabb8becf6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -37,7 +37,6 @@ import org.opensearch.Version; import org.opensearch.action.ActionFuture; -import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -47,7 +46,6 @@ import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; @@ -1386,44 +1384,6 @@ public void testPartialSnapshotAllShardsMissing() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); } - /** - * Tests for the legacy snapshot path that is normally executed if the cluster contains any nodes older than - * {@link SnapshotsService#NO_REPO_INITIALIZE_VERSION}. - * Makes sure that blocking as well as non-blocking snapshot create paths execute cleanly as well as that error handling works out - * correctly by testing a snapshot name collision. 
- */ - public void testCreateSnapshotLegacyPath() throws Exception { - final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); - final String repoName = "test-repo"; - createRepository(repoName, "fs"); - createIndex("some-index"); - - final SnapshotsService snapshotsService = internalCluster().getClusterManagerNodeInstance(SnapshotsService.class); - final Snapshot snapshot1 = PlainActionFuture.get( - f -> snapshotsService.createSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-1"), f) - ); - awaitNoMoreRunningOperations(clusterManagerNode); - - final InvalidSnapshotNameException sne = expectThrows( - InvalidSnapshotNameException.class, - () -> PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, snapshot1.getSnapshotId().getName()), f) - ) - ); - - assertThat(sne.getMessage(), containsString("snapshot with the same name already exists")); - final SnapshotInfo snapshot2 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-2"), f) - ); - assertThat(snapshot2.state(), is(SnapshotState.SUCCESS)); - - final SnapshotInfo snapshot3 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-3").indices("does-not-exist-*"), f) - ); - assertThat(snapshot3.state(), is(SnapshotState.SUCCESS)); - } - public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { internalCluster().startClusterManagerOnlyNode(); final List dataNodes = internalCluster().startDataOnlyNodes(2); diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index 283a6581a64bb..6e2aeaa5f7b4f 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,12 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_4_0 = new LegacyESVersion(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_4_1 = new LegacyESVersion(7040199, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_4_2 = new LegacyESVersion(7040299, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_5_0 = new LegacyESVersion(7050099, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final LegacyESVersion V_7_5_1 = new LegacyESVersion(7050199, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final LegacyESVersion V_7_5_2 = new LegacyESVersion(7050299, org.apache.lucene.util.Version.LUCENE_8_3_0); public static final LegacyESVersion V_7_6_0 = new LegacyESVersion(7060099, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final LegacyESVersion V_7_6_1 = new LegacyESVersion(7060199, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final LegacyESVersion V_7_6_2 = new LegacyESVersion(7060299, org.apache.lucene.util.Version.LUCENE_8_4_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index e9f0d831d2977..78bda1cf088cd 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -49,7 +49,6 @@ import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; 
-import org.opensearch.search.SearchException; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.transport.TcpTransport; @@ -317,10 +316,6 @@ public void writeTo(StreamOutput out) throws IOException { public static OpenSearchException readException(StreamInput input, int id) throws IOException { CheckedFunction opensearchException = ID_TO_SUPPLIER.get(id); if (opensearchException == null) { - if (id == 127 && input.getVersion().before(LegacyESVersion.V_7_5_0)) { - // was SearchContextException - return new SearchException(input); - } throw new IllegalStateException("unknown exception for id: " + id); } return opensearchException.apply(input); @@ -1569,13 +1564,13 @@ private enum OpenSearchExceptionHandle { org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException.class, org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException::new, 156, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), INGEST_PROCESSOR_EXCEPTION( org.opensearch.ingest.IngestProcessorException.class, org.opensearch.ingest.IngestProcessorException::new, 157, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), PEER_RECOVERY_NOT_FOUND_EXCEPTION( org.opensearch.indices.recovery.PeerRecoveryNotFound.class, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index e31f5f304c836..b721c8f005974 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -114,16 +114,14 @@ boolean hasPassword() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - if (this.secureSettingsPassword == null) { - out.writeOptionalBytesReference(null); - } else { - final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); - try { - out.writeOptionalBytesReference(new BytesArray(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } + if (this.secureSettingsPassword == null) { + out.writeOptionalBytesReference(null); + } else { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index a3804db687a2d..07b918e427784 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -34,8 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; @@ -91,8 +89,6 @@ public final class 
TransportCleanupRepositoryAction extends TransportClusterMana private static final Logger logger = LogManager.getLogger(TransportCleanupRepositoryAction.class); - private static final Version MIN_VERSION = LegacyESVersion.V_7_4_0; - private final RepositoriesService repositoriesService; private final SnapshotsService snapshotsService; @@ -179,17 +175,7 @@ protected void clusterManagerOperation( ClusterState state, ActionListener listener ) { - if (state.nodes().getMinNodeVersion().onOrAfter(MIN_VERSION)) { - cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); - } else { - throw new IllegalArgumentException( - "Repository cleanup is only supported from version [" - + MIN_VERSION - + "] but the oldest node version in the cluster is [" - + state.nodes().getMinNodeVersion() - + ']' - ); - } + cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index ed4af6d915792..f604a30121797 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -103,18 +103,10 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener listener ) { - if (state.nodes().getMinNodeVersion().before(SnapshotsService.NO_REPO_INITIALIZE_VERSION)) { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshotLegacy(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshotLegacy(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + if (request.waitForCompletion()) { + snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); } else { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 8fd1ed22a0d14..5fa908a039887 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.snapshots.status; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.SnapshotsInProgress.State; import org.opensearch.common.Nullable; @@ -92,15 +91,8 @@ public class SnapshotStatus implements ToXContentObject, Writeable { state = State.fromValue(in.readByte()); shards = Collections.unmodifiableList(in.readList(SnapshotIndexShardStatus::new)); includeGlobalState = in.readOptionalBoolean(); - final long startTime; - final long time; - if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - startTime = in.readLong(); - time = in.readLong(); - } else { - startTime = 0L; - time = 0L; - } + final long startTime = in.readLong(); + final long time = in.readLong(); updateShardStats(startTime, time); } @@ -207,10 +199,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(state.value()); out.writeList(shards); out.writeOptionalBoolean(includeGlobalState); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeLong(stats.getStartTime()); - out.writeLong(stats.getTime()); - } + out.writeLong(stats.getStartTime()); + out.writeLong(stats.getTime()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java index bd5d9c651af7a..484bc93496fc8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -34,7 +34,6 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.DefaultShardOperationFailedException; @@ -247,13 +246,8 @@ public Failure(String nodeId, String index, int shardId, Throwable reason) { } private Failure(StreamInput in) throws IOException { - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } readFrom(in, this); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } + nodeId = in.readString(); } public String nodeId() { @@ -266,13 +260,8 @@ static Failure readFailure(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } + out.writeString(nodeId); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index 50784e60a3f19..f5d9528422b58 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.shrink; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.admin.indices.alias.Alias; @@ -122,9 +121,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); targetIndexRequest.writeTo(out); out.writeString(sourceIndex); - if (type == ResizeType.CLONE && out.getVersion().before(LegacyESVersion.V_7_4_0)) { - throw new IllegalArgumentException("can't send clone request to a node that's older than " + LegacyESVersion.V_7_4_0); - } out.writeEnum(type); out.writeOptionalBoolean(copySettings); } diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index 381eca2dc716f..ceff8dcbc4b55 
100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -153,12 +153,8 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - finalPipeline = in.readOptionalString(); - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - isPipelineResolved = in.readBoolean(); - } + finalPipeline = in.readOptionalString(); + isPipelineResolved = in.readBoolean(); isRetry = in.readBoolean(); autoGeneratedTimestamp = in.readLong(); if (in.readBoolean()) { @@ -639,7 +635,7 @@ public void resolveRouting(Metadata metadata) { } public void checkAutoIdWithOpTypeCreateSupportedByVersion(Version version) { - if (id == null && opType == OpType.CREATE && version.before(LegacyESVersion.V_7_5_0)) { + if (id == null && opType == OpType.CREATE && version.before(LegacyESVersion.fromId(7050099))) { throw new IllegalArgumentException( "optype create not supported for indexing requests without explicit id until all nodes " + "are on version 7.5.0 or higher" ); @@ -671,12 +667,8 @@ private void writeBody(StreamOutput out) throws IOException { out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeOptionalString(finalPipeline); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeBoolean(isPipelineResolved); - } + out.writeOptionalString(finalPipeline); + out.writeBoolean(isPipelineResolved); out.writeBoolean(isRetry); out.writeLong(autoGeneratedTimestamp); if (contentType != null) { diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java index 2440a1802912b..f36ca0e7d7379 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java @@ -31,7 +31,6 @@ package org.opensearch.action.ingest; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -94,34 +93,14 @@ public SimulateDocumentBaseResult(Exception failure) { * Read from a stream. 
*/ public SimulateDocumentBaseResult(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - failure = in.readException(); - ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); - } else { - if (in.readBoolean()) { - ingestDocument = null; - failure = in.readException(); - } else { - ingestDocument = new WriteableIngestDocument(in); - failure = null; - } - } + failure = in.readException(); + ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeException(failure); - out.writeOptionalWriteable(ingestDocument); - } else { - if (failure == null) { - out.writeBoolean(false); - ingestDocument.writeTo(out); - } else { - out.writeBoolean(true); - out.writeException(failure); - } - } + out.writeException(failure); + out.writeOptionalWriteable(ingestDocument); } public IngestDocument getIngestDocument() { diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java index 2cf9d66fee2bd..291aa88a3fb3e 100644 --- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java @@ -114,7 +114,7 @@ public String toString() { @Override public Version getMinimalSupportedVersion() { - return LegacyESVersion.V_7_4_0; + return LegacyESVersion.fromId(7040099); } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index 489c6125f7d13..ed5a23cd09e40 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.routing; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.allocation.RoutingAllocation; @@ -314,11 +313,7 @@ public UnassignedInfo(StreamInput in) throws IOException { this.failure = in.readException(); this.failedAllocations = in.readVInt(); this.lastAllocationStatus = AllocationStatus.readFrom(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); - } else { - this.failedNodeIds = Collections.emptySet(); - } + this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); } public void writeTo(StreamOutput out) throws IOException { @@ -330,9 +325,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeException(failure); out.writeVInt(failedAllocations); lastAllocationStatus.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeCollection(failedNodeIds, StreamOutput::writeString); - } + out.writeCollection(failedNodeIds, StreamOutput::writeString); } /** diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 9c7f4804755d4..00daea147f16f 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -34,7 +34,6 @@ import 
org.apache.logging.log4j.Logger; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; @@ -1122,8 +1121,7 @@ public int getTranslogRetentionTotalFiles() { } private static boolean shouldDisableTranslogRetention(Settings settings) { - return INDEX_SOFT_DELETES_SETTING.get(settings) - && IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(LegacyESVersion.V_7_4_0); + return INDEX_SOFT_DELETES_SETTING.get(settings); } /** diff --git a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java index e11b22e9296cf..d50585ae0aebf 100644 --- a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java +++ b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java @@ -67,7 +67,7 @@ public class VectorGeoShapeQueryProcessor { public Query geoShapeQuery(Geometry shape, String fieldName, ShapeRelation relation, QueryShardContext context) { // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0) - if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(LegacyESVersion.V_7_5_0)) { + if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(LegacyESVersion.fromId(7050099))) { throw new QueryShardException(context, ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]."); } // wrap geoQuery as a ConstantScoreQuery diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java index 9605ba424bfb0..ef606ce35b84f 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java @@ -33,7 +33,6 @@ package org.opensearch.index.query.functionscore; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -123,22 +122,14 @@ public ScriptScoreQueryBuilder(QueryBuilder query, Script script) { public ScriptScoreQueryBuilder(StreamInput in) throws IOException { super(in); query = in.readNamedWriteable(QueryBuilder.class); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - script = new Script(in); - } else { - script = in.readNamedWriteable(ScriptScoreFunctionBuilder.class).getScript(); - } + script = new Script(in); minScore = in.readOptionalFloat(); } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(query); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - script.writeTo(out); - } else { - out.writeNamedWriteable(new ScriptScoreFunctionBuilder(script)); - } + script.writeTo(out); out.writeOptionalFloat(minScore); } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 63995ae121abb..ea1604c16190b 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ 
b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -223,7 +223,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L /** * Whether there should be a peer recovery retention lease (PRRL) for every tracked shard copy. Always true on indices created from - * {@link LegacyESVersion#V_7_4_0} onwards, because these versions create PRRLs properly. May be false on indices created in an + * {@code LegacyESVersion#V_7_4_0} onwards, because these versions create PRRLs properly. May be false on indices created in an * earlier version if we recently did a rolling upgrade and * {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)} has not yet completed. Is only permitted * to change from false to true; can be removed once support for pre-PRRL indices is no longer needed. @@ -996,9 +996,7 @@ public ReplicationTracker( this.routingTable = null; this.replicationGroup = null; this.hasAllPeerRecoveryRetentionLeases = indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0) - || (indexSettings.isSoftDeleteEnabled() - && indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_4_0) - && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN); + || (indexSettings.isSoftDeleteEnabled() && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN); this.fileBasedRecoveryThreshold = IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.get(indexSettings.getSettings()); this.safeCommitInfoSupplier = safeCommitInfoSupplier; this.onReplicationGroupUpdated = onReplicationGroupUpdated; @@ -1126,7 +1124,7 @@ public synchronized void activatePrimaryMode(final long localCheckpoint) { /** * Creates a peer recovery retention lease for this shard, if one does not already exist and this shard is the sole shard copy in the * replication group. If one does not already exist and yet there are other shard copies in this group then we must have just done - * a rolling upgrade from a version before {@link LegacyESVersion#V_7_4_0}, in which case the missing leases should be created + * a rolling upgrade from a version before {@code LegacyESVersion#V_7_4_0}, in which case the missing leases should be created * asynchronously by the caller using {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)}. */ private void addPeerRecoveryRetentionLeaseForSolePrimary() { @@ -1528,7 +1526,7 @@ public synchronized boolean hasAllPeerRecoveryRetentionLeases() { /** * Create any required peer-recovery retention leases that do not currently exist because we just did a rolling upgrade from a version - * prior to {@link LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. + * prior to {@code LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. 
*/ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener listener) { if (hasAllPeerRecoveryRetentionLeases == false) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d05f7c34f80ce..52ecc5bc66607 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -52,7 +52,6 @@ import org.apache.lucene.util.ThreadInterruptedException; import org.opensearch.Assertions; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; @@ -3187,7 +3186,7 @@ public RetentionLease addPeerRecoveryRetentionLease( ) { assert assertPrimaryMode(); // only needed for BWC reasons involving rolling upgrades from versions that do not support PRRLs: - assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_4_0) || indexSettings.isSoftDeleteEnabled() == false; + assert indexSettings.isSoftDeleteEnabled() == false; return replicationTracker.addPeerRecoveryRetentionLease(nodeId, globalCheckpoint, listener); } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java index a7334fba15664..446fb78958db4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java @@ -32,10 +32,8 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import java.io.IOException; @@ -57,11 +55,7 @@ final class RecoveryFinalizeRecoveryRequest extends RecoveryTransportRequest { recoveryId = in.readLong(); shardId = new ShardId(in); globalCheckpoint = in.readZLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - trimAboveSeqNo = in.readZLong(); - } else { - trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + trimAboveSeqNo = in.readZLong(); } RecoveryFinalizeRecoveryRequest( @@ -100,9 +94,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recoveryId); shardId.writeTo(out); out.writeZLong(globalCheckpoint); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeZLong(trimAboveSeqNo); - } + out.writeZLong(trimAboveSeqNo); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index bdacb0b724884..68979fa4b69bc 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.index.shard.ShardId; @@ -62,9 +61,6 @@ class RecoveryPrepareForTranslogOperationsRequest 
extends RecoveryTransportReque recoveryId = in.readLong(); shardId = new ShardId(in); totalTranslogOps = in.readVInt(); - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - in.readBoolean(); // was fileBasedRecovery - } } public long recoveryId() { @@ -85,8 +81,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recoveryId); shardId.writeTo(out); out.writeVInt(totalTranslogOps); - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeBoolean(true); // was fileBasedRecovery - } } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 665e79722770e..505d3c7adfb3f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -40,7 +40,6 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; @@ -720,8 +719,7 @@ void createRetentionLease(final long startingSeqNo, ActionListener addRetentionLeaseStep = new StepListener<>(); final long estimatedGlobalCheckpoint = startingSeqNo - 1; final RetentionLease newLease = shard.addPeerRecoveryRetentionLease( diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index 57208ab029bf4..de2ee1b8512b4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; @@ -430,9 +429,7 @@ public Translog(StreamInput in) throws IOException { recovered = in.readVInt(); total = in.readVInt(); totalOnStart = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - totalLocal = in.readVInt(); - } + totalLocal = in.readVInt(); } @Override @@ -441,9 +438,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(recovered); out.writeVInt(total); out.writeVInt(totalOnStart); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeVInt(totalLocal); - } + out.writeVInt(totalLocal); } public synchronized void reset() { diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index 0e452fcbbc7a9..6189c983e3c8a 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -253,20 +253,14 @@ public StoreFilesMetadata( public StoreFilesMetadata(StreamInput in) throws IOException { this.shardId = new ShardId(in); this.metadataSnapshot = new Store.MetadataSnapshot(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); - } else { - this.peerRecoveryRetentionLeases = Collections.emptyList(); - 
} + this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); } @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); metadataSnapshot.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeList(peerRecoveryRetentionLeases); - } + out.writeList(peerRecoveryRetentionLeases); } public ShardId shardId() { diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index aaa021a0e8b93..a6a649fa2cd44 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -52,7 +52,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -95,11 +94,6 @@ public void getRepositoryData(ActionListener listener) { in.getRepositoryData(listener); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) { - in.initializeSnapshot(snapshotId, indices, metadata); - } - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index a16e0e8d441bc..1826fe1aa51da 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -53,7 +53,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -129,19 +128,6 @@ default Repository create(RepositoryMetadata metadata, Function listener); - /** - * Starts snapshotting process - * - * @param snapshotId snapshot id - * @param indices list of indices to be snapshotted - * @param metadata cluster metadata - * - * @deprecated this method is only used when taking snapshots in a mixed version cluster where a cluster-manager node older than - * {@link org.opensearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION} is present. - */ - @Deprecated - void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata); - /** * Finalizes snapshotting process *
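For plugin authors maintaining their own Repository implementations, the change above removes initializeSnapshot from the Repository interface entirely. A minimal sketch of a delegating repository that compiles against the trimmed-down interface (hypothetical plugin code, not part of this patch; the class name is illustrative):

import org.opensearch.repositories.FilterRepository;
import org.opensearch.repositories.Repository;

// Hypothetical example: there is no initializeSnapshot override to provide any more;
// snapshot creation is driven through finalizeSnapshot and snapshotShard instead.
public class AuditingRepository extends FilterRepository {
    public AuditingRepository(Repository delegate) {
        super(delegate);
    }
}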

diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index c36d92abcf498..bf06191bdc8d3 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -123,7 +123,6 @@ import org.opensearch.repositories.RepositoryVerificationException; import org.opensearch.repositories.ShardGenerations; import org.opensearch.snapshots.AbortedSnapshotException; -import org.opensearch.snapshots.SnapshotCreationException; import org.opensearch.snapshots.SnapshotException; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; @@ -713,21 +712,6 @@ public RepositoryStats stats() { return new RepositoryStats(store.stats()); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata clusterMetadata) { - try { - // Write Global Metadata - GLOBAL_METADATA_FORMAT.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), compress); - - // write the index metadata for each index in the snapshot - for (IndexId index : indices) { - INDEX_METADATA_FORMAT.write(clusterMetadata.index(index.getName()), indexContainer(index), snapshotId.getUUID(), compress); - } - } catch (IOException ex) { - throw new SnapshotCreationException(metadata.name(), snapshotId, ex); - } - } - @Override public void deleteSnapshots( Collection snapshotIds, diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java index aacd386cd4bd7..b13a63ef77f6a 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java @@ -163,23 +163,9 @@ * *

 * Creating a snapshot in the repository happens in the three steps described in detail below.
 *
- * Initializing a Snapshot in the Repository (Mixed Version Clusters only)
- *
- * In mixed version clusters that contain a node older than
- * {@link org.opensearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION}, creating a snapshot in the repository starts with a
- * call to {@link org.opensearch.repositories.Repository#initializeSnapshot} which the blob store repository implements via the
- * following actions:
- *
- *   1. Verify that no snapshot by the requested name exists.
- *   2. Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}
- *   3. Write the metadata for each index to a blob in that index's directory at
- *      {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}
- *
- * TODO: Remove this section once BwC logic it references is removed
- *
 * Writing Shard Data (Segments)
 *
- * Once all the metadata has been written by the snapshot initialization, the snapshot process moves on to writing the actual shard data
+ * The snapshot process writes the actual shard data
 * to the repository by invoking {@link org.opensearch.repositories.Repository#snapshotShard} on the data-nodes that hold the primaries
 * for the shards in the current snapshot. It is implemented as follows:

* diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java index e7b1da91aba8f..bd2c11cf71ff1 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java @@ -32,7 +32,6 @@ package org.opensearch.rest.action.document; -import org.opensearch.LegacyESVersion; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.ActiveShardCount; @@ -128,7 +127,7 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient client) throws IOException { assert request.params().get("id") == null : "non-null id: " + request.params().get("id"); - if (request.params().get("op_type") == null && nodesInCluster.get().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { + if (request.params().get("op_type") == null) { // default to op_type create request.params().put("op_type", "create"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java index d8526e684f391..6ca64c2186cb8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.composite; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; import org.opensearch.common.io.stream.StreamInput; @@ -103,15 +102,6 @@ public static void writeTo(CompositeValuesSourceBuilder builder, StreamOutput aggregationType = BUILDER_CLASS_TO_AGGREGATION_TYPE.get(builder.getClass()); if (BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass())) { code = BUILDER_CLASS_TO_BYTE_CODE.get(builder.getClass()); - if (code == 3 && out.getVersion().before(LegacyESVersion.V_7_5_0)) { - throw new IOException( - "Attempting to serialize [" - + builder.getClass().getSimpleName() - + "] to a node with unsupported version [" - + out.getVersion() - + "]" - ); - } } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 33c09e04bd4b0..501f1af63b3d9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -114,11 +113,7 @@ public MovFnPipelineAggregationBuilder(StreamInput in) throws IOException { format = in.readOptionalString(); gapPolicy = GapPolicy.readFrom(in); window = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - shift = in.readInt(); - } else { - shift = 0; - } + shift = in.readInt(); } @Override @@ -128,9 +123,7 @@ protected 
void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalString(format); gapPolicy.writeTo(out); out.writeInt(window); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeInt(shift); - } + out.writeInt(shift); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index 70652e7ddce44..7b20a796b8134 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.script.Script; @@ -106,11 +105,7 @@ public MovFnPipelineAggregator(StreamInput in) throws IOException { gapPolicy = BucketHelpers.GapPolicy.readFrom(in); bucketsPath = in.readString(); window = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - shift = in.readInt(); - } else { - shift = 0; - } + shift = in.readInt(); } @Override @@ -120,9 +115,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { gapPolicy.writeTo(out); out.writeString(bucketsPath); out.writeInt(window); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeInt(shift); - } + out.writeInt(shift); } @Override diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 4f672c9813d64..e53c2889f88e6 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -90,7 +90,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.repositories.IndexId; @@ -142,12 +141,6 @@ */ public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { - /** - * Minimum node version which does not use {@link Repository#initializeSnapshot(SnapshotId, List, Metadata)} to write snapshot metadata - * when starting a snapshot. - */ - public static final Version NO_REPO_INITIALIZE_VERSION = LegacyESVersion.V_7_5_0; - public static final Version FULL_CONCURRENCY_VERSION = LegacyESVersion.V_7_9_0; public static final Version CLONE_SNAPSHOT_VERSION = LegacyESVersion.V_7_10_0; @@ -156,7 +149,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus public static final Version INDEX_GEN_IN_REPO_DATA_VERSION = LegacyESVersion.V_7_9_0; - public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.V_7_5_0; + public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.fromId(7050099); public static final Version MULTI_DELETE_VERSION = LegacyESVersion.V_7_8_0; @@ -244,144 +237,6 @@ public SnapshotsService( } } - /** - * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of - * the snapshot. 
- * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param request snapshot request - * @param listener snapshot completion listener - */ - public void executeSnapshotLegacy(final CreateSnapshotRequest request, final ActionListener listener) { - createSnapshotLegacy( - request, - ActionListener.wrap(snapshot -> addListener(snapshot, ActionListener.map(listener, Tuple::v2)), listener::onFailure) - ); - } - - /** - * Initializes the snapshotting process. - *

- * This method is used by clients to start snapshot. It makes sure that there is no snapshots are currently running and - * creates a snapshot record in cluster state metadata. - * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param request snapshot request - * @param listener snapshot creation listener - */ - public void createSnapshotLegacy(final CreateSnapshotRequest request, final ActionListener listener) { - final String repositoryName = request.repository(); - final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); - validate(repositoryName, snapshotName); - final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot - Repository repository = repositoriesService.repository(request.repository()); - final Map userMeta = repository.adaptUserMetadata(request.userMetadata()); - clusterService.submitStateUpdateTask("create_snapshot [" + snapshotName + ']', new ClusterStateUpdateTask() { - - private List indices; - - private SnapshotsInProgress.Entry newEntry; - - @Override - public ClusterState execute(ClusterState currentState) { - validate(repositoryName, snapshotName, currentState); - SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE); - if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a snapshot deletion is in-progress in [" + deletionsInProgress + "]" - ); - } - final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE); - if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.hasCleanupInProgress()) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a repository cleanup is in-progress in [" + repositoryCleanupInProgress + "]" - ); - } - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a - // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from - // the cluster state anyway in #applyClusterState. 
- if (snapshots != null - && snapshots.entries() - .stream() - .anyMatch( - entry -> (entry.state() == State.INIT && initializingSnapshots.contains(entry.snapshot()) == false) == false - )) { - throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); - } - // Store newSnapshot here to be processed in clusterStateProcessed - indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request)); - - final List dataStreams = indexNameExpressionResolver.dataStreamNames( - currentState, - request.indicesOptions(), - request.indices() - ); - - logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices); - newEntry = new SnapshotsInProgress.Entry( - new Snapshot(repositoryName, snapshotId), - request.includeGlobalState(), - request.partial(), - State.INIT, - Collections.emptyList(), // We'll resolve the list of indices when moving to the STARTED state in #beginSnapshot - dataStreams, - threadPool.absoluteTimeInMillis(), - RepositoryData.UNKNOWN_REPO_GEN, - ImmutableOpenMap.of(), - userMeta, - Version.CURRENT - ); - initializingSnapshots.add(newEntry.snapshot()); - snapshots = SnapshotsInProgress.of(Collections.singletonList(newEntry)); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); - if (newEntry != null) { - initializingSnapshots.remove(newEntry.snapshot()); - } - newEntry = null; - listener.onFailure(e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { - if (newEntry != null) { - final Snapshot current = newEntry.snapshot(); - assert initializingSnapshots.contains(current); - assert indices != null; - beginSnapshot(newState, newEntry, request.partial(), indices, repository, new ActionListener() { - @Override - public void onResponse(final Snapshot snapshot) { - initializingSnapshots.remove(snapshot); - listener.onResponse(snapshot); - } - - @Override - public void onFailure(final Exception e) { - initializingSnapshots.remove(current); - listener.onFailure(e); - } - }); - } - } - - @Override - public TimeValue timeout() { - return request.clusterManagerNodeTimeout(); - } - }); - } - /** * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of * the snapshot. @@ -946,227 +801,6 @@ private static void validate(final String repositoryName, final String snapshotN } } - /** - * Starts snapshot. - *

- * Creates snapshot in repository and updates snapshot metadata record with list of shards that needs to be processed. - * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param clusterState cluster state - * @param snapshot snapshot meta data - * @param partial allow partial snapshots - * @param userCreateSnapshotListener listener - */ - private void beginSnapshot( - final ClusterState clusterState, - final SnapshotsInProgress.Entry snapshot, - final boolean partial, - final List indices, - final Repository repository, - final ActionListener userCreateSnapshotListener - ) { - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { - - boolean hadAbortedInitializations; - - @Override - protected void doRun() { - assert initializingSnapshots.contains(snapshot.snapshot()); - if (repository.isReadOnly()) { - throw new RepositoryException(repository.getMetadata().name(), "cannot create snapshot in a readonly repository"); - } - final String snapshotName = snapshot.snapshot().getSnapshotId().getName(); - final StepListener repositoryDataListener = new StepListener<>(); - repository.getRepositoryData(repositoryDataListener); - repositoryDataListener.whenComplete(repositoryData -> { - // check if the snapshot name already exists in the repository - if (repositoryData.getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { - throw new InvalidSnapshotNameException( - repository.getMetadata().name(), - snapshotName, - "snapshot with the same name already exists" - ); - } - if (clusterState.nodes().getMinNodeVersion().onOrAfter(NO_REPO_INITIALIZE_VERSION) == false) { - // In mixed version clusters we initialize the snapshot in the repository so that in case of a cluster-manager - // failover to an - // older version cluster-manager node snapshot finalization (that assumes initializeSnapshot was called) produces a - // valid - // snapshot. 
- repository.initializeSnapshot( - snapshot.snapshot().getSnapshotId(), - snapshot.indices(), - metadataForSnapshot(snapshot, clusterState.metadata()) - ); - } - - logger.info("snapshot [{}] started", snapshot.snapshot()); - final Version version = minCompatibleVersion(clusterState.nodes().getMinNodeVersion(), repositoryData, null); - if (indices.isEmpty()) { - // No indices in this snapshot - we are done - userCreateSnapshotListener.onResponse(snapshot.snapshot()); - endSnapshot( - SnapshotsInProgress.startedEntry( - snapshot.snapshot(), - snapshot.includeGlobalState(), - snapshot.partial(), - Collections.emptyList(), - Collections.emptyList(), - threadPool.absoluteTimeInMillis(), - repositoryData.getGenId(), - ImmutableOpenMap.of(), - snapshot.userMetadata(), - version - ), - clusterState.metadata(), - repositoryData - ); - return; - } - clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { - - @Override - public ClusterState execute(ClusterState currentState) { - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - List entries = new ArrayList<>(); - for (SnapshotsInProgress.Entry entry : snapshots.entries()) { - if (entry.snapshot().equals(snapshot.snapshot()) == false) { - entries.add(entry); - continue; - } - - if (entry.state() == State.ABORTED) { - entries.add(entry); - assert entry.shards().isEmpty(); - hadAbortedInitializations = true; - } else { - final List indexIds = repositoryData.resolveNewIndices(indices, Collections.emptyMap()); - // Replace the snapshot that was just initialized - ImmutableOpenMap shards = shards( - snapshots, - currentState.custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY), - currentState.metadata(), - currentState.routingTable(), - indexIds, - useShardGenerations(version), - repositoryData, - entry.repository() - ); - if (!partial) { - Tuple, Set> indicesWithMissingShards = indicesWithMissingShards( - shards, - currentState.metadata() - ); - Set missing = indicesWithMissingShards.v1(); - Set closed = indicesWithMissingShards.v2(); - if (missing.isEmpty() == false || closed.isEmpty() == false) { - final StringBuilder failureMessage = new StringBuilder(); - if (missing.isEmpty() == false) { - failureMessage.append("Indices don't have primary shards "); - failureMessage.append(missing); - } - if (closed.isEmpty() == false) { - if (failureMessage.length() > 0) { - failureMessage.append("; "); - } - failureMessage.append("Indices are closed "); - failureMessage.append(closed); - } - entries.add( - new SnapshotsInProgress.Entry( - entry, - State.FAILED, - indexIds, - repositoryData.getGenId(), - shards, - version, - failureMessage.toString() - ) - ); - continue; - } - } - entries.add( - new SnapshotsInProgress.Entry( - entry, - State.STARTED, - indexIds, - repositoryData.getGenId(), - shards, - version, - null - ) - ); - } - } - return ClusterState.builder(currentState) - .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(unmodifiableList(entries))) - .build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn( - () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), - e - ); - removeFailedSnapshotFromClusterState( - snapshot.snapshot(), - e, - null, - new CleanupAfterErrorListener(userCreateSnapshotListener, e) - ); - } - - @Override - public void onNoLongerClusterManager(String source) { - // We are not longer a cluster-manager - we shouldn't try to 
do any cleanup - // The new cluster-manager will take care of it - logger.warn( - "[{}] failed to create snapshot - no longer a cluster-manager", - snapshot.snapshot().getSnapshotId() - ); - userCreateSnapshotListener.onFailure( - new SnapshotException(snapshot.snapshot(), "cluster-manager changed during snapshot initialization") - ); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - // The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted - // for processing. If client wants to wait for the snapshot completion, it can register snapshot - // completion listener in this method. For the snapshot completion to work properly, the snapshot - // should still exist when listener is registered. - userCreateSnapshotListener.onResponse(snapshot.snapshot()); - - if (hadAbortedInitializations) { - final SnapshotsInProgress snapshotsInProgress = newState.custom(SnapshotsInProgress.TYPE); - assert snapshotsInProgress != null; - final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot()); - assert entry != null; - endSnapshot(entry, newState.metadata(), repositoryData); - } else { - endCompletedSnapshots(newState); - } - } - }); - }, this::onFailure); - } - - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e); - removeFailedSnapshotFromClusterState( - snapshot.snapshot(), - e, - null, - new CleanupAfterErrorListener(userCreateSnapshotListener, e) - ); - } - }); - } - private static class CleanupAfterErrorListener { private final ActionListener userCreateSnapshotListener; diff --git a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java index f588767d5336d..f8f512e5aefc6 100644 --- a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java +++ b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java @@ -31,7 +31,6 @@ package org.opensearch.index.query; -import org.opensearch.LegacyESVersion; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.test.geo.RandomShapeGenerator; @@ -73,21 +72,12 @@ protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) { } } if (randomBoolean()) { - QueryShardContext context = createShardContext(); - if (context.indexVersionCreated().onOrAfter(LegacyESVersion.V_7_5_0)) { // CONTAINS is only supported from version 7.5 - if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) { - builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS)); - } else { - builder.relation( - randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS) - ); - } + if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) { + builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS)); } else { - if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) { - builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS)); - } else { - 
builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN)); - } + builder.relation( + randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS) + ); } } diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 6a8999a205be2..da44643de98a5 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -246,11 +246,6 @@ public void getRepositoryData(ActionListener listener) { listener.onResponse(null); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) { - - } - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java index 5ed85fedc8cea..2a85fffa8699a 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java @@ -55,7 +55,6 @@ import java.util.Collection; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -116,9 +115,6 @@ public void getRepositoryData(ActionListener listener) { ); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) {} - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index f2b68b6fdaca0..bbf7763551bcd 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -531,8 +531,8 @@ protected boolean waitForAllSnapshotsWiped() { private void wipeCluster() throws Exception { // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping - if (nodeVersions.first().onOrAfter(LegacyESVersion.V_7_4_0) && nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced - // in version 7.4 + if (nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced + // in version 7.4 if (preserveSLMPoliciesUponCompletion() == false) { // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping deleteAllSLMPolicies(); From e207dfcfa1daeac509971bc5ed94f297137592a9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Oct 2022 13:55:00 -0700 Subject: [PATCH 041/122] Bump protobuf-java from 3.21.2 to 3.21.7 in /test/fixtures/hdfs-fixture (#4727) * Bump protobuf-java from 3.21.2 to 3.21.7 in /test/fixtures/hdfs-fixture Bumps [protobuf-java](https://github.com/protocolbuffers/protobuf) from 3.21.2 to 3.21.7. 
- [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/generate_changelog.py) - [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.21.2...v3.21.7) --- updated-dependencies: - dependency-name: com.google.protobuf:protobuf-java dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 37e16aecff69a..34dcef6077b85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] ### Dependencies - Bumps `gson` from 2.9.0 to 2.9.1 +- Bumps `protobuf-java` from 3.21.2 to 3.21.7 ### Added @@ -162,4 +163,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 2e541572b7385..d01db6376db42 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -46,5 +46,5 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'net.minidev:json-smart:2.4.8' api "org.mockito:mockito-core:${versions.mockito}" - api "com.google.protobuf:protobuf-java:3.21.2" + api "com.google.protobuf:protobuf-java:3.21.7" } From 9f415ebe87c1e26cde4f59da8fc93764c0ffa27d Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Mon, 10 Oct 2022 22:00:37 -0500 Subject: [PATCH 042/122] [Refactor] Base Action class javadocs to OpenSearch.API (#4732) Refactors base action classes implemented by external plugins through ActionPlugin from opensearch.internal to opensearch.api to signal extensibility to third-party developers. 
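For context: these base classes are what a third-party plugin subclasses when it registers custom transport actions through ActionPlugin, which is why their javadoc tag moves from @opensearch.internal to @opensearch.api. A minimal sketch of such a subclass, using hypothetical names (MyPluginAction, MyPluginResponse, and the action name string are illustrative only and not part of this change):

    import org.opensearch.action.ActionResponse;
    import org.opensearch.action.ActionType;
    import org.opensearch.common.io.stream.StreamInput;
    import org.opensearch.common.io.stream.StreamOutput;

    import java.io.IOException;

    // Hypothetical plugin-side code built on the classes re-tagged as @opensearch.api.
    public class MyPluginAction extends ActionType<MyPluginAction.MyPluginResponse> {

        public static final String NAME = "cluster:admin/my_plugin/example";
        public static final MyPluginAction INSTANCE = new MyPluginAction();

        private MyPluginAction() {
            // ActionType pairs the action name with a reader for its response type.
            super(NAME, MyPluginResponse::new);
        }

        // Response type carried over the transport layer for this hypothetical action.
        public static class MyPluginResponse extends ActionResponse {
            public MyPluginResponse(StreamInput in) throws IOException {
                super(in);
            }

            @Override
            public void writeTo(StreamOutput out) throws IOException {
                // no payload in this sketch
            }
        }
    }
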
Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 3 ++- server/src/main/java/org/opensearch/action/ActionFuture.java | 2 +- .../src/main/java/org/opensearch/action/ActionListener.java | 2 +- .../org/opensearch/action/ActionListenerResponseHandler.java | 2 +- server/src/main/java/org/opensearch/action/ActionRequest.java | 4 ++-- .../main/java/org/opensearch/action/ActionRequestBuilder.java | 2 +- .../opensearch/action/ActionRequestValidationException.java | 4 ++-- .../src/main/java/org/opensearch/action/ActionResponse.java | 4 ++-- .../src/main/java/org/opensearch/action/ActionRunnable.java | 2 +- server/src/main/java/org/opensearch/action/ActionType.java | 2 +- 10 files changed, 14 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 34dcef6077b85..75fe1e844fa74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -79,6 +79,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) - Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) +- Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) ### Deprecated @@ -163,4 +164,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/server/src/main/java/org/opensearch/action/ActionFuture.java b/server/src/main/java/org/opensearch/action/ActionFuture.java index 77b748f50bfbf..d796180eda021 100644 --- a/server/src/main/java/org/opensearch/action/ActionFuture.java +++ b/server/src/main/java/org/opensearch/action/ActionFuture.java @@ -40,7 +40,7 @@ /** * An extension to {@link Future} allowing for simplified "get" operations. * - * @opensearch.internal + * @opensearch.api */ public interface ActionFuture extends Future { diff --git a/server/src/main/java/org/opensearch/action/ActionListener.java b/server/src/main/java/org/opensearch/action/ActionListener.java index 8f632449c7d91..645ed4deec006 100644 --- a/server/src/main/java/org/opensearch/action/ActionListener.java +++ b/server/src/main/java/org/opensearch/action/ActionListener.java @@ -46,7 +46,7 @@ /** * A listener for action responses or failures. * - * @opensearch.internal + * @opensearch.api */ public interface ActionListener { /** diff --git a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java index 26e9ba8621c53..af8fde4c9893c 100644 --- a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java +++ b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java @@ -46,7 +46,7 @@ * A simple base class for action response listeners, defaulting to using the SAME executor (as its * very common on response handlers). 
* - * @opensearch.internal + * @opensearch.api */ public class ActionListenerResponseHandler implements TransportResponseHandler { diff --git a/server/src/main/java/org/opensearch/action/ActionRequest.java b/server/src/main/java/org/opensearch/action/ActionRequest.java index c6d8eb9f273d6..a6879dd98691a 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequest.java +++ b/server/src/main/java/org/opensearch/action/ActionRequest.java @@ -39,9 +39,9 @@ import java.io.IOException; /** - * Base action request + * Base action request implemented by plugins. * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionRequest extends TransportRequest { diff --git a/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java b/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java index d1fddb076b350..27358a0412468 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java @@ -40,7 +40,7 @@ /** * Base Action Request Builder * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java index d7da932c4dfc2..ffba4d2eb50c0 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java +++ b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java @@ -35,8 +35,8 @@ import org.opensearch.common.ValidationException; /** - * Base exception for an action request validation + * Base exception for an action request validation extendable by plugins * - * @opensearch.internal + * @opensearch.api */ public class ActionRequestValidationException extends ValidationException {} diff --git a/server/src/main/java/org/opensearch/action/ActionResponse.java b/server/src/main/java/org/opensearch/action/ActionResponse.java index c72fc87ccfaf8..ab0544365c0b9 100644 --- a/server/src/main/java/org/opensearch/action/ActionResponse.java +++ b/server/src/main/java/org/opensearch/action/ActionResponse.java @@ -38,9 +38,9 @@ import java.io.IOException; /** - * Base class for responses to action requests. + * Base class for responses to action requests implemented by plugins. * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionResponse extends TransportResponse { diff --git a/server/src/main/java/org/opensearch/action/ActionRunnable.java b/server/src/main/java/org/opensearch/action/ActionRunnable.java index c718b33bd404a..2c3f70afda75d 100644 --- a/server/src/main/java/org/opensearch/action/ActionRunnable.java +++ b/server/src/main/java/org/opensearch/action/ActionRunnable.java @@ -41,7 +41,7 @@ * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught * exception or error is thrown while the actual action is run. * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionRunnable extends AbstractRunnable { diff --git a/server/src/main/java/org/opensearch/action/ActionType.java b/server/src/main/java/org/opensearch/action/ActionType.java index 9c17061990abe..c22cddd6fad71 100644 --- a/server/src/main/java/org/opensearch/action/ActionType.java +++ b/server/src/main/java/org/opensearch/action/ActionType.java @@ -39,7 +39,7 @@ /** * A generic action. Should strive to make it a singleton. 
* - * @opensearch.internal + * @opensearch.api */ public class ActionType { From 21d89b28839d26068dceb162fb92a4188ed0b67c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Oct 2022 07:07:50 -0700 Subject: [PATCH 043/122] Bump azure-core from 1.31.0 to 1.33.0 in /plugins/repository-azure (#4726) * Bump azure-core from 1.31.0 to 1.33.0 in /plugins/repository-azure Bumps [azure-core](https://github.com/Azure/azure-sdk-for-java) from 1.31.0 to 1.33.0. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core_1.31.0...azure-core_1.33.0) --- updated-dependencies: - dependency-name: com.azure:azure-core dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 | 1 - plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 75fe1e844fa74..d37e38b38fdf9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Dependencies - Bumps `gson` from 2.9.0 to 2.9.1 - Bumps `protobuf-java` from 3.21.2 to 3.21.7 +- Bumps `azure-core` from 1.31.0 to 1.33.0 ### Added diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 8ca151d1e90db..6be815e4a0f46 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,7 +44,7 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.31.0' + api 'com.azure:azure-core:1.33.0' api 'com.azure:azure-storage-common:12.18.1' api 'com.azure:azure-core-http-netty:1.12.4' api "io.netty:netty-codec-dns:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 deleted file mode 100644 index 6a5076b3da301..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39f18dae02237f90f1cd23b56701d7f9d9525531 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 new file mode 100644 index 0000000000000..9077fc4ebf84b --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 @@ -0,0 +1 @@ +93f105c2e923f0ab90521cc0e6e729b9c8304ad8 \ No newline at end of file From cd8f0fadd0cea5035a325a647c161e17ff5d7384 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Oct 2022 08:46:57 -0700 Subject: [PATCH 044/122] Bump avro from 1.11.0 to 1.11.1 in /plugins/repository-hdfs (#4076) * Bump avro from 1.11.0 to 1.11.1 in /plugins/repository-hdfs Bumps avro from 1.11.0 to 1.11.1. 
--- updated-dependencies: - dependency-name: org.apache.avro:avro dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index d37e38b38fdf9..273c7d2eb088c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `gson` from 2.9.0 to 2.9.1 - Bumps `protobuf-java` from 3.21.2 to 3.21.7 - Bumps `azure-core` from 1.31.0 to 1.33.0 +- Bumps `avro` from 1.11.0 to 1.11.1 ### Added @@ -165,4 +166,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6fd91f78a63e6..3cdac4c2d2560 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -65,7 +65,7 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.0' + api 'org.apache.avro:avro:1.11.1' api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' diff --git a/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 deleted file mode 100644 index 9a0601879a1fc..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b0c58e5b450d4f4931456952ad9520cae9c896c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 new file mode 100644 index 0000000000000..f03424516b44e --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 @@ -0,0 +1 @@ +81af5d4b9bdaaf4ba41bcb0df5241355ec34c630 \ No newline at end of file From c1272c181e6bd96d1db725dfbbcd5ac8e6058502 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Tue, 11 Oct 2022 10:20:34 -0700 Subject: [PATCH 045/122] Add a new node role 'search' which is dedicated to provide search capability (#4689) Signed-off-by: Tianli Feng --- CHANGELOG.md | 1 + .../src/main/java/org/opensearch/client/Node.java | 7 +++++++ .../java/org/opensearch/client/NodeTests.java | 7 +++++++ .../admin/cluster/stats/ClusterStatsIT.java | 2 ++ .../opensearch/cluster/node/DiscoveryNode.java | 9 +++++++++ .../cluster/node/DiscoveryNodeRole.java | 15 ++++++++++++++- .../cluster/node/DiscoveryNodeTests.java | 7 +++++++ 7 files changed, 47 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 273c7d2eb088c..73cf43a765cdc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) - Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) +- Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/client/rest/src/main/java/org/opensearch/client/Node.java b/client/rest/src/main/java/org/opensearch/client/Node.java index c02ac6c68718f..2fa6605d57ad2 100644 --- a/client/rest/src/main/java/org/opensearch/client/Node.java +++ b/client/rest/src/main/java/org/opensearch/client/Node.java @@ -239,6 +239,13 @@ public boolean isIngest() { return roles.contains("ingest"); } + /** + * Returns whether the node is dedicated to provide search capability. + */ + public boolean isSearch() { + return roles.contains("search"); + } + @Override public String toString() { return String.join(",", roles); diff --git a/client/rest/src/test/java/org/opensearch/client/NodeTests.java b/client/rest/src/test/java/org/opensearch/client/NodeTests.java index 352296fa3024a..296c4a1f09122 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeTests.java +++ b/client/rest/src/test/java/org/opensearch/client/NodeTests.java @@ -48,7 +48,9 @@ import static java.util.Collections.singletonMap; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; +import static org.hamcrest.CoreMatchers.equalTo; public class NodeTests extends RestClientTestCase { public void testToString() { @@ -161,4 +163,9 @@ public void testEqualsAndHashCode() { ) ); } + + public void testIsSearchNode() { + Roles searchRole = new Roles(Collections.singleton("search")); + assertThat(searchRole.isSearch(), equalTo(true)); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index a44cf05a4bdc4..11d1af608fbee 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -89,6 +89,7 @@ public void testNodeCounts() { expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), 1); + expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), 0); expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, 0); int numNodes = randomIntBetween(1, 5); @@ -160,6 +161,7 @@ public void testNodeCountsWithDeprecatedMasterRole() { expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), 0); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), 0); + expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), 0); 
expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, 0); ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 016c0aac44be6..ec72ea8b49829 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -460,6 +460,15 @@ public boolean isRemoteClusterClient() { return roles.contains(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE); } + /** + * Returns whether the node is dedicated to provide search capability. + * + * @return true if the node contains search role, false otherwise + */ + public boolean isSearchNode() { + return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); + } + /** * Returns a set of all the roles that the node has. The roles are returned in sorted order by the role name. *

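As with the other roles, a node opts into the new capability through the node.roles setting (for example node.roles: [ search ]); the isSearchNode() predicate added above then lets callers single out those nodes. A short usage sketch, where the SearchNodeSelector helper is illustrative only and not part of this patch:

    import org.opensearch.cluster.node.DiscoveryNode;
    import org.opensearch.cluster.node.DiscoveryNodes;

    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.StreamSupport;

    // Hypothetical helper: picks the dedicated search nodes out of the cluster's node set.
    final class SearchNodeSelector {
        static List<DiscoveryNode> searchNodes(DiscoveryNodes nodes) {
            // DiscoveryNodes is Iterable<DiscoveryNode>, so it can be streamed directly.
            return StreamSupport.stream(nodes.spliterator(), false)
                .filter(DiscoveryNode::isSearchNode) // true only for nodes carrying the "search" role
                .collect(Collectors.toList());
        }
    }
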
diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 5685667c05b1a..bfc44378632d8 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -290,11 +290,24 @@ public Setting legacySetting() { }; + /** + * Represents the role for a search node, which is dedicated to provide search capability. + */ + public static final DiscoveryNodeRole SEARCH_ROLE = new DiscoveryNodeRole("search", "s", true) { + + @Override + public Setting legacySetting() { + // search role is added in 2.4 so doesn't need to configure legacy setting + return null; + } + + }; + /** * The built-in node roles. */ public static SortedSet BUILT_IN_ROLES = Collections.unmodifiableSortedSet( - new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE)) + new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, SEARCH_ROLE)) ); /** diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 70a64fc60bdb4..8c30a8ff19c89 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -39,6 +39,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.TransportAddress; +import org.opensearch.test.NodeRoles; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; @@ -204,4 +205,10 @@ public void testGetRoleFromRoleNameIsCaseInsensitive() { assertEquals(dynamicRoleName.toLowerCase(Locale.ROOT), dynamicNodeRole.roleName()); assertEquals(dynamicRoleName.toLowerCase(Locale.ROOT), dynamicNodeRole.roleNameAbbreviation()); } + + public void testDiscoveryNodeIsSearchNode() { + final Settings settingWithSearchRole = NodeRoles.onlyRole(DiscoveryNodeRole.SEARCH_ROLE); + final DiscoveryNode node = DiscoveryNode.createLocal(settingWithSearchRole, buildNewFakeTransportAddress(), "node"); + assertThat(node.isSearchNode(), equalTo(true)); + } } From 4833e080cc506237fee5aa90767d6f49d952bec3 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 11 Oct 2022 16:59:43 -0400 Subject: [PATCH 046/122] Migrate client transports to Apache HttpClient / Core 5.x (#4459) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../resources/forbidden/http-signatures.txt | 27 +- buildSrc/version.properties | 2 + .../benchmark/rest/RestClientBenchmark.java | 8 +- client/rest-high-level/build.gradle | 6 + .../client/ClusterRequestConverters.java | 8 +- .../client/IndicesRequestConverters.java | 10 +- .../client/IngestRequestConverters.java | 8 +- .../opensearch/client/RequestConverters.java | 26 +- .../client/RestHighLevelClient.java | 6 +- .../client/SnapshotRequestConverters.java | 8 +- .../client/TasksRequestConverters.java | 4 +- .../forbidden/rest-high-level-signatures.txt | 11 +- .../opensearch/client/ClusterClientIT.java | 7 +- .../client/ClusterRequestConvertersTests.java | 4 +- .../CustomRestHighLevelClientTests.java | 23 +- .../HighLevelRestClientCompressionIT.java | 6 +- .../opensearch/client/IndicesClientIT.java | 6 +- .../client/IndicesRequestConvertersTests.java | 10 +- .../client/IngestRequestConvertersTests.java | 8 +- 
.../client/MockRestHighLevelTests.java | 15 +- ...OpenSearchRestHighLevelClientTestCase.java | 6 +- .../org/opensearch/client/PingAndInfoIT.java | 2 +- .../java/org/opensearch/client/PitIT.java | 4 +- .../client/RequestConvertersTests.java | 44 +- .../client/RestHighLevelClientExtTests.java | 10 +- .../client/RestHighLevelClientTests.java | 112 ++-- .../java/org/opensearch/client/SearchIT.java | 4 +- .../SnapshotRequestConvertersTests.java | 8 +- .../client/TasksRequestConvertersTests.java | 4 +- .../documentation/CRUDDocumentationIT.java | 2 +- .../MiscellaneousDocumentationIT.java | 6 +- client/rest/build.gradle | 15 +- .../licenses/httpasyncclient-4.1.5.jar.sha1 | 1 - .../rest/licenses/httpasyncclient-LICENSE.txt | 182 ------ .../rest/licenses/httpasyncclient-NOTICE.txt | 5 - .../rest/licenses/httpclient-4.5.13.jar.sha1 | 1 - .../rest/licenses/httpclient5-5.1.3.jar.sha1 | 1 + ...nt-LICENSE.txt => httpclient5-LICENSE.txt} | 0 ...ient-NOTICE.txt => httpclient5-NOTICE.txt} | 2 +- client/rest/licenses/httpcore-4.4.15.jar.sha1 | 1 - client/rest/licenses/httpcore-LICENSE.txt | 178 ------ .../licenses/httpcore-nio-4.4.15.jar.sha1 | 1 - client/rest/licenses/httpcore-nio-LICENSE.txt | 202 ------- client/rest/licenses/httpcore5-5.1.4.jar.sha1 | 1 + client/rest/licenses/httpcore5-LICENSE.txt | 558 ++++++++++++++++++ ...re-nio-NOTICE.txt => httpcore5-NOTICE.txt} | 6 +- .../rest/licenses/httpcore5-h2-5.1.4.jar.sha1 | 1 + client/rest/licenses/httpcore5-h2-LICENSE.txt | 558 ++++++++++++++++++ ...ore-NOTICE.txt => httpcore5-h2-NOTICE.txt} | 5 +- .../rest/licenses/slf4j-api-1.7.36.jar.sha1 | 1 + client/rest/licenses/slf4j-api-LICENSE.txt | 21 + client/rest/licenses/slf4j-api-NOTICE.txt | 0 .../org/opensearch/client/Cancellable.java | 28 +- .../client/HasAttributeNodeSelector.java | 14 + .../HeapBufferedAsyncResponseConsumer.java | 125 ---- .../HttpAsyncResponseConsumerFactory.java | 20 +- .../client/HttpDeleteWithEntity.java | 54 -- .../opensearch/client/HttpGetWithEntity.java | 54 -- .../main/java/org/opensearch/client/Node.java | 22 +- ...tentCredentialsAuthenticationStrategy.java | 77 --- .../PreferHasAttributeNodeSelector.java | 14 + .../java/org/opensearch/client/Request.java | 18 +- .../org/opensearch/client/RequestLogger.java | 76 ++- .../org/opensearch/client/RequestOptions.java | 24 +- .../java/org/opensearch/client/Response.java | 28 +- .../opensearch/client/ResponseException.java | 13 +- .../org/opensearch/client/RestClient.java | 302 +++++++--- .../opensearch/client/RestClientBuilder.java | 58 +- .../client/http/HttpUriRequestProducer.java | 63 ++ .../opensearch/client/http/package-info.java | 12 + .../nio/HeapBufferedAsyncEntityConsumer.java | 139 +++++ .../HeapBufferedAsyncResponseConsumer.java | 123 ++++ .../nio/HttpEntityAsyncEntityProducer.java | 182 ++++++ .../opensearch/client/nio/package-info.java | 12 + .../FailureTrackingResponseListenerTests.java | 18 +- .../client/HasAttributeNodeSelectorTests.java | 2 +- ...eapBufferedAsyncResponseConsumerTests.java | 93 ++- .../client/HostsTrackingFailureListener.java | 2 +- .../opensearch/client/NodeSelectorTests.java | 2 +- .../java/org/opensearch/client/NodeTests.java | 2 +- .../PreferHasAttributeNodeSelectorTests.java | 2 +- .../opensearch/client/RequestLoggerTests.java | 69 +-- .../client/RequestOptionsTests.java | 13 +- .../org/opensearch/client/RequestTests.java | 19 +- .../client/ResponseExceptionTests.java | 31 +- .../client/RestClientBuilderIntegTests.java | 5 +- .../client/RestClientBuilderTests.java | 13 +- 
.../client/RestClientCompressionTests.java | 11 +- .../RestClientGzipCompressionTests.java | 13 +- .../RestClientMultipleHostsIntegTests.java | 8 +- .../client/RestClientMultipleHostsTests.java | 7 +- .../RestClientSingleHostIntegTests.java | 98 +-- .../client/RestClientSingleHostTests.java | 243 ++++---- .../opensearch/client/RestClientTests.java | 17 +- .../RestClientDocumentation.java | 126 ++-- client/sniffer/build.gradle | 5 +- .../licenses/httpclient-4.5.13.jar.sha1 | 1 - .../licenses/httpclient5-5.1.3.jar.sha1 | 1 + .../sniffer/licenses/httpcore-4.4.15.jar.sha1 | 1 - .../sniffer/licenses/httpcore5-5.1.4.jar.sha1 | 1 + .../client/sniff/OpenSearchNodesSniffer.java | 8 +- .../client/sniff/MockNodesSniffer.java | 2 +- .../OpenSearchNodesSnifferParseTests.java | 9 +- .../sniff/OpenSearchNodesSnifferTests.java | 12 +- .../sniff/SniffOnFailureListenerTests.java | 2 +- .../client/sniff/SnifferBuilderTests.java | 3 +- .../opensearch/client/sniff/SnifferTests.java | 2 +- .../documentation/SnifferDocumentation.java | 12 +- client/test/build.gradle | 2 +- .../opensearch/client/RestClientTestCase.java | 3 +- .../opensearch/client/RestClientTestUtil.java | 5 +- .../test/rest/WaitForRefreshAndCloseIT.java | 5 +- .../OpenSearchDashboardsSystemIndexIT.java | 19 +- .../AbstractAsyncBulkByScrollAction.java | 3 +- .../index/reindex/ReindexSslConfig.java | 26 +- .../opensearch/index/reindex/Reindexer.java | 50 +- .../reindex/remote/RemoteRequestBuilders.java | 8 +- .../remote/RemoteScrollableHitSource.java | 17 +- .../spi/ReindexRestInterceptorProvider.java | 3 +- .../remote/RemoteRequestBuildersTests.java | 16 +- .../RemoteScrollableHitSourceTests.java | 191 +++--- .../RepositoryURLClientYamlTestSuiteIT.java | 8 +- .../rest/discovery/Zen2RestApiIT.java | 5 +- .../azure/AzureBlobContainerRetriesTests.java | 4 +- ...CloudStorageBlobContainerRetriesTests.java | 2 +- ...rossClusterSearchUnavailableClusterIT.java | 8 +- .../upgrades/FullClusterRestartIT.java | 23 +- .../upgrades/QueryBuilderBWCIT.java | 5 +- .../org/opensearch/backwards/ExceptionIT.java | 10 +- .../org/opensearch/backwards/IndexingIT.java | 7 +- .../AbstractMultiClusterRemoteTestCase.java | 6 +- .../org/opensearch/upgrades/IndexingIT.java | 7 +- .../org/opensearch/upgrades/RecoveryIT.java | 2 +- .../http/DetailedErrorsDisabledIT.java | 5 +- .../http/DetailedErrorsEnabledIT.java | 5 +- .../opensearch/http/HttpCompressionIT.java | 9 +- .../java/org/opensearch/http/NoHandlerIT.java | 7 +- .../http/RestHttpResponseHeadersIT.java | 2 +- .../http/SearchRestCancellationIT.java | 8 +- .../RestHighLevelClientProducer.java | 6 +- .../org/opensearch/wildfly/WildflyIT.java | 21 +- .../bootstrap/test-framework.policy | 6 + .../client/RestClientBuilderTestCase.java | 3 +- .../AbstractBlobContainerRetriesTestCase.java | 4 +- ...chMockAPIBasedRepositoryIntegTestCase.java | 3 +- .../test/OpenSearchIntegTestCase.java | 5 +- .../test/rest/OpenSearchRestTestCase.java | 47 +- .../rest/yaml/ClientYamlDocsTestClient.java | 4 +- .../test/rest/yaml/ClientYamlTestClient.java | 34 +- .../yaml/ClientYamlTestExecutionContext.java | 6 +- .../rest/yaml/ClientYamlTestResponse.java | 6 +- .../opensearch/test/rest/yaml/ObjectPath.java | 2 +- .../OpenSearchClientYamlSuiteTestCase.java | 3 +- .../ClientYamlTestExecutionContextTests.java | 2 +- .../rest/yaml/section/DoSectionTests.java | 2 +- 156 files changed, 3142 insertions(+), 1940 deletions(-) delete mode 100644 client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 delete mode 100644 
client/rest/licenses/httpasyncclient-LICENSE.txt delete mode 100644 client/rest/licenses/httpasyncclient-NOTICE.txt delete mode 100644 client/rest/licenses/httpclient-4.5.13.jar.sha1 create mode 100644 client/rest/licenses/httpclient5-5.1.3.jar.sha1 rename client/rest/licenses/{httpclient-LICENSE.txt => httpclient5-LICENSE.txt} (100%) rename client/rest/licenses/{httpclient-NOTICE.txt => httpclient5-NOTICE.txt} (72%) delete mode 100644 client/rest/licenses/httpcore-4.4.15.jar.sha1 delete mode 100644 client/rest/licenses/httpcore-LICENSE.txt delete mode 100644 client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 delete mode 100644 client/rest/licenses/httpcore-nio-LICENSE.txt create mode 100644 client/rest/licenses/httpcore5-5.1.4.jar.sha1 create mode 100644 client/rest/licenses/httpcore5-LICENSE.txt rename client/rest/licenses/{httpcore-nio-NOTICE.txt => httpcore5-NOTICE.txt} (56%) create mode 100644 client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 create mode 100644 client/rest/licenses/httpcore5-h2-LICENSE.txt rename client/rest/licenses/{httpcore-NOTICE.txt => httpcore5-h2-NOTICE.txt} (55%) create mode 100644 client/rest/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 client/rest/licenses/slf4j-api-LICENSE.txt create mode 100644 client/rest/licenses/slf4j-api-NOTICE.txt delete mode 100644 client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java delete mode 100644 client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java delete mode 100644 client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java delete mode 100644 client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java create mode 100644 client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java create mode 100644 client/rest/src/main/java/org/opensearch/client/http/package-info.java create mode 100644 client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java create mode 100644 client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java create mode 100644 client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java create mode 100644 client/rest/src/main/java/org/opensearch/client/nio/package-info.java delete mode 100644 client/sniffer/licenses/httpclient-4.5.13.jar.sha1 create mode 100644 client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 delete mode 100644 client/sniffer/licenses/httpcore-4.4.15.jar.sha1 create mode 100644 client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 73cf43a765cdc..e321efc39a2bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -83,6 +83,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) +- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) ### Deprecated diff --git a/buildSrc/src/main/resources/forbidden/http-signatures.txt b/buildSrc/src/main/resources/forbidden/http-signatures.txt index dcf20bbb09387..bfd81b3521a40 100644 --- a/buildSrc/src/main/resources/forbidden/http-signatures.txt +++ 
b/buildSrc/src/main/resources/forbidden/http-signatures.txt @@ -15,31 +15,14 @@ # language governing permissions and limitations under the License. @defaultMessage Explicitly specify the ContentType of HTTP entities when creating -org.apache.http.entity.StringEntity#(java.lang.String) -org.apache.http.entity.StringEntity#(java.lang.String,java.lang.String) -org.apache.http.entity.StringEntity#(java.lang.String,java.nio.charset.Charset) -org.apache.http.entity.ByteArrayEntity#(byte[]) -org.apache.http.entity.ByteArrayEntity#(byte[],int,int) -org.apache.http.entity.FileEntity#(java.io.File) -org.apache.http.entity.InputStreamEntity#(java.io.InputStream) -org.apache.http.entity.InputStreamEntity#(java.io.InputStream,long) -org.apache.http.nio.entity.NByteArrayEntity#(byte[]) -org.apache.http.nio.entity.NByteArrayEntity#(byte[],int,int) -org.apache.http.nio.entity.NFileEntity#(java.io.File) -org.apache.http.nio.entity.NStringEntity#(java.lang.String) -org.apache.http.nio.entity.NStringEntity#(java.lang.String,java.lang.String) +org.apache.hc.core5.http.io.entity.StringEntity#(java.lang.String) +org.apache.hc.core5.http.io.entity.StringEntity#(java.lang.String,java.nio.charset.Charset) @defaultMessage Use non-deprecated constructors -org.apache.http.nio.entity.NFileEntity#(java.io.File,java.lang.String) -org.apache.http.nio.entity.NFileEntity#(java.io.File,java.lang.String,boolean) -org.apache.http.entity.FileEntity#(java.io.File,java.lang.String) -org.apache.http.entity.StringEntity#(java.lang.String,java.lang.String,java.lang.String) +org.apache.hc.core5.http.io.entity.FileEntity#(java.io.File,org.apache.hc.core5.http.ContentType) @defaultMessage BasicEntity is easy to mess up and forget to set content type -org.apache.http.entity.BasicHttpEntity#() - -@defaultMessage EntityTemplate is easy to mess up and forget to set content type -org.apache.http.entity.EntityTemplate#(org.apache.http.entity.ContentProducer) +org.apache.hc.core5.http.io.entity.BasicHttpEntity#(java.io.InputStream,org.apache.hc.core5.http.ContentType) @defaultMessage SerializableEntity uses java serialization and makes it easy to forget to set content type -org.apache.http.entity.SerializableEntity#(java.io.Serializable) +org.apache.hc.core5.http.io.entity.SerializableEntity#(java.io.Serializable,org.apache.hc.core5.http.ContentType) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index bf72245c63918..a779389b3ca82 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -25,6 +25,8 @@ netty = 4.1.79.Final joda = 2.10.13 # client dependencies +httpclient5 = 5.1.3 +httpcore5 = 5.1.4 httpclient = 4.5.13 httpcore = 4.4.15 httpasyncclient = 4.1.5 diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java index d2d7163b8dee2..e8dcff814603d 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java @@ -31,10 +31,10 @@ package org.opensearch.client.benchmark.rest; -import org.apache.http.HttpHeaders; -import org.apache.http.HttpHost; -import org.apache.http.HttpStatus; -import org.apache.http.message.BasicHeader; +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpStatus; +import org.apache.hc.core5.http.message.BasicHeader; import 
org.opensearch.OpenSearchException; import org.opensearch.client.Request; import org.opensearch.client.Response; diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 07147ce81b72e..7fa2855d85487 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -104,3 +104,9 @@ testClusters.all { extraConfigFile nodeTrustStore.name, nodeTrustStore extraConfigFile pkiTrustCert.name, pkiTrustCert } + +thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory' +) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java index 37a1ab8812845..4ff8e75b521b6 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java index 3a5384f23b90e..ca9154340a660 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java @@ -32,11 +32,11 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java index 2504dec3af36e..4c044413642ac 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import 
org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 91c339cc92c1b..88e3a3a904830 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -32,14 +32,14 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; import org.apache.lucene.util.BytesRef; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -269,7 +269,7 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { } } request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); + request.setEntity(new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); return request; } @@ -358,7 +358,7 @@ static Request index(IndexRequest indexRequest) { BytesRef source = indexRequest.source().toBytesRef(); ContentType contentType = createContentType(indexRequest.getContentType()); request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(source.bytes, source.offset, source.length, contentType)); + request.setEntity(new ByteArrayEntity(source.bytes, source.offset, source.length, contentType)); return request; } @@ -514,7 +514,7 @@ static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOExcep XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent); request.addParameters(params.asMap()); - request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); + request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type()))); return request; } @@ -549,7 +549,7 @@ static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplat XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, xContent); - 
request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); + request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type()))); return request; } @@ -817,7 +817,7 @@ static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, toXContentParams, false).toBytesRef(); - return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); + return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } static String endpoint(String index, String id) { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 0a5880b778942..27f13fc3c00c4 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; +import org.apache.hc.core5.http.HttpEntity; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.ActionListener; @@ -2220,9 +2220,9 @@ protected final Resp parseEntity(final HttpEntity entity, final CheckedFu if (entity.getContentType() == null) { throw new IllegalStateException("OpenSearch didn't return the [Content-Type] header, unable to parse response body"); } - XContentType xContentType = XContentType.fromMediaType(entity.getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(entity.getContentType()); if (xContentType == null) { - throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); + throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType()); } try (XContentParser parser = xContentType.xContent().createParser(registry, DEPRECATION_HANDLER, entity.getContent())) { return entityParser.apply(parser); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java index 3d44820966608..263d7db82ba08 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; diff --git 
a/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java index ff89950f37cb9..78a74ca04ff9b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java @@ -32,8 +32,8 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.client.RequestConverters.EndpointBuilder; import org.opensearch.client.tasks.CancelTasksRequest; diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt index 68dc509e5ff27..0d7749b39fb91 100644 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -15,10 +15,9 @@ # language governing permissions and limitations under the License. @defaultMessage Use Request#createContentType(XContentType) to be sure to pass the right MIME type -org.apache.http.entity.ContentType#create(java.lang.String) -org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String) -org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset) -org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[]) +org.apache.hc.core5.http.ContentType#create(java.lang.String) +org.apache.hc.core5.http.ContentType#create(java.lang.String,java.lang.String) +org.apache.hc.core5.http.ContentType#create(java.lang.String,java.nio.charset.Charset) @defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users org.opensearch.common.logging.DeprecationLogger @@ -30,7 +29,3 @@ org.opensearch.common.logging.PrefixLogger @defaultMessage We can't rely on log4j2 being on the classpath so don't log deprecations! 
org.opensearch.common.xcontent.LoggingDeprecationHandler - -@defaultMessage Use Nonblocking org.apache.http.nio.entity.NByteArrayEntity -org.apache.http.entity.ByteArrayEntity -org.apache.http.entity.StringEntity diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java index 71b869fb59e7b..82d2cbe9149ca 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java @@ -32,7 +32,8 @@ package org.opensearch.client; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -220,7 +221,7 @@ public void testClusterHealthGreen() throws IOException { assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); } - public void testClusterHealthYellowClusterLevel() throws IOException { + public void testClusterHealthYellowClusterLevel() throws IOException, ParseException { createIndex("index", Settings.EMPTY); createIndex("index2", Settings.EMPTY); ClusterHealthRequest request = new ClusterHealthRequest(); @@ -231,7 +232,7 @@ public void testClusterHealthYellowClusterLevel() throws IOException { assertThat(response.getIndices().size(), equalTo(0)); } - public void testClusterHealthYellowIndicesLevel() throws IOException { + public void testClusterHealthYellowIndicesLevel() throws IOException, ParseException { String firstIndex = "index"; String secondIndex = "index2"; // including another index that we do not assert on, to ensure that we are not diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java index 27adc18fd37b8..f201599632969 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java @@ -32,8 +32,6 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPut; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -44,6 +42,8 @@ import org.opensearch.common.Priority; import org.opensearch.common.util.CollectionUtils; import org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.hamcrest.CoreMatchers; import org.junit.Assert; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java index 1d94f190c611c..972c96999945f 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java @@ -32,15 +32,14 @@ package org.opensearch.client; -import org.apache.http.Header; -import 
org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.apache.lucene.util.BytesRef; import org.opensearch.Build; import org.opensearch.Version; @@ -172,13 +171,13 @@ private Response mockPerformRequest(Request request) throws IOException { when(mockResponse.getHost()).thenReturn(new HttpHost("localhost", 9200)); ProtocolVersion protocol = new ProtocolVersion("HTTP", 1, 1); - when(mockResponse.getStatusLine()).thenReturn(new BasicStatusLine(protocol, 200, "OK")); + when(mockResponse.getStatusLine()).thenReturn(new StatusLine(protocol, 200, "OK")); MainResponse response = new MainResponse(httpHeader.getValue(), Version.CURRENT, ClusterName.DEFAULT, "_na", Build.CURRENT); BytesRef bytesRef = XContentHelper.toXContent(response, XContentType.JSON, false).toBytesRef(); - when(mockResponse.getEntity()).thenReturn(new NByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON)); + when(mockResponse.getEntity()).thenReturn(new ByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON)); - RequestLine requestLine = new BasicRequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol); + RequestLine requestLine = new RequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol); when(mockResponse.getRequestLine()).thenReturn(requestLine); return mockResponse; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java index c0c03ed1d0e7c..054d0ae8670b5 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java @@ -31,9 +31,9 @@ package org.opensearch.client; -import org.apache.http.HttpHeaders; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.HttpHeaders; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java index f9c8851f8839e..750b0c15e9c14 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java @@ -32,9 +32,9 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import 
org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.admin.indices.alias.Alias; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index fdb5f2843b44d..512cc058a64a7 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -32,11 +32,11 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.indices.alias.Alias; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java index 200069ade1ea2..8aae33307279b 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java @@ -32,10 +32,6 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; @@ -44,6 +40,10 @@ import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.junit.Assert; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java index bd57c5c9e53f6..e1179c0f24cb8 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java @@ -32,13 +32,12 @@ package org.opensearch.client; -import org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; import 
org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.junit.Before; import java.io.IOException; @@ -64,9 +63,9 @@ private void setupClient() throws IOException { when(mockResponse.getWarnings()).thenReturn(WARNINGS); ProtocolVersion protocol = new ProtocolVersion("HTTP", 1, 1); - when(mockResponse.getStatusLine()).thenReturn(new BasicStatusLine(protocol, 200, "OK")); + when(mockResponse.getStatusLine()).thenReturn(new StatusLine(protocol, 200, "OK")); - RequestLine requestLine = new BasicRequestLine(HttpGet.METHOD_NAME, "/_blah", protocol); + RequestLine requestLine = new RequestLine(HttpGet.METHOD_NAME, "/_blah", protocol); when(mockResponse.getRequestLine()).thenReturn(requestLine); WarningFailureException expectedException = new WarningFailureException(mockResponse); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java index efac508cf6814..a8c73393f54ce 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.util.EntityUtils; +import org.opensearch.OpenSearchParseException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -61,6 +61,8 @@ import org.opensearch.search.SearchModule; import org.opensearch.tasks.TaskId; import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.AfterClass; import org.junit.Before; @@ -324,7 +326,7 @@ protected static void setupRemoteClusterConfig(String remoteClusterName) throws }); } - protected static Map<String, Object> toMap(Response response) throws IOException { + protected static Map<String, Object> toMap(Response response) throws IOException, OpenSearchParseException, ParseException { return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java index 09ef90cef144d..6f66a5279afa3 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpGet; import org.opensearch.client.core.MainResponse; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index be9b614a8720f..1f10deb400ecc 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -8,8 +8,8 @@ 
package org.opensearch.client; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.junit.Before; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.ActionListener; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index ee5795deb165d..576fe02718ba3 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -32,14 +32,6 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.util.EntityUtils; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; @@ -120,6 +112,14 @@ import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.RandomObjects; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.hamcrest.Matchers; import java.io.IOException; @@ -733,8 +733,8 @@ public void testIndex() throws IOException { assertEquals(method, request.getMethod()); HttpEntity entity = request.getEntity(); - assertTrue(entity instanceof NByteArrayEntity); - assertEquals(indexRequest.getContentType().mediaTypeWithoutParameters(), entity.getContentType().getValue()); + assertTrue(entity instanceof ByteArrayEntity); + assertEquals(indexRequest.getContentType().mediaTypeWithoutParameters(), entity.getContentType()); try (XContentParser parser = createParser(xContentType.xContent(), entity.getContent())) { assertEquals(nbFields, parser.map().size()); } @@ -805,11 +805,11 @@ public void testUpdate() throws IOException { assertEquals(HttpPost.METHOD_NAME, request.getMethod()); HttpEntity entity = request.getEntity(); - assertTrue(entity instanceof NByteArrayEntity); + assertTrue(entity instanceof ByteArrayEntity); UpdateRequest parsedUpdateRequest = new UpdateRequest(); - XContentType entityContentType = XContentType.fromMediaType(entity.getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(entity.getContentType()); try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) { parsedUpdateRequest.fromXContent(parser); } @@ -926,7 +926,7 @@ public void testBulk() throws IOException { assertEquals("/_bulk", request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertEquals(HttpPost.METHOD_NAME, 
request.getMethod()); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); byte[] content = new byte[(int) request.getEntity().getContentLength()]; try (InputStream inputStream = request.getEntity().getContent()) { Streams.readFully(inputStream, content); @@ -979,7 +979,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new DeleteRequest("index", "2")); Request request = RequestConverters.bulk(bulkRequest); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); @@ -989,7 +989,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new DeleteRequest("index", "2")); Request request = RequestConverters.bulk(bulkRequest); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); @@ -1001,7 +1001,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { } Request request = RequestConverters.bulk(new BulkRequest().add(updateRequest)); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { BulkRequest bulkRequest = new BulkRequest(); @@ -1289,7 +1289,7 @@ public void testSearchScroll() throws IOException { assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertToXContentBody(searchScrollRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testClearScroll() throws IOException { @@ -1303,7 +1303,7 @@ public void testClearScroll() throws IOException { assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertToXContentBody(clearScrollRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testCreatePit() throws IOException { @@ -1324,7 +1324,7 @@ public void testCreatePit() throws IOException { assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(createPitRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testDeletePit() throws IOException { @@ -1337,7 +1337,7 @@ public void testDeletePit() throws IOException { assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); assertEquals(endpoint, 
request.getEndpoint()); assertToXContentBody(deletePitRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testDeleteAllPits() { @@ -1456,7 +1456,7 @@ public void testMultiSearchTemplate() throws Exception { HttpEntity actualEntity = multiRequest.getEntity(); byte[] expectedBytes = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, XContentType.JSON.xContent()); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType()); assertEquals(new BytesArray(expectedBytes), new BytesArray(EntityUtils.toByteArray(actualEntity))); } @@ -1763,7 +1763,7 @@ public void testDeleteScriptRequest() { static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType()); assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity))); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java index dbdf7eba3dca4..5743820ff0175 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java @@ -32,9 +32,9 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.Before; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.NamedXContentRegistry; @@ -64,14 +64,14 @@ public void initClient() { public void testParseEntityCustomResponseSection() throws IOException { { - HttpEntity jsonEntity = new NStringEntity("{\"custom1\":{ \"field\":\"value\"}}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"custom1\":{ \"field\":\"value\"}}", ContentType.APPLICATION_JSON); BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent); assertThat(customSection, instanceOf(CustomResponseSection1.class)); CustomResponseSection1 customResponseSection1 = (CustomResponseSection1) customSection; assertEquals("value", customResponseSection1.value); } { - HttpEntity jsonEntity = new NStringEntity("{\"custom2\":{ \"array\": [\"item1\", \"item2\"]}}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"custom2\":{ \"array\": [\"item1\", \"item2\"]}}", ContentType.APPLICATION_JSON); BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent); assertThat(customSection, instanceOf(CustomResponseSection2.class)); CustomResponseSection2 
customResponseSection2 = (CustomResponseSection2) customSection; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 1b9c7d4aead12..9086fc9563c57 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -33,19 +33,6 @@ package org.opensearch.client; import com.fasterxml.jackson.core.JsonParseException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.nio.entity.NStringEntity; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; @@ -87,6 +74,17 @@ import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.hamcrest.Matchers; import org.junit.Before; @@ -123,7 +121,7 @@ public class RestHighLevelClientTests extends OpenSearchTestCase { private static final String SUBMIT_TASK_PREFIX = "submit_"; private static final String SUBMIT_TASK_SUFFIX = "_task"; private static final ProtocolVersion HTTP_PROTOCOL = new ProtocolVersion("http", 1, 1); - private static final RequestLine REQUEST_LINE = new BasicRequestLine(HttpGet.METHOD_NAME, "/", HTTP_PROTOCOL); + private static final RequestLine REQUEST_LINE = new RequestLine(HttpGet.METHOD_NAME, "/", HTTP_PROTOCOL); /** * These APIs do not use a Request object (because they don't have a body, or any request parameters). 
@@ -258,7 +256,7 @@ private void mockResponse(ToXContent toXContent) throws IOException { Response response = mock(Response.class); ContentType contentType = ContentType.parse(RequestConverters.REQUEST_BODY_CONTENT_TYPE.mediaType()); String requestBody = toXContent(toXContent, RequestConverters.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString(); - when(response.getEntity()).thenReturn(new NStringEntity(requestBody, contentType)); + when(response.getEntity()).thenReturn(new StringEntity(requestBody, contentType)); when(restClient.performRequest(any(Request.class))).thenReturn(response); } @@ -308,14 +306,14 @@ public void testParseEntity() throws IOException { { IllegalStateException ise = expectThrows( IllegalStateException.class, - () -> restHighLevelClient.parseEntity(new NStringEntity("", (ContentType) null), null) + () -> restHighLevelClient.parseEntity(new StringEntity("", (ContentType) null), null) ); assertEquals("OpenSearch didn't return the [Content-Type] header, unable to parse response body", ise.getMessage()); } { - NStringEntity entity = new NStringEntity("", ContentType.APPLICATION_SVG_XML); + StringEntity entity = new StringEntity("", ContentType.APPLICATION_SVG_XML); IllegalStateException ise = expectThrows(IllegalStateException.class, () -> restHighLevelClient.parseEntity(entity, null)); - assertEquals("Unsupported Content-Type: " + entity.getContentType().getValue(), ise.getMessage()); + assertEquals("Unsupported Content-Type: " + entity.getContentType(), ise.getMessage()); } { CheckedFunction entityParser = parser -> { @@ -326,9 +324,9 @@ public void testParseEntity() throws IOException { assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); return value; }; - HttpEntity jsonEntity = new NStringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON); assertEquals("value", restHighLevelClient.parseEntity(jsonEntity, entityParser)); - HttpEntity yamlEntity = new NStringEntity("---\nfield: value\n", ContentType.create("application/yaml")); + HttpEntity yamlEntity = new StringEntity("---\nfield: value\n", ContentType.create("application/yaml")); assertEquals("value", restHighLevelClient.parseEntity(yamlEntity, entityParser)); HttpEntity smileEntity = createBinaryEntity(SmileXContent.contentBuilder(), ContentType.create("application/smile")); assertEquals("value", restHighLevelClient.parseEntity(smileEntity, entityParser)); @@ -342,13 +340,13 @@ private static HttpEntity createBinaryEntity(XContentBuilder xContentBuilder, Co builder.startObject(); builder.field("field", "value"); builder.endObject(); - return new NByteArrayEntity(BytesReference.bytes(builder).toBytesRef().bytes, contentType); + return new ByteArrayEntity(BytesReference.bytes(builder).toBytesRef().bytes, contentType); } } public void testConvertExistsResponse() { RestStatus restStatus = randomBoolean() ? 
RestStatus.OK : randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); boolean result = RestHighLevelClient.convertExistsResponse(response); assertEquals(restStatus == RestStatus.OK, result); @@ -357,7 +355,7 @@ public void testConvertExistsResponse() { public void testParseResponseException() throws IOException { { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -367,9 +365,9 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity( + new StringEntity( "{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON ) @@ -383,8 +381,8 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -395,8 +393,8 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -411,7 +409,7 @@ public void testPerformRequestOnSuccess() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new 
BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); when(restClient.performRequest(any(Request.class))).thenReturn(mockResponse); { @@ -437,7 +435,7 @@ public void testPerformRequestOnSuccess() throws IOException { ); assertEquals( "Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " - + "response=http/1.1 " + + "response=HTTP/1.1 " + restStatus.getStatus() + " " + restStatus.name() @@ -451,7 +449,7 @@ public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOExcept MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -474,9 +472,9 @@ public void testPerformRequestOnResponseExceptionWithEntity() throws IOException MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) + new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) ); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); @@ -500,8 +498,8 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOExc MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -525,8 +523,8 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOEx MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new 
BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -549,7 +547,7 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOEx public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -569,7 +567,7 @@ public void testPerformRequestOnResponseExceptionWithIgnores() throws IOExceptio public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -591,8 +589,8 @@ public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); - httpResponse.setEntity(new NStringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); + httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -620,7 +618,7 @@ public void testWrapResponseListenerOnSuccess() { Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + 
ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); responseListener.onSuccess(new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse)); assertNull(trackingActionListener.exception.get()); assertEquals(restStatus.getStatus(), trackingActionListener.statusCode.get()); @@ -633,13 +631,13 @@ public void testWrapResponseListenerOnSuccess() { Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); responseListener.onSuccess(new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse)); assertThat(trackingActionListener.exception.get(), instanceOf(IOException.class)); IOException ioe = (IOException) trackingActionListener.exception.get(); assertEquals( "Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " - + "response=http/1.1 " + + "response=HTTP/1.1 " + restStatus.getStatus() + " " + restStatus.name() @@ -670,7 +668,7 @@ public void testWrapResponseListenerOnResponseExceptionWithoutEntity() throws IO Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -689,9 +687,9 @@ public void testWrapResponseListenerOnResponseExceptionWithEntity() throws IOExc Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) + new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) ); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); @@ -712,8 +710,8 @@ public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -732,8 +730,8 @@ public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = 
new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -753,7 +751,7 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnores() throws IOEx trackingActionListener, Collections.singleton(404) ); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -771,7 +769,7 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnoresErrorNoBody() trackingActionListener, Collections.singleton(404) ); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -791,8 +789,8 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnoresErrorValidBody trackingActionListener, Collections.singleton(404) ); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); - httpResponse.setEntity(new NStringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); + httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -1162,6 +1160,6 @@ public void onFailure(Exception e) { } private static StatusLine newStatusLine(RestStatus restStatus) { - return new BasicStatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name()); + return new StatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name()); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 8b509e5d19e92..cc6f08217d057 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -32,8 +32,6 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import 
org.opensearch.action.explain.ExplainRequest; @@ -101,6 +99,8 @@ import org.opensearch.search.suggest.Suggest; import org.opensearch.search.suggest.SuggestBuilder; import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.hamcrest.Matchers; import org.junit.Before; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java index 10baaa2e53dd4..e86de6ba718f9 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java index 64fec3c8fb810..a777bbc5d1868 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java @@ -32,8 +32,8 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.client.tasks.CancelTasksRequest; import org.opensearch.tasks.TaskId; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java index 959c5a827f143..c63b311feebc7 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java @@ -32,7 +32,7 @@ package org.opensearch.client.documentation; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java index 0213441a0b6a7..3edf639da8867 100644 --- 
a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java @@ -32,7 +32,7 @@ package org.opensearch.client.documentation; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestClient; @@ -92,8 +92,8 @@ public void testInitializationFromClientBuilder() throws IOException { //tag::rest-high-level-client-init RestHighLevelClient client = new RestHighLevelClient( RestClient.builder( - new HttpHost("localhost", 9200, "http"), - new HttpHost("localhost", 9201, "http"))); + new HttpHost("http", "localhost", 9200), + new HttpHost("http", "localhost", 9201))); //end::rest-high-level-client-init //tag::rest-high-level-client-close diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 01c186ed83fc2..178359948f32e 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -40,12 +40,12 @@ group = 'org.opensearch.client' archivesBaseName = 'opensearch-rest-client' dependencies { - api "org.apache.httpcomponents:httpclient:${versions.httpclient}" - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" - api "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" - api "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" + api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" + api "org.apache.httpcomponents.core5:httpcore5-h2:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" + api "org.slf4j:slf4j-api:${versions.slf4j}" testImplementation project(":client:test") testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" @@ -54,6 +54,9 @@ dependencies { testImplementation "org.mockito:mockito-core:${versions.mockito}" testImplementation "org.objenesis:objenesis:${versions.objenesis}" testImplementation "net.bytebuddy:byte-buddy:${versions.bytebuddy}" + testImplementation "org.apache.logging.log4j:log4j-api:${versions.log4j}" + testImplementation "org.apache.logging.log4j:log4j-core:${versions.log4j}" + testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" } tasks.withType(CheckForbiddenApis).configureEach { @@ -85,6 +88,10 @@ testingConventions { } thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', //commons-logging optional dependencies 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', diff --git a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 deleted file mode 100644 index 366a9e31069a6..0000000000000 --- a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd18227f1eb8e9a263286c1d7362ceb24f6f9b32 \ No newline at end of file diff --git a/client/rest/licenses/httpasyncclient-LICENSE.txt b/client/rest/licenses/httpasyncclient-LICENSE.txt deleted file mode 100644 index 2c41ec88f61cf..0000000000000 --- a/client/rest/licenses/httpasyncclient-LICENSE.txt +++ /dev/null @@ -1,182 +0,0 @@ - Apache License - Version 2.0, 
January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - -This project contains annotations derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. -See http://www.jcip.net and the Creative Commons Attribution License -(http://creativecommons.org/licenses/by/2.5) - diff --git a/client/rest/licenses/httpasyncclient-NOTICE.txt b/client/rest/licenses/httpasyncclient-NOTICE.txt deleted file mode 100644 index b45be98d168a4..0000000000000 --- a/client/rest/licenses/httpasyncclient-NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -Apache HttpComponents AsyncClient -Copyright 2010-2016 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). 
diff --git a/client/rest/licenses/httpclient-4.5.13.jar.sha1 b/client/rest/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/client/rest/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.1.3.jar.sha1 b/client/rest/licenses/httpclient5-5.1.3.jar.sha1 new file mode 100644 index 0000000000000..b18cf050ac8df --- /dev/null +++ b/client/rest/licenses/httpclient5-5.1.3.jar.sha1 @@ -0,0 +1 @@ +13c984b7b881afcff3a7f0bb95878724a48a4b66 \ No newline at end of file diff --git a/client/rest/licenses/httpclient-LICENSE.txt b/client/rest/licenses/httpclient5-LICENSE.txt similarity index 100% rename from client/rest/licenses/httpclient-LICENSE.txt rename to client/rest/licenses/httpclient5-LICENSE.txt diff --git a/client/rest/licenses/httpclient-NOTICE.txt b/client/rest/licenses/httpclient5-NOTICE.txt similarity index 72% rename from client/rest/licenses/httpclient-NOTICE.txt rename to client/rest/licenses/httpclient5-NOTICE.txt index 91e5c40c4c6d3..afee7c6e6880b 100644 --- a/client/rest/licenses/httpclient-NOTICE.txt +++ b/client/rest/licenses/httpclient5-NOTICE.txt @@ -1,5 +1,5 @@ Apache HttpComponents Client -Copyright 1999-2016 The Apache Software Foundation +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/client/rest/licenses/httpcore-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/client/rest/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/rest/licenses/httpcore-LICENSE.txt b/client/rest/licenses/httpcore-LICENSE.txt deleted file mode 100644 index e454a52586f29..0000000000000 --- a/client/rest/licenses/httpcore-LICENSE.txt +++ /dev/null @@ -1,178 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - diff --git a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 deleted file mode 100644 index 251b35ab6a1a5..0000000000000 --- a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -85d2b6825d42db909a1474f0ffbd6328429b7a32 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-LICENSE.txt b/client/rest/licenses/httpcore-nio-LICENSE.txt deleted file mode 100644 index d645695673349..0000000000000 --- a/client/rest/licenses/httpcore-nio-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/client/rest/licenses/httpcore5-5.1.4.jar.sha1 b/client/rest/licenses/httpcore5-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..c8981fb933736 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.1.4.jar.sha1 @@ -0,0 +1 @@ +92538a62a4aacf96c9ea8992346a453e83da85fc \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-LICENSE.txt b/client/rest/licenses/httpcore5-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore-nio-NOTICE.txt b/client/rest/licenses/httpcore5-NOTICE.txt similarity index 56% rename from client/rest/licenses/httpcore-nio-NOTICE.txt rename to client/rest/licenses/httpcore5-NOTICE.txt index a2e17bb60009f..afee7c6e6880b 100644 --- a/client/rest/licenses/httpcore-nio-NOTICE.txt +++ b/client/rest/licenses/httpcore5-NOTICE.txt @@ -1,8 +1,6 @@ - -Apache HttpCore NIO -Copyright 2005-2016 The Apache Software Foundation +Apache HttpComponents Client +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). - diff --git a/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..2369ee9dfb7e1 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 @@ -0,0 +1 @@ +04de79e0bb34d65c86e4d163ae2f45d53746b70d \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-LICENSE.txt b/client/rest/licenses/httpcore5-h2-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore-NOTICE.txt b/client/rest/licenses/httpcore5-h2-NOTICE.txt similarity index 55% rename from client/rest/licenses/httpcore-NOTICE.txt rename to client/rest/licenses/httpcore5-h2-NOTICE.txt index 013448d3e9561..afee7c6e6880b 100644 --- a/client/rest/licenses/httpcore-NOTICE.txt +++ b/client/rest/licenses/httpcore5-h2-NOTICE.txt @@ -1,5 +1,6 @@ -Apache HttpComponents Core -Copyright 2005-2016 The Apache Software Foundation +Apache HttpComponents Client +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 b/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/client/rest/licenses/slf4j-api-LICENSE.txt b/client/rest/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..8fda22f4d72f6 --- /dev/null +++ b/client/rest/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2014 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/client/rest/licenses/slf4j-api-NOTICE.txt b/client/rest/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/src/main/java/org/opensearch/client/Cancellable.java b/client/rest/src/main/java/org/opensearch/client/Cancellable.java index 4bfc0704227aa..56e31a3742f35 100644 --- a/client/rest/src/main/java/org/opensearch/client/Cancellable.java +++ b/client/rest/src/main/java/org/opensearch/client/Cancellable.java @@ -31,24 +31,26 @@ package org.opensearch.client; -import org.apache.http.client.methods.AbstractExecutionAwareRequest; -import org.apache.http.client.methods.HttpRequestBase; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.concurrent.CancellableDependency; import java.util.concurrent.CancellationException; /** * Represents an operation that can be cancelled. * Returned when executing async requests through {@link RestClient#performRequestAsync(Request, ResponseListener)}, so that the request - * can be cancelled if needed. Cancelling a request will result in calling {@link AbstractExecutionAwareRequest#abort()} on the underlying + * can be cancelled if needed. Cancelling a request will result in calling {@link CancellableDependency#cancel()} on the underlying * request object, which will in turn cancel its corresponding {@link java.util.concurrent.Future}. * Note that cancelling a request does not automatically translate to aborting its execution on the server side, which needs to be * specifically implemented in each API. */ -public class Cancellable { +public class Cancellable implements org.apache.hc.core5.concurrent.Cancellable { static final Cancellable NO_OP = new Cancellable(null) { @Override - public void cancel() {} + public boolean cancel() { + throw new UnsupportedOperationException(); + } @Override void runIfNotCancelled(Runnable runnable) { @@ -56,13 +58,13 @@ void runIfNotCancelled(Runnable runnable) { } }; - static Cancellable fromRequest(HttpRequestBase httpRequest) { + static Cancellable fromRequest(CancellableDependency httpRequest) { return new Cancellable(httpRequest); } - private final HttpRequestBase httpRequest; + private final CancellableDependency httpRequest; - private Cancellable(HttpRequestBase httpRequest) { + private Cancellable(CancellableDependency httpRequest) { this.httpRequest = httpRequest; } @@ -70,15 +72,15 @@ private Cancellable(HttpRequestBase httpRequest) { * Cancels the on-going request that is associated with the current instance of {@link Cancellable}. * */ - public synchronized void cancel() { - this.httpRequest.abort(); + public synchronized boolean cancel() { + return this.httpRequest.cancel(); } /** * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws {@link CancellationException}. * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different - * attempts of the same request. The low-level client reuses the same instance of the {@link AbstractExecutionAwareRequest} by calling - * {@link AbstractExecutionAwareRequest#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, + * attempts of the same request. 
The low-level client reuses the same instance of the {@link CancellableDependency} by calling + * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, * and we need to handle the case where it gets called while there is no request being executed as one attempt may have failed and * the subsequent attempt has not been started yet. * If the request has already been cancelled we don't go ahead with the next attempt, and artificially raise the @@ -87,7 +89,7 @@ public synchronized void cancel() { * when there is no future to cancel, which would make cancelling the request a no-op. */ synchronized void runIfNotCancelled(Runnable runnable) { - if (this.httpRequest.isAborted()) { + if (this.httpRequest.isCancelled()) { throw newCancellationException(); } runnable.run(); diff --git a/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java b/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java index e6005c207ec93..0a54dbaf30364 100644 --- a/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java +++ b/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java @@ -57,6 +57,10 @@ public HasAttributeNodeSelector(String key, String value) { this.value = value; } + /** + * Select the {@link Node}s to which to send requests. + * @param nodes the {@link Node}s targeted for the sending requests + */ @Override public void select(Iterable nodes) { Iterator itr = nodes.iterator(); @@ -70,6 +74,10 @@ public void select(Iterable nodes) { } } + /** + * Compare two node selectors for equality + * @param o node selector instance to compare with + */ @Override public boolean equals(Object o) { if (this == o) { @@ -82,11 +90,17 @@ public boolean equals(Object o) { return Objects.equals(key, that.key) && Objects.equals(value, that.value); } + /** + * Calculate the hash code of the node selector + */ @Override public int hashCode() { return Objects.hash(key, value); } + /** + * Convert this node selector to string representation + */ @Override public String toString() { return key + "=" + value; diff --git a/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java b/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java deleted file mode 100644 index e2993e48a5a05..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpException; -import org.apache.http.HttpResponse; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.ContentDecoder; -import org.apache.http.nio.IOControl; -import org.apache.http.nio.entity.ContentBufferEntity; -import org.apache.http.nio.protocol.AbstractAsyncResponseConsumer; -import org.apache.http.nio.util.ByteBufferAllocator; -import org.apache.http.nio.util.HeapByteBufferAllocator; -import org.apache.http.nio.util.SimpleInputBuffer; -import org.apache.http.protocol.HttpContext; - -import java.io.IOException; - -/** - * Default implementation of {@link org.apache.http.nio.protocol.HttpAsyncResponseConsumer}. Buffers the whole - * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response. - * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer - * than the configured buffer limit. - */ -public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer { - - private final int bufferLimitBytes; - private volatile HttpResponse response; - private volatile SimpleInputBuffer buf; - - /** - * Creates a new instance of this consumer with the provided buffer limit. - * - * @param bufferLimit the buffer limit. Must be greater than 0. - * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0. - */ - public HeapBufferedAsyncResponseConsumer(int bufferLimit) { - if (bufferLimit <= 0) { - throw new IllegalArgumentException("bufferLimit must be greater than 0"); - } - this.bufferLimitBytes = bufferLimit; - } - - /** - * Get the limit of the buffer. - */ - public int getBufferLimit() { - return bufferLimitBytes; - } - - @Override - protected void onResponseReceived(HttpResponse response) throws HttpException, IOException { - this.response = response; - } - - @Override - protected void onEntityEnclosed(HttpEntity entity, ContentType contentType) throws IOException { - long len = entity.getContentLength(); - if (len > bufferLimitBytes) { - throw new ContentTooLongException( - "entity content is too long [" + len + "] for the configured buffer limit [" + bufferLimitBytes + "]" - ); - } - if (len < 0) { - len = 4096; - } - this.buf = new SimpleInputBuffer((int) len, getByteBufferAllocator()); - this.response.setEntity(new ContentBufferEntity(entity, this.buf)); - } - - /** - * Returns the instance of {@link ByteBufferAllocator} to use for content buffering. - * Allows to plug in any {@link ByteBufferAllocator} implementation. 
- */ - protected ByteBufferAllocator getByteBufferAllocator() { - return HeapByteBufferAllocator.INSTANCE; - } - - @Override - protected void onContentReceived(ContentDecoder decoder, IOControl ioctrl) throws IOException { - this.buf.consumeContent(decoder); - } - - @Override - protected HttpResponse buildResult(HttpContext context) throws Exception { - return response; - } - - @Override - protected void releaseResources() { - response = null; - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java b/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java index 7a56e03a1162c..6420a615484d0 100644 --- a/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java +++ b/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java @@ -32,30 +32,31 @@ package org.opensearch.client; -import org.apache.http.HttpResponse; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; import static org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.DEFAULT_BUFFER_LIMIT; /** - * Factory used to create instances of {@link HttpAsyncResponseConsumer}. Each request retry needs its own instance of the + * Factory used to create instances of {@link AsyncResponseConsumer}. Each request retry needs its own instance of the * consumer object. Users can implement this interface and pass their own instance to the specialized * performRequest methods that accept an {@link HttpAsyncResponseConsumerFactory} instance as argument. */ public interface HttpAsyncResponseConsumerFactory { /** - * Creates the default type of {@link HttpAsyncResponseConsumer}, based on heap buffering with a buffer limit of 100MB. + * Creates the default type of {@link AsyncResponseConsumer}, based on heap buffering with a buffer limit of 100MB. */ HttpAsyncResponseConsumerFactory DEFAULT = new HeapBufferedResponseConsumerFactory(DEFAULT_BUFFER_LIMIT); /** - * Creates the {@link HttpAsyncResponseConsumer}, called once per request attempt. + * Creates the {@link AsyncResponseConsumer}, called once per request attempt. */ - HttpAsyncResponseConsumer createHttpAsyncResponseConsumer(); + AsyncResponseConsumer createHttpAsyncResponseConsumer(); /** - * Default factory used to create instances of {@link HttpAsyncResponseConsumer}. + * Default factory used to create instances of {@link AsyncResponseConsumer}. * Creates one instance of {@link HeapBufferedAsyncResponseConsumer} for each request attempt, with a configurable * buffer limit which defaults to 100MB. */ @@ -75,8 +76,11 @@ public HeapBufferedResponseConsumerFactory(int bufferLimitBytes) { this.bufferLimit = bufferLimitBytes; } + /** + * Creates the {@link AsyncResponseConsumer}, called once per request attempt. 
+ */ @Override - public HttpAsyncResponseConsumer createHttpAsyncResponseConsumer() { + public AsyncResponseConsumer createHttpAsyncResponseConsumer() { return new HeapBufferedAsyncResponseConsumer(bufferLimit); } } diff --git a/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java b/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java deleted file mode 100644 index 52618cd7edc75..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; - -import java.net.URI; - -/** - * Allows to send DELETE requests providing a body (not supported out of the box) - */ -final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase { - - static final String METHOD_NAME = HttpDelete.METHOD_NAME; - - HttpDeleteWithEntity(final URI uri) { - setURI(uri); - } - - @Override - public String getMethod() { - return METHOD_NAME; - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java b/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java deleted file mode 100644 index 8ab639433f6be..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
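A minimal sketch of wiring a custom buffer limit through the factory described above, so that each retry attempt gets its own heap-buffered consumer; the 200 MB size and the search endpoint are assumed values, not defaults:

```java
import org.opensearch.client.HttpAsyncResponseConsumerFactory;
import org.opensearch.client.Request;
import org.opensearch.client.RequestOptions;

public class BufferLimitExample {
    // Allow responses up to 200 MB instead of the default 100 MB (size assumed).
    private static final int BUFFER_LIMIT = 200 * 1024 * 1024;

    static Request searchRequestWithLargerBuffer() {
        RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder();
        options.setHttpAsyncResponseConsumerFactory(
            new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(BUFFER_LIMIT)
        );
        Request request = new Request("GET", "/_search");
        request.setOptions(options);
        return request;
    }
}
```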
- */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; -import org.apache.http.client.methods.HttpGet; - -import java.net.URI; - -/** - * Allows to send GET requests providing a body (not supported out of the box) - */ -final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase { - - static final String METHOD_NAME = HttpGet.METHOD_NAME; - - HttpGetWithEntity(final URI uri) { - setURI(uri); - } - - @Override - public String getMethod() { - return METHOD_NAME; - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/Node.java b/client/rest/src/main/java/org/opensearch/client/Node.java index 2fa6605d57ad2..8fe5dcfa00db0 100644 --- a/client/rest/src/main/java/org/opensearch/client/Node.java +++ b/client/rest/src/main/java/org/opensearch/client/Node.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import java.util.List; import java.util.Map; @@ -152,6 +152,9 @@ public Map> getAttributes() { return attributes; } + /** + * Convert node to string representation + */ @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -174,6 +177,10 @@ public String toString() { return b.append(']').toString(); } + /** + * Compare two nodes for equality + * @param obj node instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != getClass()) { @@ -188,6 +195,9 @@ public boolean equals(Object obj) { && Objects.equals(attributes, other.attributes); } + /** + * Calculate the hash code of the node + */ @Override public int hashCode() { return Objects.hash(host, boundHosts, name, version, roles, attributes); @@ -246,11 +256,18 @@ public boolean isSearch() { return roles.contains("search"); } + /** + * Convert roles to string representation + */ @Override public String toString() { return String.join(",", roles); } + /** + * Compare two roles for equality + * @param obj roles instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != getClass()) { @@ -260,6 +277,9 @@ public boolean equals(Object obj) { return roles.equals(other.roles); } + /** + * Calculate the hash code of the roles + */ @Override public int hashCode() { return roles.hashCode(); diff --git a/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java b/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java deleted file mode 100644 index 8a35d6eb607ca..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - * - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpHost; -import org.apache.http.auth.AuthScheme; -import org.apache.http.impl.client.TargetAuthenticationStrategy; -import org.apache.http.protocol.HttpContext; - -/** - * An {@link org.apache.http.client.AuthenticationStrategy} implementation that does not perform - * any special handling if authentication fails. - * The default handler in Apache HTTP client mimics standard browser behaviour of clearing authentication - * credentials if it receives a 401 response from the server. While this can be useful for browser, it is - * rarely the desired behaviour with the OpenSearch REST API. - * If the code using the REST client has configured credentials for the REST API, then we can and should - * assume that this is intentional, and those credentials represent the best possible authentication - * mechanism to the OpenSearch node. - * If we receive a 401 status, a probably cause is that the authentication mechanism in place was unable - * to perform the requisite password checks (the node has not yet recovered its state, or an external - * authentication provider was unavailable). - * If this occurs, then the desired behaviour is for the Rest client to retry with the same credentials - * (rather than trying with no credentials, or expecting the calling code to provide alternate credentials). - */ -final class PersistentCredentialsAuthenticationStrategy extends TargetAuthenticationStrategy { - - private final Log logger = LogFactory.getLog(PersistentCredentialsAuthenticationStrategy.class); - - @Override - public void authFailed(HttpHost host, AuthScheme authScheme, HttpContext context) { - if (logger.isDebugEnabled()) { - logger.debug( - "Authentication to " - + host - + " failed (scheme: " - + authScheme.getSchemeName() - + "). Preserving credentials for next request" - ); - } - // Do nothing. - // The superclass implementation of method will clear the credentials from the cache, but we don't - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java b/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java index ddec1da068bf0..7cf7490692650 100644 --- a/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java +++ b/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java @@ -58,6 +58,10 @@ public PreferHasAttributeNodeSelector(String key, String value) { this.value = value; } + /** + * Select the {@link Node}s to which to send requests. 
+ * @param nodes the {@link Node}s targeted for the sending requests + */ @Override public void select(Iterable nodes) { boolean foundAtLeastOne = false; @@ -99,6 +103,10 @@ public void select(Iterable nodes) { } } + /** + * Compare two node selectors for equality + * @param o node selector instance to compare with + */ @Override public boolean equals(Object o) { if (this == o) { @@ -111,11 +119,17 @@ public boolean equals(Object o) { return Objects.equals(key, that.key) && Objects.equals(value, that.value); } + /** + * Calculate the hash code of the node selector + */ @Override public int hashCode() { return Objects.hash(key, value); } + /** + * Convert this node selector to string representation + */ @Override public String toString() { return key + "=" + value; diff --git a/client/rest/src/main/java/org/opensearch/client/Request.java b/client/rest/src/main/java/org/opensearch/client/Request.java index df81ca7f717ae..441b01b0891ad 100644 --- a/client/rest/src/main/java/org/opensearch/client/Request.java +++ b/client/rest/src/main/java/org/opensearch/client/Request.java @@ -32,9 +32,9 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import java.util.HashMap; import java.util.Map; @@ -133,7 +133,7 @@ public void setEntity(HttpEntity entity) { * @param entity JSON string to be set as the entity body of the request. */ public void setJsonEntity(String entity) { - setEntity(entity == null ? null : new NStringEntity(entity, ContentType.APPLICATION_JSON)); + setEntity(entity == null ? null : new StringEntity(entity, ContentType.APPLICATION_JSON)); } /** @@ -176,6 +176,9 @@ public RequestOptions getOptions() { return options; } + /** + * Convert request to string representation + */ @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -192,6 +195,10 @@ public String toString() { return b.append('}').toString(); } + /** + * Compare two requests for equality + * @param obj request instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || (obj.getClass() != getClass())) { @@ -209,6 +216,9 @@ public boolean equals(Object obj) { && options.equals(other.options); } + /** + * Calculate the hash code of the request + */ @Override public int hashCode() { return Objects.hash(method, endpoint, parameters, entity, options); diff --git a/client/rest/src/main/java/org/opensearch/client/RequestLogger.java b/client/rest/src/main/java/org/opensearch/client/RequestLogger.java index 297885fa3131b..0f2e0e6da834d 100644 --- a/client/rest/src/main/java/org/opensearch/client/RequestLogger.java +++ b/client/rest/src/main/java/org/opensearch/client/RequestLogger.java @@ -34,16 +34,16 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.RequestLine; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.entity.BufferedHttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import 
org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.BufferedHttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.message.StatusLine; import java.io.BufferedReader; import java.io.IOException; @@ -66,17 +66,10 @@ private RequestLogger() {} /** * Logs a request that yielded a response */ - static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { + static void logResponse(Log logger, HttpUriRequest request, HttpHost host, ClassicHttpResponse httpResponse) { if (logger.isDebugEnabled()) { logger.debug( - "request [" - + request.getMethod() - + " " - + host - + getUri(request.getRequestLine()) - + "] returned [" - + httpResponse.getStatusLine() - + "]" + "request [" + request.getMethod() + " " + host + getUri(request) + "] returned [" + new StatusLine(httpResponse) + "]" ); } if (logger.isWarnEnabled()) { @@ -109,7 +102,7 @@ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpR */ static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) { if (logger.isDebugEnabled()) { - logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e); + logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request) + "] failed", e); } if (tracer.isTraceEnabled()) { String traceRequest; @@ -127,7 +120,7 @@ static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[ StringBuilder message = new StringBuilder("request [").append(request.getMethod()) .append(" ") .append(host) - .append(getUri(request.getRequestLine())) + .append(getUri(request)) .append("] returned ") .append(warnings.length) .append(" warnings: "); @@ -144,17 +137,18 @@ static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[ * Creates curl output for given request */ static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException { - String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request.getRequestLine()) + "'"; - if (request instanceof HttpEntityEnclosingRequest) { - HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; - if (enclosingRequest.getEntity() != null) { - requestLine += " -d '"; - HttpEntity entity = enclosingRequest.getEntity(); - if (entity.isRepeatable() == false) { - entity = new BufferedHttpEntity(enclosingRequest.getEntity()); - enclosingRequest.setEntity(entity); - } + String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request) + "'"; + if (request.getEntity() != null) { + requestLine += " -d '"; + HttpEntity entity = request.getEntity(); + if (entity.isRepeatable() == false) { + entity = new BufferedHttpEntity(request.getEntity()); + request.setEntity(entity); + } + try { requestLine += EntityUtils.toString(entity, StandardCharsets.UTF_8) + "'"; + } catch (final ParseException ex) { + throw new IOException(ex); } } return requestLine; @@ -163,10 +157,10 @@ static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IO /** * Creates curl output for given response */ - static String buildTraceResponse(HttpResponse httpResponse) throws IOException { + static String 
buildTraceResponse(ClassicHttpResponse httpResponse) throws IOException { StringBuilder responseLine = new StringBuilder(); - responseLine.append("# ").append(httpResponse.getStatusLine()); - for (Header header : httpResponse.getAllHeaders()) { + responseLine.append("# ").append(new StatusLine(httpResponse)); + for (Header header : httpResponse.getHeaders()) { responseLine.append("\n# ").append(header.getName()).append(": ").append(header.getValue()); } responseLine.append("\n#"); @@ -176,7 +170,7 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException { entity = new BufferedHttpEntity(entity); } httpResponse.setEntity(entity); - ContentType contentType = ContentType.get(entity); + ContentType contentType = ContentType.parse(entity.getContentType()); Charset charset = StandardCharsets.UTF_8; if (contentType != null && contentType.getCharset() != null) { charset = contentType.getCharset(); @@ -191,10 +185,14 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException { return responseLine.toString(); } - private static String getUri(RequestLine requestLine) { - if (requestLine.getUri().charAt(0) != '/') { - return "/" + requestLine.getUri(); + private static String getUri(HttpUriRequest request) { + final String uri = request.getRequestUri(); + if (uri == null) { + return "/"; + } else if (!uri.startsWith("/")) { + return "/" + uri; + } else { + return uri; } - return requestLine.getUri(); } } diff --git a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java index 5390e303ff499..189d785faaf45 100644 --- a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java +++ b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import java.util.ArrayList; @@ -86,7 +86,7 @@ public List
<Header>
getHeaders() { /** * The {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the + * {@link AsyncResponseConsumer} callback per retry. Controls how the * response body gets streamed from a non-blocking HTTP connection on the * client side. */ @@ -124,6 +124,9 @@ public RequestConfig getRequestConfig() { return requestConfig; } + /** + * Convert request options to string representation + */ @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -152,6 +155,10 @@ public String toString() { return b.append('}').toString(); } + /** + * Compare two request options for equality + * @param obj request options instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || (obj.getClass() != getClass())) { @@ -167,6 +174,9 @@ public boolean equals(Object obj) { && Objects.equals(warningsHandler, other.warningsHandler); } + /** + * Calculate the hash code of the request options + */ @Override public int hashCode() { return Objects.hash(headers, httpAsyncResponseConsumerFactory, warningsHandler); @@ -218,11 +228,11 @@ public Builder addHeader(String name, String value) { /** * Set the {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the + * {@link AsyncResponseConsumer} callback per retry. Controls how the * response body gets streamed from a non-blocking HTTP connection on the * client side. * - * @param httpAsyncResponseConsumerFactory factory for creating {@link HttpAsyncResponseConsumer}. + * @param httpAsyncResponseConsumerFactory factory for creating {@link AsyncResponseConsumer}. * @throws NullPointerException if {@code httpAsyncResponseConsumerFactory} is null. 
*/ public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { diff --git a/client/rest/src/main/java/org/opensearch/client/Response.java b/client/rest/src/main/java/org/opensearch/client/Response.java index d380607b7df9e..c758826b776ba 100644 --- a/client/rest/src/main/java/org/opensearch/client/Response.java +++ b/client/rest/src/main/java/org/opensearch/client/Response.java @@ -32,12 +32,13 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import java.util.ArrayList; import java.util.List; @@ -53,9 +54,9 @@ public class Response { private final RequestLine requestLine; private final HttpHost host; - private final HttpResponse response; + private final ClassicHttpResponse response; - Response(RequestLine requestLine, HttpHost host, HttpResponse response) { + Response(RequestLine requestLine, HttpHost host, ClassicHttpResponse response) { Objects.requireNonNull(requestLine, "requestLine cannot be null"); Objects.requireNonNull(host, "host cannot be null"); Objects.requireNonNull(response, "response cannot be null"); @@ -82,14 +83,14 @@ public HttpHost getHost() { * Returns the status line of the current response */ public StatusLine getStatusLine() { - return response.getStatusLine(); + return new StatusLine(response); } /** * Returns all the response headers */ public Header[] getHeaders() { - return response.getAllHeaders(); + return response.getHeaders(); } /** @@ -199,12 +200,15 @@ public boolean hasWarnings() { return warnings != null && warnings.length > 0; } - HttpResponse getHttpResponse() { + ClassicHttpResponse getHttpResponse() { return response; } + /** + * Convert response to string representation + */ @Override public String toString() { - return "Response{" + "requestLine=" + requestLine + ", host=" + host + ", response=" + response.getStatusLine() + '}'; + return "Response{" + "requestLine=" + requestLine + ", host=" + host + ", response=" + getStatusLine() + '}'; } } diff --git a/client/rest/src/main/java/org/opensearch/client/ResponseException.java b/client/rest/src/main/java/org/opensearch/client/ResponseException.java index 8104c32c422e5..ed816c7e1177e 100644 --- a/client/rest/src/main/java/org/opensearch/client/ResponseException.java +++ b/client/rest/src/main/java/org/opensearch/client/ResponseException.java @@ -32,9 +32,10 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.BufferedHttpEntity; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.BufferedHttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; import java.io.IOException; import java.util.Locale; @@ -77,7 +78,11 @@ static String buildMessage(Response response) throws IOException { entity = new BufferedHttpEntity(entity); response.getHttpResponse().setEntity(entity); } - message += "\n" + EntityUtils.toString(entity); + try { + message += 
"\n" + EntityUtils.toString(entity); + } catch (final ParseException ex) { + throw new IOException(ex); + } } return message; } diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 92aed2c8fb179..9d140a145b004 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -33,36 +33,43 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.ConnectionClosedException; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.entity.HttpEntityWrapper; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequest; -import org.apache.http.HttpResponse; -import org.apache.http.client.AuthCache; -import org.apache.http.client.ClientProtocolException; -import org.apache.http.client.entity.GzipCompressingEntity; -import org.apache.http.client.entity.GzipDecompressingEntity; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.conn.ConnectTimeoutException; -import org.apache.http.impl.auth.BasicScheme; -import org.apache.http.impl.client.BasicAuthCache; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.nio.client.methods.HttpAsyncMethods; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.HttpEntityWrapper; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.client5.http.auth.AuthCache; +import org.apache.hc.client5.http.auth.AuthScheme; +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.Credentials; +import org.apache.hc.client5.http.auth.CredentialsProvider; +import org.apache.hc.client5.http.ConnectTimeoutException; +import org.apache.hc.client5.http.ClientProtocolException; +import org.apache.hc.client5.http.entity.GzipDecompressingEntity; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import 
org.apache.hc.client5.http.protocol.HttpClientContext; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactor.IOReactorStatus; +import org.apache.hc.core5.util.Args; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.apache.hc.core5.concurrent.FutureCallback; +import org.apache.hc.client5.http.impl.auth.BasicScheme; +import org.apache.hc.client5.http.impl.auth.BasicAuthCache; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; import javax.net.ssl.SSLHandshakeException; import java.io.ByteArrayInputStream; @@ -70,6 +77,7 @@ import java.io.Closeable; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.net.ConnectException; import java.net.SocketTimeoutException; import java.net.URI; @@ -92,6 +100,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.zip.GZIPOutputStream; @@ -218,7 +227,7 @@ public static RestClientBuilder builder(String cloudId) { } String url = decodedParts[1] + "." + domain; - return builder(new HttpHost(url, port, "https")); + return builder(new HttpHost("https", url, port)); } /** @@ -287,7 +296,7 @@ public List getNodes() { * @return client running status */ public boolean isRunning() { - return client.isRunning(); + return client.getStatus() == IOReactorStatus.ACTIVE; } /** @@ -323,7 +332,7 @@ public Response performRequest(Request request) throws IOException { private Response performRequest(final NodeTuple> nodeTuple, final InternalRequest request, Exception previousException) throws IOException { RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); - HttpResponse httpResponse; + ClassicHttpResponse httpResponse; try { httpResponse = client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, null).get(); } catch (Exception e) { @@ -353,18 +362,18 @@ private Response performRequest(final NodeTuple> nodeTuple, final throw responseOrResponseException.responseException; } - private ResponseOrResponseException convertResponse(InternalRequest request, Node node, HttpResponse httpResponse) throws IOException { + private ResponseOrResponseException convertResponse(InternalRequest request, Node node, ClassicHttpResponse httpResponse) + throws IOException { RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); - int statusCode = httpResponse.getStatusLine().getStatusCode(); + int statusCode = httpResponse.getCode(); Optional.ofNullable(httpResponse.getEntity()) .map(HttpEntity::getContentEncoding) - .map(Header::getValue) .filter("gzip"::equalsIgnoreCase) .map(gzipHeaderValue -> new GzipDecompressingEntity(httpResponse.getEntity())) .ifPresent(httpResponse::setEntity); - Response response = new Response(request.httpRequest.getRequestLine(), node.getHost(), httpResponse); + Response response = new Response(new RequestLine(request.httpRequest), node.getHost(), httpResponse); if (isSuccessfulResponse(statusCode) || request.ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { onResponse(node); if 
(request.warningsHandler.warningsShouldFailRequest(response.getWarnings())) { @@ -418,47 +427,56 @@ private void performRequestAsync( ) { request.cancellable.runIfNotCancelled(() -> { final RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); - client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, new FutureCallback() { - @Override - public void completed(HttpResponse httpResponse) { - try { - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); - if (responseOrResponseException.responseException == null) { - listener.onSuccess(responseOrResponseException.response); - } else { + Future future = client.execute( + context.requestProducer, + context.asyncResponseConsumer, + context.context, + new FutureCallback() { + @Override + public void completed(ClassicHttpResponse httpResponse) { + try { + ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + if (responseOrResponseException.responseException == null) { + listener.onSuccess(responseOrResponseException.response); + } else { + if (nodeTuple.nodes.hasNext()) { + listener.trackFailure(responseOrResponseException.responseException); + performRequestAsync(nodeTuple, request, listener); + } else { + listener.onDefinitiveFailure(responseOrResponseException.responseException); + } + } + } catch (Exception e) { + listener.onDefinitiveFailure(e); + } + } + + @Override + public void failed(Exception failure) { + try { + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); + onFailure(context.node); if (nodeTuple.nodes.hasNext()) { - listener.trackFailure(responseOrResponseException.responseException); + listener.trackFailure(failure); performRequestAsync(nodeTuple, request, listener); } else { - listener.onDefinitiveFailure(responseOrResponseException.responseException); + listener.onDefinitiveFailure(failure); } + } catch (Exception e) { + listener.onDefinitiveFailure(e); } - } catch (Exception e) { - listener.onDefinitiveFailure(e); } - } - @Override - public void failed(Exception failure) { - try { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); - onFailure(context.node); - if (nodeTuple.nodes.hasNext()) { - listener.trackFailure(failure); - performRequestAsync(nodeTuple, request, listener); - } else { - listener.onDefinitiveFailure(failure); - } - } catch (Exception e) { - listener.onDefinitiveFailure(e); + @Override + public void cancelled() { + listener.onDefinitiveFailure(Cancellable.newCancellationException()); } } + ); - @Override - public void cancelled() { - listener.onDefinitiveFailure(Cancellable.newCancellationException()); - } - }); + if (future instanceof org.apache.hc.core5.concurrent.Cancellable) { + request.httpRequest.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + } }); } @@ -583,6 +601,9 @@ private void onFailure(Node node) { failureListener.onFailure(node); } + /** + * Close the underlying {@link CloseableHttpAsyncClient} instance + */ @Override public void close() throws IOException { client.close(); @@ -608,12 +629,12 @@ private static void addSuppressedException(Exception suppressedException, Except } } - private HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { + private HttpUriRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { switch (method.toUpperCase(Locale.ROOT)) { - case 
HttpDeleteWithEntity.METHOD_NAME: - return addRequestBody(new HttpDeleteWithEntity(uri), entity); - case HttpGetWithEntity.METHOD_NAME: - return addRequestBody(new HttpGetWithEntity(uri), entity); + case HttpDelete.METHOD_NAME: + return addRequestBody(new HttpDelete(uri), entity); + case HttpGet.METHOD_NAME: + return addRequestBody(new HttpGet(uri), entity); case HttpHead.METHOD_NAME: return addRequestBody(new HttpHead(uri), entity); case HttpOptions.METHOD_NAME: @@ -633,22 +654,18 @@ private HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity ent } } - private HttpRequestBase addRequestBody(HttpRequestBase httpRequest, HttpEntity entity) { + private HttpUriRequestBase addRequestBody(HttpUriRequestBase httpRequest, HttpEntity entity) { if (entity != null) { - if (httpRequest instanceof HttpEntityEnclosingRequestBase) { - if (compressionEnabled) { - if (chunkedEnabled.isPresent()) { - entity = new ContentCompressingEntity(entity, chunkedEnabled.get()); - } else { - entity = new ContentCompressingEntity(entity); - } - } else if (chunkedEnabled.isPresent()) { - entity = new ContentHttpEntity(entity, chunkedEnabled.get()); + if (compressionEnabled) { + if (chunkedEnabled.isPresent()) { + entity = new ContentCompressingEntity(entity, chunkedEnabled.get()); + } else { + entity = new ContentCompressingEntity(entity); } - ((HttpEntityEnclosingRequestBase) httpRequest).setEntity(entity); - } else { - throw new UnsupportedOperationException(httpRequest.getMethod() + " with body is not supported"); + } else if (chunkedEnabled.isPresent()) { + entity = new ContentHttpEntity(entity, chunkedEnabled.get()); } + httpRequest.setEntity(entity); } return httpRequest; } @@ -673,7 +690,12 @@ static URI buildUri(String pathPrefix, String path, Map params) for (Map.Entry param : params.entrySet()) { uriBuilder.addParameter(param.getKey(), param.getValue()); } - return uriBuilder.build(); + + // The Apache HttpClient 5.x **does not** encode URIs but Apache HttpClient 4.x does. It leads + // to the issues with Unicode characters (f.e. document IDs could contain Unicode characters) and + // weird characters are being passed instead. By using `toASCIIString()`, the URI is already created + // with proper encoding. + return new URI(uriBuilder.build().toASCIIString()); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getMessage(), e); } @@ -802,7 +824,7 @@ public void remove() { private class InternalRequest { private final Request request; private final Set ignoreErrorCodes; - private final HttpRequestBase httpRequest; + private final HttpUriRequestBase httpRequest; private final Cancellable cancellable; private final WarningsHandler warningsHandler; @@ -839,7 +861,7 @@ private void setHeaders(HttpRequest httpRequest, Collection
requestHeade } } - private void setRequestConfig(HttpRequestBase httpRequest, RequestConfig requestConfig) { + private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requestConfig) { if (requestConfig != null) { httpRequest.setConfig(requestConfig); } @@ -851,21 +873,81 @@ RequestContext createContextForNextAttempt(Node node, AuthCache authCache) { } } + /** + * The Apache HttpClient 5 adds "Authorization" header even if the credentials for basic authentication are not provided + * (effectively, username and password are 'null'). To workaround that, wrapping the AuthCache around current HttpClientContext + * and ensuring that the credentials are indeed provided for particular HttpHost, otherwise returning no authentication scheme + * even if it is present in the cache. + */ + private static class WrappingAuthCache implements AuthCache { + private final HttpClientContext context; + private final AuthCache delegate; + private final boolean usePersistentCredentials = true; + + public WrappingAuthCache(HttpClientContext context, AuthCache delegate) { + this.context = context; + this.delegate = delegate; + } + + @Override + public void put(HttpHost host, AuthScheme authScheme) { + delegate.put(host, authScheme); + } + + @Override + public AuthScheme get(HttpHost host) { + AuthScheme authScheme = delegate.get(host); + + if (authScheme != null) { + final CredentialsProvider credsProvider = context.getCredentialsProvider(); + if (credsProvider != null) { + final String schemeName = authScheme.getName(); + final AuthScope authScope = new AuthScope(host, null, schemeName); + final Credentials creds = credsProvider.getCredentials(authScope, context); + + // See please https://issues.apache.org/jira/browse/HTTPCLIENT-2203 + if (authScheme instanceof BasicScheme) { + ((BasicScheme) authScheme).initPreemptive(creds); + } + + if (creds == null) { + return null; + } + } + } + + return authScheme; + } + + @Override + public void remove(HttpHost host) { + if (!usePersistentCredentials) { + delegate.remove(host); + } + } + + @Override + public void clear() { + delegate.clear(); + } + + } + private static class RequestContext { private final Node node; - private final HttpAsyncRequestProducer requestProducer; - private final HttpAsyncResponseConsumer asyncResponseConsumer; + private final AsyncRequestProducer requestProducer; + private final AsyncResponseConsumer asyncResponseConsumer; private final HttpClientContext context; RequestContext(InternalRequest request, Node node, AuthCache authCache) { this.node = node; // we stream the request body if the entity allows for it - this.requestProducer = HttpAsyncMethods.create(node.getHost(), request.httpRequest); + this.requestProducer = HttpUriRequestProducer.create(request.httpRequest, node.getHost()); this.asyncResponseConsumer = request.request.getOptions() .getHttpAsyncResponseConsumerFactory() .createHttpAsyncResponseConsumer(); this.context = HttpClientContext.create(); - context.setAuthCache(authCache); + context.setAuthCache(new WrappingAuthCache(context, authCache)); } } @@ -966,7 +1048,9 @@ private static Exception extractAndWrapCause(Exception exception) { /** * A gzip compressing entity that also implements {@code getContent()}. 
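+ * A rough usage sketch (the entity payload here is made up for illustration):
+ * <pre>{@code
+ * HttpEntity raw = new StringEntity("{}", ContentType.APPLICATION_JSON);
+ * HttpEntity gzipped = new ContentCompressingEntity(raw);
+ * // getContent() yields the gzip-compressed bytes and getContentEncoding() reports "gzip"
+ * }</pre>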
*/ - public static class ContentCompressingEntity extends GzipCompressingEntity { + public static class ContentCompressingEntity extends HttpEntityWrapper { + private static final String GZIP_CODEC = "gzip"; + private Optional chunkedEnabled; /** @@ -979,6 +1063,14 @@ public ContentCompressingEntity(HttpEntity entity) { this.chunkedEnabled = Optional.empty(); } + /** + * Returns content encoding of the entity, if known. + */ + @Override + public String getContentEncoding() { + return GZIP_CODEC; + } + /** * Creates a {@link ContentCompressingEntity} instance with the provided HTTP entity. * @@ -990,11 +1082,14 @@ public ContentCompressingEntity(HttpEntity entity, boolean chunkedEnabled) { this.chunkedEnabled = Optional.of(chunkedEnabled); } + /** + * Returns a content stream of the entity. + */ @Override public InputStream getContent() throws IOException { ByteArrayInputOutputStream out = new ByteArrayInputOutputStream(1024); try (GZIPOutputStream gzipOut = new GZIPOutputStream(out)) { - wrappedEntity.writeTo(gzipOut); + super.writeTo(gzipOut); } return out.asInput(); } @@ -1030,9 +1125,24 @@ public long getContentLength() { return size; } } else { - return super.getContentLength(); + return -1; } } + + /** + * Writes the entity content out to the output stream. + * @param outStream the output stream to write entity content to + * @throws IOException if an I/O error occurs + */ + @Override + public void writeTo(final OutputStream outStream) throws IOException { + Args.notNull(outStream, "Output stream"); + final GZIPOutputStream gzip = new GZIPOutputStream(outStream); + super.writeTo(gzip); + // Only close output stream if the wrapped entity has been + // successfully written out + gzip.close(); + } } /** diff --git a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java index 8841d371754c3..679a7ccb17d49 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java @@ -32,15 +32,23 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.nio.conn.SchemeIOSessionStrategy; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.util.Timeout; +import org.apache.hc.client5.http.async.HttpAsyncClient; +import org.apache.hc.client5.http.auth.CredentialsProvider; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.classic.CloseableHttpClient; +import org.apache.hc.client5.http.impl.classic.HttpClientBuilder; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; import javax.net.ssl.SSLContext; + import java.security.AccessController; import java.security.NoSuchAlgorithmException; import 
java.security.PrivilegedAction; @@ -50,19 +58,19 @@ /** * Helps creating a new {@link RestClient}. Allows to set the most common http client configuration options when internally - * creating the underlying {@link org.apache.http.nio.client.HttpAsyncClient}. Also allows to provide an externally created - * {@link org.apache.http.nio.client.HttpAsyncClient} in case additional customization is needed. + * creating the underlying {@link HttpAsyncClient}. Also allows to provide an externally created + * {@link HttpAsyncClient} in case additional customization is needed. */ public final class RestClientBuilder { /** - * The default connection timout in milliseconds. + * The default connection timeout in milliseconds. */ public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000; /** - * The default socket timeout in milliseconds. + * The default response timeout in milliseconds. */ - public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 30000; + public static final int DEFAULT_RESPONSE_TIMEOUT_MILLIS = 30000; /** * The default maximum of connections per route. @@ -296,20 +304,26 @@ public RestClient build() { private CloseableHttpAsyncClient createHttpClient() { // default timeouts are all infinite RequestConfig.Builder requestConfigBuilder = RequestConfig.custom() - .setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS) - .setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS); + .setConnectTimeout(Timeout.ofMilliseconds(DEFAULT_CONNECT_TIMEOUT_MILLIS)) + .setResponseTimeout(Timeout.ofMilliseconds(DEFAULT_RESPONSE_TIMEOUT_MILLIS)); if (requestConfigCallback != null) { requestConfigBuilder = requestConfigCallback.customizeRequestConfig(requestConfigBuilder); } try { - HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create() - .setDefaultRequestConfig(requestConfigBuilder.build()) - // default settings for connection pooling may be too constraining + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(SSLContext.getDefault()).build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE) .setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL) - .setSSLContext(SSLContext.getDefault()) - .setTargetAuthenticationStrategy(new PersistentCredentialsAuthenticationStrategy()); + .setTlsStrategy(tlsStrategy) + .build(); + + HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create() + .setDefaultRequestConfig(requestConfigBuilder.build()) + .setConnectionManager(connectionManager) + .setTargetAuthenticationStrategy(DefaultAuthenticationStrategy.INSTANCE) + .disableAutomaticRetries(); if (httpClientConfigCallback != null) { httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder); } @@ -344,9 +358,9 @@ public interface RequestConfigCallback { public interface HttpClientConfigCallback { /** * Allows to customize the {@link CloseableHttpAsyncClient} being created and used by the {@link RestClient}. - * Commonly used to customize the default {@link org.apache.http.client.CredentialsProvider} for authentication - * or the {@link SchemeIOSessionStrategy} for communication through ssl without losing any other useful default - * value that the {@link RestClientBuilder} internally sets, like connection pooling. 
+ * Commonly used to customize the default {@link CredentialsProvider} for authentication for communication + * through TLS/SSL without losing any other useful default value that the {@link RestClientBuilder} internally + * sets, like connection pooling. * * @param httpClientBuilder the {@link HttpClientBuilder} for customizing the client instance. */ diff --git a/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java b/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java new file mode 100644 index 0000000000000..a65427cd0b032 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client.http; + +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.support.BasicRequestProducer; +import org.apache.hc.core5.net.URIAuthority; +import org.apache.hc.core5.util.Args; +import org.opensearch.client.nio.HttpEntityAsyncEntityProducer; + +/** + * The producer of the {@link HttpUriRequestBase} instances associated with a particular {@link HttpHost} + */ +public class HttpUriRequestProducer extends BasicRequestProducer { + private final HttpUriRequestBase request; + + HttpUriRequestProducer(final HttpUriRequestBase request, final AsyncEntityProducer entityProducer) { + super(request, entityProducer); + this.request = request; + } + + /** + * Get the produced {@link HttpUriRequestBase} instance + * @return produced {@link HttpUriRequestBase} instance + */ + public HttpUriRequestBase getRequest() { + return request; + } + + /** + * Create new request producer for {@link HttpUriRequestBase} instance and {@link HttpHost} + * @param request {@link HttpUriRequestBase} instance + * @param host {@link HttpHost} instance + * @return new request producer + */ + public static HttpUriRequestProducer create(final HttpUriRequestBase request, final HttpHost host) { + Args.notNull(request, "Request"); + Args.notNull(host, "HttpHost"); + + // TODO: Should we copy request here instead of modifying in place? + request.setAuthority(new URIAuthority(host)); + request.setScheme(host.getSchemeName()); + + final HttpEntity entity = request.getEntity(); + AsyncEntityProducer entityProducer = null; + + if (entity != null) { + entityProducer = new HttpEntityAsyncEntityProducer(entity); + } + + return new HttpUriRequestProducer(request, entityProducer); + } + +} diff --git a/client/rest/src/main/java/org/opensearch/client/http/package-info.java b/client/rest/src/main/java/org/opensearch/client/http/package-info.java new file mode 100644 index 0000000000000..32e0aa2016d53 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/http/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * HTTP support classes for REST client. 
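+ *
+ * An illustrative sketch (host and endpoint are assumptions) of pairing a classic request
+ * with a target host through {@code HttpUriRequestProducer}:
+ * <pre>{@code
+ * HttpHost host = new HttpHost("https", "localhost", 9200);
+ * HttpUriRequestBase request = new HttpGet("/_cluster/health");
+ * AsyncRequestProducer producer = HttpUriRequestProducer.create(request, host);
+ * }</pre>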
+ */ +package org.opensearch.client.http; diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java new file mode 100644 index 0000000000000..9bd17d1c24c7e --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.client.nio; + +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpException; +import org.apache.hc.core5.http.nio.AsyncEntityConsumer; +import org.apache.hc.core5.http.nio.entity.AbstractBinAsyncEntityConsumer; +import org.apache.hc.core5.util.ByteArrayBuffer; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Default implementation of {@link AsyncEntityConsumer}. Buffers the whole + * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response. + * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer + * than the configured buffer limit. + */ +public class HeapBufferedAsyncEntityConsumer extends AbstractBinAsyncEntityConsumer { + + private final int bufferLimitBytes; + private AtomicReference bufferRef = new AtomicReference<>(); + + /** + * Creates a new instance of this consumer with the provided buffer limit. + * + * @param bufferLimit the buffer limit. Must be greater than 0. + * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0. + */ + public HeapBufferedAsyncEntityConsumer(int bufferLimit) { + if (bufferLimit <= 0) { + throw new IllegalArgumentException("bufferLimit must be greater than 0"); + } + this.bufferLimitBytes = bufferLimit; + } + + /** + * Get the limit of the buffer. + */ + public int getBufferLimit() { + return bufferLimitBytes; + } + + /** + * Triggered to signal beginning of entity content stream. + * + * @param contentType the entity content type + */ + @Override + protected void streamStart(final ContentType contentType) throws HttpException, IOException {} + + /** + * Triggered to obtain the capacity increment. + * + * @return the number of bytes this consumer is prepared to process. + */ + @Override + protected int capacityIncrement() { + return Integer.MAX_VALUE; + } + + /** + * Triggered to pass incoming data packet to the data consumer. + * + * @param src the data packet. + * @param endOfStream flag indicating whether this data packet is the last in the data stream. 
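+ *
+ * A minimal sketch of the limit this consumer enforces (the 1024-byte limit is arbitrary):
+ * <pre>{@code
+ * // accepts at most 1024 bytes in total; exceeding the limit raises ContentTooLongException
+ * HeapBufferedAsyncEntityConsumer consumer = new HeapBufferedAsyncEntityConsumer(1024);
+ * }</pre>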
+ * + */ + @Override + protected void data(final ByteBuffer src, final boolean endOfStream) throws IOException { + if (src == null) { + return; + } + + ByteArrayBuffer buffer = bufferRef.get(); + if (buffer == null) { + buffer = new ByteArrayBuffer(bufferLimitBytes); + if (bufferRef.compareAndSet(null, buffer) == false) { + buffer = bufferRef.get(); + } + } + + int len = src.limit(); + if (buffer.length() + len > bufferLimitBytes) { + throw new ContentTooLongException( + "entity content is too long [" + len + "] for the configured buffer limit [" + bufferLimitBytes + "]" + ); + } + + if (len < 0) { + len = 4096; + } + + if (src.hasArray()) { + buffer.append(src.array(), src.arrayOffset() + src.position(), src.remaining()); + } else { + while (src.hasRemaining()) { + buffer.append(src.get()); + } + } + } + + /** + * Triggered to generate entity representation. + * + * @return the entity content + */ + @Override + protected byte[] generateContent() throws IOException { + final ByteArrayBuffer buffer = bufferRef.get(); + return buffer == null ? new byte[0] : buffer.toByteArray(); + } + + /** + * Release resources being held + */ + @Override + public void releaseResources() { + ByteArrayBuffer buffer = bufferRef.getAndSet(null); + if (buffer != null) { + buffer.clear(); + buffer = null; + } + } +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java new file mode 100644 index 0000000000000..3d93478f49f99 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java @@ -0,0 +1,123 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.client.nio; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpException; +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.support.AbstractAsyncResponseConsumer; +import org.apache.hc.core5.http.protocol.HttpContext; + +import java.io.IOException; + +/** + * Default implementation of {@link AsyncResponseConsumer}. Buffers the whole + * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response. + * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer + * than the configured buffer limit. + */ +public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer { + private static final Log LOGGER = LogFactory.getLog(HeapBufferedAsyncResponseConsumer.class); + private final int bufferLimit; + + /** + * Creates a new instance of this consumer with the provided buffer limit. + * + * @param bufferLimit the buffer limit. Must be greater than 0. + * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0. + */ + public HeapBufferedAsyncResponseConsumer(int bufferLimit) { + super(new HeapBufferedAsyncEntityConsumer(bufferLimit)); + this.bufferLimit = bufferLimit; + } + + /** + * Get the limit of the buffer. + */ + public int getBufferLimit() { + return bufferLimit; + } + + /** + * Triggered to signal receipt of an intermediate (1xx) HTTP response. + * + * @param response the intermediate (1xx) HTTP response. + * @param context the actual execution context. + */ + @Override + public void informationResponse(final HttpResponse response, final HttpContext context) throws HttpException, IOException {} + + /** + * Triggered to generate object that represents a result of response message processing. + * @param response the response message. + * @param entity the response entity. + * @param contentType the response content type. + * @return the result of response processing. 
+ */ + @Override + protected ClassicHttpResponse buildResult(final HttpResponse response, final byte[] entity, final ContentType contentType) { + final ClassicHttpResponse classicResponse = new BasicClassicHttpResponse(response.getCode()); + classicResponse.setVersion(response.getVersion()); + classicResponse.setHeaders(response.getHeaders()); + classicResponse.setReasonPhrase(response.getReasonPhrase()); + if (response.getLocale() != null) { + classicResponse.setLocale(response.getLocale()); + } + + if (entity != null) { + String encoding = null; + + try { + final Header contentEncoding = response.getHeader(HttpHeaders.CONTENT_ENCODING); + if (contentEncoding != null) { + encoding = contentEncoding.getValue(); + } + } catch (final HttpException ex) { + LOGGER.debug("Unable to detect content encoding", ex); + } + + final ByteArrayEntity httpEntity = new ByteArrayEntity(entity, contentType, encoding); + classicResponse.setEntity(httpEntity); + } + + return classicResponse; + } +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java b/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java new file mode 100644 index 0000000000000..81fe77ddcfbed --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client.nio; + +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.DataStreamChannel; +import org.apache.hc.core5.http.nio.ResourceHolder; +import org.apache.hc.core5.util.Args; +import org.apache.hc.core5.util.Asserts; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +/** + * The {@link AsyncEntityProducer} implementation for {@link HttpEntity} + */ +public class HttpEntityAsyncEntityProducer implements AsyncEntityProducer { + + private final HttpEntity entity; + private final ByteBuffer byteBuffer; + private final boolean chunked; + private final AtomicReference exception; + private final AtomicReference channelRef; + private boolean eof; + + /** + * Create new async HTTP entity producer + * @param entity HTTP entity + * @param bufferSize buffer size + */ + public HttpEntityAsyncEntityProducer(final HttpEntity entity, final int bufferSize) { + this.entity = Args.notNull(entity, "Http Entity"); + this.byteBuffer = ByteBuffer.allocate(bufferSize); + this.chunked = entity.isChunked(); + this.exception = new AtomicReference<>(); + this.channelRef = new AtomicReference<>(); + } + + /** + * Create new async HTTP entity producer with default buffer size (8192 bytes) + * @param entity HTTP entity + */ + public HttpEntityAsyncEntityProducer(final HttpEntity entity) { + this(entity, 8192); + } + + /** + * Determines whether the producer can consistently produce the same content + * after invocation of {@link ResourceHolder#releaseResources()}. + */ + @Override + public boolean isRepeatable() { + return entity.isRepeatable(); + } + + /** + * Returns content type of the entity, if known. 
+ */ + @Override + public String getContentType() { + return entity.getContentType(); + } + + /** + * Returns length of the entity, if known. + */ + @Override + public long getContentLength() { + return entity.getContentLength(); + } + + /** + * Returns the number of bytes immediately available for output. + * This method can be used as a hint to control output events + * of the underlying I/O session. + * + * @return the number of bytes immediately available for output + */ + @Override + public int available() { + return Integer.MAX_VALUE; + } + + /** + * Returns content encoding of the entity, if known. + */ + @Override + public String getContentEncoding() { + return entity.getContentEncoding(); + } + + /** + * Returns chunked transfer hint for this entity. + *
<p>
+ * The behavior of wrapping entities is implementation dependent, + * but should respect the primary purpose. + *
</p>
+ */ + @Override + public boolean isChunked() { + return chunked; + } + + /** + * Preliminary declaration of trailing headers. + */ + @Override + public Set getTrailerNames() { + return entity.getTrailerNames(); + } + + /** + * Triggered to signal the ability of the underlying data channel + * to accept more data. The data producer can choose to write data + * immediately inside the call or asynchronously at some later point. + * + * @param channel the data channel capable to accepting more data. + */ + @Override + public void produce(final DataStreamChannel channel) throws IOException { + ReadableByteChannel stream = channelRef.get(); + if (stream == null) { + stream = Channels.newChannel(entity.getContent()); + Asserts.check(channelRef.getAndSet(stream) == null, "Illegal producer state"); + } + if (!eof) { + final int bytesRead = stream.read(byteBuffer); + if (bytesRead < 0) { + eof = true; + } + } + if (byteBuffer.position() > 0) { + byteBuffer.flip(); + channel.write(byteBuffer); + byteBuffer.compact(); + } + if (eof && byteBuffer.position() == 0) { + channel.endStream(); + releaseResources(); + } + } + + /** + * Triggered to signal a failure in data generation. + * + * @param cause the cause of the failure. + */ + @Override + public void failed(final Exception cause) { + if (exception.compareAndSet(null, cause)) { + releaseResources(); + } + } + + /** + * Release resources being held + */ + @Override + public void releaseResources() { + eof = false; + final ReadableByteChannel stream = channelRef.getAndSet(null); + if (stream != null) { + try { + stream.close(); + } catch (final IOException ex) { + /* Close quietly */ + } + } + } + +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/package-info.java b/client/rest/src/main/java/org/opensearch/client/nio/package-info.java new file mode 100644 index 0000000000000..ce4961ed21f7c --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * NIO support classes for REST client. 
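+ *
+ * An illustrative sketch (buffer size and entity payload are arbitrary) of the two
+ * building blocks provided here:
+ * <pre>{@code
+ * AsyncResponseConsumer<ClassicHttpResponse> consumer =
+ *     new HeapBufferedAsyncResponseConsumer(100 * 1024 * 1024);
+ * AsyncEntityProducer producer =
+ *     new HttpEntityAsyncEntityProducer(new StringEntity("{}", ContentType.APPLICATION_JSON));
+ * }</pre>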
+ */ +package org.opensearch.client.nio; diff --git a/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java b/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java index 0a997a586acc9..9722ec867a376 100644 --- a/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java @@ -32,14 +32,11 @@ package org.opensearch.client; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; import java.util.concurrent.atomic.AtomicReference; @@ -116,9 +113,8 @@ public void onFailure(Exception exception) { private static Response mockResponse() { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); + RequestLine requestLine = new RequestLine("GET", "/", protocolVersion); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, "OK"); return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse); } } diff --git a/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java index fd18bba6ee548..b5aca86e95d6c 100644 --- a/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java b/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java index 22852fe4cb793..ed329d973eb78 100644 --- a/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java @@ -32,34 +32,31 @@ package org.opensearch.client; -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.ContentDecoder; -import org.apache.http.nio.IOControl; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import org.apache.http.protocol.HttpContext; - +import org.apache.hc.core5.http.ClassicHttpResponse; +import 
org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.EntityDetails; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.impl.BasicEntityDetails; +import org.apache.hc.core5.http.io.entity.AbstractHttpEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Modifier; +import java.nio.charset.StandardCharsets; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { @@ -67,33 +64,6 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { private static final int MAX_TEST_BUFFER_SIZE = 50 * 1024 * 1024; private static final int TEST_BUFFER_LIMIT = 10 * 1024 * 1024; - public void testResponseProcessing() throws Exception { - ContentDecoder contentDecoder = mock(ContentDecoder.class); - IOControl ioControl = mock(IOControl.class); - HttpContext httpContext = mock(HttpContext.class); - - HeapBufferedAsyncResponseConsumer consumer = spy(new HeapBufferedAsyncResponseConsumer(TEST_BUFFER_LIMIT)); - - ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); - httpResponse.setEntity(new StringEntity("test", ContentType.TEXT_PLAIN)); - - // everything goes well - consumer.responseReceived(httpResponse); - consumer.consumeContent(contentDecoder, ioControl); - consumer.responseCompleted(httpContext); - - verify(consumer).releaseResources(); - verify(consumer).buildResult(httpContext); - assertTrue(consumer.isDone()); - assertSame(httpResponse, consumer.getResult()); - - consumer.responseCompleted(httpContext); - verify(consumer, times(1)).releaseResources(); - verify(consumer, times(1)).buildResult(httpContext); - } - public void testDefaultBufferLimit() throws Exception { HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer(TEST_BUFFER_LIMIT); bufferLimitTest(consumer, TEST_BUFFER_LIMIT); @@ -127,7 +97,7 @@ public void testCanConfigureHeapBufferLimitFromOutsidePackage() throws ClassNotF assertThat(object, instanceOf(HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.class)); HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory = (HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory) object; - HttpAsyncResponseConsumer consumer = consumerFactory.createHttpAsyncResponseConsumer(); + AsyncResponseConsumer consumer = consumerFactory.createHttpAsyncResponseConsumer(); assertThat(consumer, instanceOf(HeapBufferedAsyncResponseConsumer.class)); 
HeapBufferedAsyncResponseConsumer bufferedAsyncResponseConsumer = (HeapBufferedAsyncResponseConsumer) consumer; assertEquals(bufferLimit, bufferedAsyncResponseConsumer.getBufferLimit()); @@ -138,23 +108,40 @@ public void testHttpAsyncResponseConsumerFactoryVisibility() throws ClassNotFoun } private static void bufferLimitTest(HeapBufferedAsyncResponseConsumer consumer, int bufferLimit) throws Exception { - ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - consumer.onResponseReceived(new BasicHttpResponse(statusLine)); + HttpContext httpContext = mock(HttpContext.class); + + BasicClassicHttpResponse response = new BasicClassicHttpResponse(200, "OK"); + consumer.consumeResponse(response, null, httpContext, null); final AtomicReference contentLength = new AtomicReference<>(); - HttpEntity entity = new StringEntity("", ContentType.APPLICATION_JSON) { + HttpEntity entity = new AbstractHttpEntity(ContentType.APPLICATION_JSON, null, false) { @Override public long getContentLength() { return contentLength.get(); } + + @Override + public InputStream getContent() throws IOException, UnsupportedOperationException { + return new ByteArrayInputStream("".getBytes(StandardCharsets.UTF_8)); + } + + @Override + public boolean isStreaming() { + return false; + } + + @Override + public void close() throws IOException {} }; contentLength.set(randomLongBetween(0L, bufferLimit)); - consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON); + response.setEntity(entity); + + final EntityDetails details = new BasicEntityDetails(4096, ContentType.APPLICATION_JSON); + consumer.consumeResponse(response, details, httpContext, null); contentLength.set(randomLongBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE)); try { - consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON); + consumer.consumeResponse(response, details, httpContext, null); } catch (ContentTooLongException e) { assertEquals( "entity content is too long [" + entity.getContentLength() + "] for the configured buffer limit [" + bufferLimit + "]", diff --git a/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java b/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java index 2b256e7205397..0e454c6f919f5 100644 --- a/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java +++ b/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import java.util.HashSet; import java.util.List; diff --git a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java index 65a831e59bfb0..cfc95f0281bcc 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/NodeTests.java b/client/rest/src/test/java/org/opensearch/client/NodeTests.java index 296c4a1f09122..748bec5fb7de5 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeTests.java +++ 
b/client/rest/src/test/java/org/opensearch/client/NodeTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.Arrays; diff --git a/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java index 0135cde573743..7dde1b96b3b45 100644 --- a/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java b/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java index 3c317db1b72d9..8dea2ad922bd6 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java @@ -32,27 +32,29 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.BasicHeader; +import 
org.apache.hc.core5.http.message.StatusLine; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -66,8 +68,8 @@ import static org.junit.Assert.assertThat; public class RequestLoggerTests extends RestClientTestCase { - public void testTraceRequest() throws IOException, URISyntaxException { - HttpHost host = new HttpHost("localhost", 9200, randomBoolean() ? "http" : "https"); + public void testTraceRequest() throws IOException, URISyntaxException, ParseException { + HttpHost host = new HttpHost(randomBoolean() ? "http" : "https", "localhost", 9200); String expectedEndpoint = "/index/type/_api"; URI uri; if (randomBoolean()) { @@ -77,11 +79,10 @@ public void testTraceRequest() throws IOException, URISyntaxException { } HttpUriRequest request = randomHttpRequest(uri); String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'"; - boolean hasBody = request instanceof HttpEntityEnclosingRequest && randomBoolean(); + boolean hasBody = !(request instanceof HttpTrace) && randomBoolean(); String requestBody = "{ \"field\": \"value\" }"; if (hasBody) { expected += " -d '" + requestBody + "'"; - HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; HttpEntity entity; switch (randomIntBetween(0, 4)) { case 0: @@ -94,10 +95,10 @@ public void testTraceRequest() throws IOException, URISyntaxException { ); break; case 2: - entity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON); + entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON); break; case 3: - entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON); + entity = new ByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON); break; case 4: // Evil entity without a charset @@ -106,24 +107,24 @@ public void testTraceRequest() throws IOException, URISyntaxException { default: throw new UnsupportedOperationException(); } - enclosingRequest.setEntity(entity); + request.setEntity(entity); } String traceRequest = RequestLogger.buildTraceRequest(request, host); assertThat(traceRequest, equalTo(expected)); if (hasBody) { // check that the body is still readable as most entities are not repeatable - String body = EntityUtils.toString(((HttpEntityEnclosingRequest) request).getEntity(), StandardCharsets.UTF_8); + String body = EntityUtils.toString(request.getEntity(), StandardCharsets.UTF_8); assertThat(body, equalTo(requestBody)); } } - public void testTraceResponse() throws IOException { + public void testTraceResponse() throws IOException, ParseException { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); int statusCode = randomIntBetween(200, 599); String reasonPhrase = "REASON"; - BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase); + StatusLine statusLine = new StatusLine(protocolVersion, statusCode, reasonPhrase); String expected = "# " + statusLine.toString(); - BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(statusCode, reasonPhrase); int numHeaders = randomIntBetween(0, 3); for (int i = 0; i < numHeaders; i++) { httpResponse.setHeader("header" + i, "value"); @@ -192,13 +193,13 @@ private static HttpUriRequest randomHttpRequest(URI uri) { int requestType = randomIntBetween(0, 7); switch (requestType) { case 0: - return new HttpGetWithEntity(uri); + return new HttpGet(uri); case 1: return new HttpPost(uri); case 2: return new 
HttpPut(uri); case 3: - return new HttpDeleteWithEntity(uri); + return new HttpDelete(uri); case 4: return new HttpHead(uri); case 5: diff --git a/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java b/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java index aaa40db1442ee..a7f9a48c73393 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java @@ -32,8 +32,9 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.util.Timeout; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import java.util.ArrayList; @@ -108,15 +109,15 @@ public void testSetRequestBuilder() { RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); RequestConfig.Builder requestConfigBuilder = RequestConfig.custom(); - int socketTimeout = 10000; - int connectTimeout = 100; - requestConfigBuilder.setSocketTimeout(socketTimeout).setConnectTimeout(connectTimeout); + Timeout responseTimeout = Timeout.ofMilliseconds(10000); + Timeout connectTimeout = Timeout.ofMilliseconds(100); + requestConfigBuilder.setResponseTimeout(responseTimeout).setConnectTimeout(connectTimeout); RequestConfig requestConfig = requestConfigBuilder.build(); builder.setRequestConfig(requestConfig); RequestOptions options = builder.build(); assertSame(options.getRequestConfig(), requestConfig); - assertEquals(options.getRequestConfig().getSocketTimeout(), socketTimeout); + assertEquals(options.getRequestConfig().getResponseTimeout(), responseTimeout); assertEquals(options.getRequestConfig().getConnectTimeout(), connectTimeout); } diff --git a/client/rest/src/test/java/org/opensearch/client/RequestTests.java b/client/rest/src/test/java/org/opensearch/client/RequestTests.java index ba15c0d0b733c..d11982e9f9642 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestTests.java @@ -32,15 +32,17 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; @@ -133,7 +135,7 @@ public void testSetJsonEntity() throws IOException { final String json = randomAsciiLettersOfLengthBetween(1, 100); request.setJsonEntity(json); - assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType()); ByteArrayOutputStream os = new ByteArrayOutputStream(); request.getEntity().writeTo(os); assertEquals(json, new 
String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset())); @@ -201,7 +203,10 @@ private static Request randomRequest() { randomFrom( new HttpEntity[] { new StringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), - new NStringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new InputStreamEntity( + new ByteArrayInputStream(randomAsciiAlphanumOfLength(10).getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_JSON + ), new ByteArrayEntity(randomBytesOfLength(40), ContentType.APPLICATION_JSON) } ) ); diff --git a/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java b/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java index 8ecd3e1a29c99..dfbf105637962 100644 --- a/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java @@ -32,19 +32,17 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -57,10 +55,9 @@ public class ResponseExceptionTests extends RestClientTestCase { - public void testResponseException() throws IOException { + public void testResponseException() throws IOException, ParseException { ProtocolVersion protocolVersion = new ProtocolVersion("http", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 500, "Internal Server Error"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(500, "Internal Server Error"); String responseBody = "{\"error\":{\"root_cause\": {}}}"; boolean hasBody = getRandom().nextBoolean(); @@ -78,7 +75,7 @@ public void testResponseException() throws IOException { httpResponse.setEntity(entity); } - RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion); + RequestLine requestLine = new RequestLine("GET", "/", protocolVersion); HttpHost httpHost = new HttpHost("localhost", 9200); Response response = new Response(requestLine, httpHost, httpResponse); ResponseException responseException = new ResponseException(response); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java index 10bf9568c8798..f5e1735042e66 100644 --- 
a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java @@ -36,7 +36,8 @@ import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -117,7 +118,7 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { private RestClient buildRestClient() { InetSocketAddress address = httpsServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "https")).build(); + return RestClient.builder(new HttpHost("https", address.getHostString(), address.getPort())).build(); } private static SSLContext getSslContext() throws Exception { diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java index ac81cd1132a2f..7165174e688e1 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java @@ -32,11 +32,12 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHeader; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.util.Timeout; import java.io.IOException; import java.util.Base64; @@ -271,7 +272,7 @@ public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder reques RequestConfig requestConfig = requestConfigBuilder.build(); assertEquals(RequestConfig.DEFAULT.getConnectionRequestTimeout(), requestConfig.getConnectionRequestTimeout()); // this way we get notified if the default ever changes - assertEquals(-1, requestConfig.getConnectionRequestTimeout()); + assertEquals(Timeout.ofMinutes(3), requestConfig.getConnectionRequestTimeout()); return requestConfigBuilder; } }); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java index e8b7742044f67..bf2c19b8127a1 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java @@ -11,10 +11,11 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -108,7 +109,7 @@ private static byte[] readAll(InputStream in) throws IOException { private RestClient createClient(boolean enableCompression, 
boolean chunkedEnabled) { InetSocketAddress address = httpServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + return RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(enableCompression) .setChunkedEnabled(chunkedEnabled) .build(); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java index 8c4d993517fee..fdcb65ff101c9 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java @@ -35,10 +35,11 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -126,7 +127,7 @@ private static byte[] readAll(InputStream in) throws IOException { private RestClient createClient(boolean enableCompression) { InetSocketAddress address = httpServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + return RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(enableCompression) .build(); } @@ -184,7 +185,7 @@ public void testCompressingClientSync() throws Exception { public void testCompressingClientAsync() throws Exception { InetSocketAddress address = httpServer.getAddress(); - RestClient restClient = RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + RestClient restClient = RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(true) .build(); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java index 277446191a36e..8c62533072c70 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java @@ -35,7 +35,8 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -56,6 +57,7 @@ import static org.opensearch.client.RestClientTestUtil.getAllStatusCodes; import static org.opensearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.opensearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; @@ -63,7 +65,7 @@ import static org.junit.Assert.fail; /** - * Integration test to check interaction between {@link RestClient} and {@link 
org.apache.http.client.HttpClient}. + * Integration test to check interaction between {@link RestClient} and {@link org.apache.hc.client5.http.classic.HttpClient}. * Works against real http servers, multiple hosts. Also tests failover by randomly shutting down hosts. */ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { @@ -299,7 +301,7 @@ public void testNodeSelector() throws Exception { } catch (ConnectException e) { // Windows isn't consistent here. Sometimes the message is even null! if (false == System.getProperty("os.name").startsWith("Windows")) { - assertEquals("Connection refused", e.getMessage()); + assertThat(e.getMessage(), containsString("Connection refused")); } } } else { diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java index d88d4f4afd9b1..62574e5ed6d5a 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java @@ -33,9 +33,10 @@ package org.opensearch.client; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; + +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; import org.junit.After; import java.io.IOException; diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java index 0500d282a506d..beee1c5ca21a0 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java @@ -36,30 +36,34 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.Consts; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.entity.ContentType; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.client.TargetAuthenticationStrategy; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import 
org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.net.URIBuilder; import org.junit.After; import org.junit.Before; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStream; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -86,7 +90,7 @@ import static org.junit.Assert.fail; /** - * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. + * Integration test to check interaction between {@link RestClient} and {@link org.apache.hc.client5.http.classic.HttpClient}. * Works against a real http server, one single host. */ public class RestClientSingleHostIntegTests extends RestClientTestCase { @@ -147,7 +151,7 @@ private static class ResponseHandler implements HttpHandler { public void handle(HttpExchange httpExchange) throws IOException { // copy request body to response body so we can verify it was sent StringBuilder body = new StringBuilder(); - try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), Consts.UTF_8)) { + try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), StandardCharsets.UTF_8)) { char[] buffer = new char[256]; int read; while ((read = reader.read(buffer)) != -1) { @@ -164,7 +168,7 @@ public void handle(HttpExchange httpExchange) throws IOException { httpExchange.sendResponseHeaders(statusCode, body.length() == 0 ? 
-1 : body.length()); if (body.length() > 0) { try (OutputStream out = httpExchange.getResponseBody()) { - out.write(body.toString().getBytes(Consts.UTF_8)); + out.write(body.toString().getBytes(StandardCharsets.UTF_8)); } } httpExchange.close(); @@ -172,18 +176,20 @@ public void handle(HttpExchange httpExchange) throws IOException { } private RestClient createRestClient(final boolean useAuth, final boolean usePreemptiveAuth) { - // provide the username/password for every request - final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("user", "pass")); - - final RestClientBuilder restClientBuilder = RestClient.builder( - new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()) - ).setDefaultHeaders(defaultHeaders); + final HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); + final RestClientBuilder restClientBuilder = RestClient.builder(httpHost).setDefaultHeaders(defaultHeaders); if (pathPrefix.length() > 0) { restClientBuilder.setPathPrefix(pathPrefix); } if (useAuth) { + // provide the username/password for every request + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials( + new AuthScope(httpHost, null, "Basic"), + new UsernamePasswordCredentials("user", "pass".toCharArray()) + ); + restClientBuilder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder httpClientBuilder) { @@ -191,7 +197,7 @@ public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder h // disable preemptive auth by ignoring any authcache httpClientBuilder.disableAuthCaching(); // don't use the "persistent credentials strategy" - httpClientBuilder.setTargetAuthenticationStrategy(new TargetAuthenticationStrategy()); + httpClientBuilder.setTargetAuthenticationStrategy(DefaultAuthenticationStrategy.INSTANCE); } return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider); @@ -220,7 +226,7 @@ public void testManyAsyncRequests() throws Exception { final List exceptions = new CopyOnWriteArrayList<>(); for (int i = 0; i < iters; i++) { Request request = new Request("PUT", "/200"); - request.setEntity(new NStringEntity("{}", ContentType.APPLICATION_JSON)); + request.setEntity(new StringEntity("{}", ContentType.APPLICATION_JSON)); restClient.performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(Response response) { @@ -271,7 +277,7 @@ public void onFailure(Exception exception) { /** * This test verifies some assumptions that we rely upon around the way the async http client works when reusing the same request - * throughout multiple retries, and the use of the {@link HttpRequestBase#abort()} method. + * throughout multiple retries, and the use of the {@link HttpUriRequestBase#abort()} method. * In fact the low-level REST client reuses the same request instance throughout multiple retries, and relies on the http client * to set the future ref to the request properly so that when abort is called, the proper future gets cancelled. 
*/ @@ -279,7 +285,10 @@ public void testRequestResetAndAbort() throws Exception { try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().build()) { client.start(); HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); - HttpGet httpGet = new HttpGet(pathPrefix + "/200"); + HttpUriRequestBase httpGet = new HttpUriRequestBase( + "GET", + new URIBuilder().setHttpHost(httpHost).setPath(pathPrefix + "/200").build() + ); // calling abort before the request is sent is a no-op httpGet.abort(); @@ -288,8 +297,11 @@ public void testRequestResetAndAbort() throws Exception { { httpGet.reset(); assertFalse(httpGet.isAborted()); + + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); httpGet.abort(); - Future future = client.execute(httpHost, httpGet, null); + try { future.get(); fail("expected cancellation exception"); @@ -300,8 +312,9 @@ public void testRequestResetAndAbort() throws Exception { } { httpGet.reset(); - Future future = client.execute(httpHost, httpGet, null); + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); assertFalse(httpGet.isAborted()); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); httpGet.abort(); assertTrue(httpGet.isAborted()); try { @@ -315,9 +328,9 @@ public void testRequestResetAndAbort() throws Exception { { httpGet.reset(); assertFalse(httpGet.isAborted()); - Future future = client.execute(httpHost, httpGet, null); + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); assertFalse(httpGet.isAborted()); - assertEquals(200, future.get().getStatusLine().getStatusCode()); + assertEquals(200, future.get().getCode()); assertFalse(future.isCancelled()); } } @@ -325,7 +338,7 @@ public void testRequestResetAndAbort() throws Exception { /** * End to end test for headers. We test it explicitly against a real http client as there are different ways - * to set/add headers to the {@link org.apache.http.client.HttpClient}. + * to set/add headers to the {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever headers it received. */ public void testHeaders() throws Exception { @@ -365,7 +378,7 @@ public void testHeaders() throws Exception { /** * End to end test for delete with body. We test it explicitly as it is not supported - * out of the box by {@link org.apache.http.client.HttpClient}. + * out of the box by {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever body it received. */ public void testDeleteWithBody() throws Exception { @@ -374,7 +387,7 @@ public void testDeleteWithBody() throws Exception { /** * End to end test for get with body. We test it explicitly as it is not supported - * out of the box by {@link org.apache.http.client.HttpClient}. + * out of the box by {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever body it received. 
*/ public void testGetWithBody() throws Exception { @@ -410,7 +423,7 @@ public void testEncodeParams() throws Exception { Request request = new Request("PUT", "/200"); request.addParameter("routing", "foo bar"); Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); - assertEquals(pathPrefix + "/200?routing=foo+bar", response.getRequestLine().getUri()); + assertEquals(pathPrefix + "/200?routing=foo%20bar", response.getRequestLine().getUri()); } { Request request = new Request("PUT", "/200"); @@ -540,4 +553,13 @@ private Response bodyTest(RestClient restClient, String method, int statusCode, return esResponse; } + + private AsyncResponseConsumer getResponseConsumer() { + return new HeapBufferedAsyncResponseConsumer(1024); + } + + private HttpUriRequestProducer getRequestProducer(HttpUriRequestBase request, HttpHost host) { + return HttpUriRequestProducer.create(request, host); + + } } diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java index e5ce5eb91ad5a..f46a91aa910f8 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java @@ -34,38 +34,42 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.ConnectionClosedException; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequest; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.conn.ConnectTimeoutException; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.ConnectTimeoutException; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.core5.concurrent.FutureCallback; +import 
org.apache.hc.core5.function.Supplier; +import org.apache.hc.core5.http.ClassicHttpRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncPushConsumer; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.HandlerFactory; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.apache.hc.core5.io.CloseMode; +import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactor.IOReactorStatus; +import org.apache.hc.core5.util.TimeValue; import org.junit.After; import org.junit.Before; -import org.mockito.ArgumentCaptor; -import org.mockito.stubbing.Answer; +import org.opensearch.client.http.HttpUriRequestProducer; import javax.net.ssl.SSLHandshakeException; import java.io.IOException; @@ -85,6 +89,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; import static java.util.Collections.singletonList; import static org.opensearch.client.RestClientTestUtil.getAllErrorStatusCodes; @@ -100,12 +105,6 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.nullable; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; /** * Tests for basic functionality of {@link RestClient} against one single host: tests http requests being sent, headers, @@ -122,10 +121,17 @@ public class RestClientSingleHostTests extends RestClientTestCase { private CloseableHttpAsyncClient httpClient; private HostsTrackingFailureListener failureListener; private boolean strictDeprecationMode; + private LongAdder requests; + private AtomicReference requestProducerCapture; @Before public void createRestClient() { - httpClient = mockHttpClient(exec); + requests = new LongAdder(); + requestProducerCapture = new AtomicReference<>(); + httpClient = mockHttpClient(exec, (target, requestProducer, responseConsumer, pushHandlerFactory, context, callback) -> { + requests.increment(); + requestProducerCapture.set(requestProducer); + }); defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); node = new Node(new HttpHost("localhost", 9200)); failureListener = new HostsTrackingFailureListener(); @@ -143,41 +149,78 @@ public void createRestClient() { ); } + interface CloseableHttpAsyncClientListener { + void onExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ); + } + @SuppressWarnings("unchecked") - static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec) { - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - 
httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - nullable(FutureCallback.class) - ) - ).thenAnswer((Answer>) invocationOnMock -> { - final HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; - final FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; - // Call the callback asynchronous to better simulate how async http client works - return exec.submit(() -> { - if (futureCallback != null) { - try { - HttpResponse httpResponse = responseOrException(requestProducer); - futureCallback.completed(httpResponse); - } catch (Exception e) { - futureCallback.failed(e); + static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec, final CloseableHttpAsyncClientListener... listeners) { + CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { + @Override + public void close() throws IOException {} + + @Override + public void close(CloseMode closeMode) {} + + @Override + public void start() {} + + @Override + public void register(String hostname, String uriPattern, Supplier supplier) {} + + @Override + public void initiateShutdown() {} + + @Override + public IOReactorStatus getStatus() { + return null; + } + + @Override + protected Future doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ) { + Arrays.stream(listeners) + .forEach(l -> l.onExecute(target, requestProducer, responseConsumer, pushHandlerFactory, context, callback)); + // Call the callback asynchronous to better simulate how async http client works + return exec.submit(() -> { + if (callback != null) { + try { + ClassicHttpResponse httpResponse = responseOrException(requestProducer); + callback.completed((T) httpResponse); + } catch (Exception e) { + callback.failed(e); + } + return null; } - return null; - } - return responseOrException(requestProducer); - }); - }); + return (T) responseOrException(requestProducer); + }); + } + + @Override + public void awaitShutdown(TimeValue waitTime) throws InterruptedException {} + }; + return httpClient; } - private static HttpResponse responseOrException(HttpAsyncRequestProducer requestProducer) throws Exception { - final HttpUriRequest request = (HttpUriRequest) requestProducer.generateRequest(); - final HttpHost httpHost = requestProducer.getTarget(); + private static ClassicHttpResponse responseOrException(AsyncRequestProducer requestProducer) throws Exception { + final ClassicHttpRequest request = getRequest(requestProducer); + final HttpHost httpHost = new HttpHost(request.getAuthority()); // return the desired status code or exception depending on the path - switch (request.getURI().getPath()) { + switch (request.getRequestUri()) { case "/soe": throw new SocketTimeoutException(httpHost.toString()); case "/coe": @@ -193,20 +236,17 @@ private static HttpResponse responseOrException(HttpAsyncRequestProducer request case "/runtime": throw new RuntimeException(); default: - int statusCode = Integer.parseInt(request.getURI().getPath().substring(1)); - StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, ""); + int statusCode = Integer.parseInt(request.getRequestUri().substring(1)); - final HttpResponse httpResponse = new BasicHttpResponse(statusLine); + final ClassicHttpResponse httpResponse = new 
BasicClassicHttpResponse(statusCode, ""); // return the same body that was sent - if (request instanceof HttpEntityEnclosingRequest) { - HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity(); - if (entity != null) { - assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable()); - httpResponse.setEntity(entity); - } + HttpEntity entity = request.getEntity(); + if (entity != null) { + assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable()); + httpResponse.setEntity(entity); } // return the same headers that were sent - httpResponse.setHeaders(request.getAllHeaders()); + httpResponse.setHeaders(request.getHeaders()); return httpResponse; } } @@ -224,26 +264,20 @@ public void shutdownExec() { */ @SuppressWarnings("unchecked") public void testInternalHttpRequest() throws Exception { - ArgumentCaptor requestArgumentCaptor = ArgumentCaptor.forClass(HttpAsyncRequestProducer.class); int times = 0; for (String httpMethod : getHttpMethods()) { - HttpUriRequest expectedRequest = performRandomRequest(httpMethod); - verify(httpClient, times(++times)).execute( - requestArgumentCaptor.capture(), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - nullable(FutureCallback.class) - ); - HttpUriRequest actualRequest = (HttpUriRequest) requestArgumentCaptor.getValue().generateRequest(); - assertEquals(expectedRequest.getURI(), actualRequest.getURI()); - assertEquals(expectedRequest.getClass(), actualRequest.getClass()); - assertArrayEquals(expectedRequest.getAllHeaders(), actualRequest.getAllHeaders()); - if (expectedRequest instanceof HttpEntityEnclosingRequest) { - HttpEntity expectedEntity = ((HttpEntityEnclosingRequest) expectedRequest).getEntity(); - if (expectedEntity != null) { - HttpEntity actualEntity = ((HttpEntityEnclosingRequest) actualRequest).getEntity(); - assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity)); - } + ClassicHttpRequest expectedRequest = performRandomRequest(httpMethod); + assertThat(requests.intValue(), equalTo(++times)); + + ClassicHttpRequest actualRequest = getRequest(requestProducerCapture.get()); + assertEquals(expectedRequest.getRequestUri(), actualRequest.getRequestUri()); + assertEquals(expectedRequest.getMethod(), actualRequest.getMethod()); + assertArrayEquals(expectedRequest.getHeaders(), actualRequest.getHeaders()); + + HttpEntity expectedEntity = expectedRequest.getEntity(); + if (expectedEntity != null) { + HttpEntity actualEntity = actualRequest.getEntity(); + assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity)); } } } @@ -414,14 +448,14 @@ public void testBody() throws Exception { } } } - for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) { + for (String method : Arrays.asList("TRACE")) { Request request = new Request(method, "/" + randomStatusCode(getRandom())); request.setEntity(entity); try { performRequestSyncOrAsync(restClient, request); fail("request should have failed"); - } catch (UnsupportedOperationException e) { - assertThat(e.getMessage(), equalTo(method + " with body is not supported")); + } catch (IllegalStateException e) { + assertThat(e.getMessage(), equalTo(method + " requests may not include an entity.")); } } } @@ -587,10 +621,10 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { HttpUriRequest expectedRequest; switch (method) { case "DELETE": - expectedRequest = new HttpDeleteWithEntity(uri); + 
expectedRequest = new HttpDelete(uri); break; case "GET": - expectedRequest = new HttpGetWithEntity(uri); + expectedRequest = new HttpGet(uri); break; case "HEAD": expectedRequest = new HttpHead(uri); @@ -614,14 +648,14 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { throw new UnsupportedOperationException("method not supported: " + method); } - if (expectedRequest instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean()) { + if (getRandom().nextBoolean() && !(expectedRequest instanceof HttpTrace /* no entity */)) { HttpEntity entity = new StringEntity(randomAsciiAlphanumOfLengthBetween(10, 100), ContentType.APPLICATION_JSON); - ((HttpEntityEnclosingRequest) expectedRequest).setEntity(entity); + expectedRequest.setEntity(entity); request.setEntity(entity); } final Set uniqueNames = new HashSet<>(); - if (randomBoolean()) { + if (randomBoolean() && !(expectedRequest instanceof HttpTrace /* no entity */)) { Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); RequestOptions.Builder options = request.getOptions().toBuilder(); for (Header header : headers) { @@ -698,4 +732,9 @@ private static void assertExceptionStackContainsCallingMethod(Throwable t) { t.printStackTrace(new PrintWriter(stack)); fail("didn't find the calling method (looks like " + myMethod + ") in:\n" + stack); } + + private static ClassicHttpRequest getRequest(AsyncRequestProducer requestProducer) throws NoSuchFieldException, IllegalAccessException { + assertThat(requestProducer, instanceOf(HttpUriRequestProducer.class)); + return ((HttpUriRequestProducer) requestProducer).getRequest(); + } } diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java index ca761dcb6b9b6..dd51da3a30d8c 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java @@ -32,12 +32,13 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.client.AuthCache; -import org.apache.http.impl.auth.BasicScheme; -import org.apache.http.impl.client.BasicAuthCache; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.auth.AuthCache; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.auth.BasicAuthCache; +import org.apache.hc.client5.http.impl.auth.BasicScheme; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.reactor.IOReactorStatus; import org.opensearch.client.RestClient.NodeTuple; import java.io.IOException; @@ -410,10 +411,10 @@ public void testIsRunning() { CloseableHttpAsyncClient client = mock(CloseableHttpAsyncClient.class); RestClient restClient = new RestClient(client, new Header[] {}, nodes, null, null, null, false, false); - when(client.isRunning()).thenReturn(true); + when(client.getStatus()).thenReturn(IOReactorStatus.ACTIVE); assertTrue(restClient.isRunning()); - when(client.isRunning()).thenReturn(false); + when(client.getStatus()).thenReturn(IOReactorStatus.INACTIVE); assertFalse(restClient.isRunning()); } diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index 066419844f048..f4c1c98dd4ce9 100644 --- 
a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -32,23 +32,28 @@ package org.opensearch.client.documentation; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.RequestLine; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.CredentialsProvider; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.entity.ContentType; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.impl.nio.reactor.IOReactorConfig; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.ssl.SSLContextBuilder; -import org.apache.http.ssl.SSLContexts; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.ssl.SSLContextBuilder; +import org.apache.hc.core5.ssl.SSLContexts; +import org.apache.hc.core5.util.Timeout; import org.opensearch.client.Cancellable; import org.opensearch.client.HttpAsyncResponseConsumerFactory; import org.opensearch.client.Node; @@ -109,12 +114,12 @@ public class RestClientDocumentation { // end::rest-client-options-singleton @SuppressWarnings("unused") - public void usage() throws IOException, InterruptedException { + public void usage() throws IOException, InterruptedException, ParseException { //tag::rest-client-init RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http"), - new HttpHost("localhost", 9201, "http")).build(); + new HttpHost("http", "localhost", 9200), + new HttpHost("http", "localhost", 9201)).build(); //end::rest-client-init //tag::rest-client-close @@ -124,7 +129,7 @@ public void usage() throws IOException, InterruptedException { { //tag::rest-client-init-default-headers RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("header", "value")}; builder.setDefaultHeaders(defaultHeaders); // <1> //end::rest-client-init-default-headers @@ -132,14 +137,14 @@ public void usage() throws IOException, InterruptedException { { 
//tag::rest-client-init-node-selector RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); // <1> //end::rest-client-init-node-selector } { //tag::rest-client-init-allocation-aware-selector RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setNodeSelector(new NodeSelector() { // <1> @Override public void select(Iterable nodes) { @@ -173,7 +178,7 @@ public void select(Iterable nodes) { { //tag::rest-client-init-failure-listener RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setFailureListener(new RestClient.FailureListener() { @Override public void onFailure(Node node) { @@ -185,13 +190,13 @@ public void onFailure(Node node) { { //tag::rest-client-init-request-config-callback RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setRequestConfigCallback( new RestClientBuilder.RequestConfigCallback() { @Override public RequestConfig.Builder customizeRequestConfig( RequestConfig.Builder requestConfigBuilder) { - return requestConfigBuilder.setSocketTimeout(10000); // <1> + return requestConfigBuilder.setResponseTimeout(Timeout.ofMilliseconds(10000)); // <1> } }); //end::rest-client-init-request-config-callback @@ -199,13 +204,13 @@ public RequestConfig.Builder customizeRequestConfig( { //tag::rest-client-init-client-config-callback RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { return httpClientBuilder.setProxy( - new HttpHost("proxy", 9000, "http")); // <1> + new HttpHost("http", "proxy", 9000)); // <1> } }); //end::rest-client-init-client-config-callback @@ -244,7 +249,7 @@ public void onFailure(Exception exception) { request.addParameter("pretty", "true"); //end::rest-client-parameters //tag::rest-client-body - request.setEntity(new NStringEntity( + request.setEntity(new StringEntity( "{\"json\":\"text\"}", ContentType.APPLICATION_JSON)); //end::rest-client-body @@ -334,8 +339,8 @@ public void commonConfiguration() throws Exception { public RequestConfig.Builder customizeRequestConfig( RequestConfig.Builder requestConfigBuilder) { return requestConfigBuilder - .setConnectTimeout(5000) - .setSocketTimeout(60000); + .setConnectTimeout(Timeout.ofMilliseconds(5000)) + .setResponseTimeout(Timeout.ofMilliseconds(60000)); } }); //end::rest-client-config-timeouts @@ -343,8 +348,8 @@ public RequestConfig.Builder customizeRequestConfig( { //tag::rest-client-config-request-options-timeouts RequestConfig requestConfig = RequestConfig.custom() - .setConnectTimeout(5000) - .setSocketTimeout(60000) + .setConnectTimeout(Timeout.ofMilliseconds(5000)) + .setResponseTimeout(Timeout.ofMilliseconds(60000)) .build(); RequestOptions options = RequestOptions.DEFAULT.toBuilder() .setRequestConfig(requestConfig) @@ -359,7 +364,7 @@ public RequestConfig.Builder customizeRequestConfig( @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return 
httpClientBuilder.setDefaultIOReactorConfig( + return httpClientBuilder.setIOReactorConfig( IOReactorConfig.custom() .setIoThreadCount(1) .build()); @@ -369,10 +374,9 @@ public HttpAsyncClientBuilder customizeHttpClient( } { //tag::rest-client-config-basic-auth - final CredentialsProvider credentialsProvider = - new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, - new UsernamePasswordCredentials("user", "password")); + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(new AuthScope(new HttpHost("localhost", 9200)), + new UsernamePasswordCredentials("user", "password".toCharArray())); RestClientBuilder builder = RestClient.builder( new HttpHost("localhost", 9200)) @@ -388,10 +392,10 @@ public HttpAsyncClientBuilder customizeHttpClient( } { //tag::rest-client-config-disable-preemptive-auth - final CredentialsProvider credentialsProvider = + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, - new UsernamePasswordCredentials("user", "password")); + credentialsProvider.setCredentials(new AuthScope(new HttpHost("localhost", 9200)), + new UsernamePasswordCredentials("user", "password".toCharArray())); RestClientBuilder builder = RestClient.builder( new HttpHost("localhost", 9200)) @@ -418,12 +422,20 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadTrustMaterial(truststore, null); final SSLContext sslContext = sslBuilder.build(); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-encrypted-communication @@ -444,12 +456,20 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadTrustMaterial(trustStore, null); final SSLContext sslContext = sslContextBuilder.build(); RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-trust-ca-pem @@ -473,12 +493,20 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadKeyMaterial(keyStore, keyStorePass.toCharArray()); final SSLContext sslContext = sslBuilder.build(); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new 
HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-mutual-tls-authentication @@ -486,7 +514,7 @@ public HttpAsyncClientBuilder customizeHttpClient( { //tag::rest-client-auth-bearer-token RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("Authorization", "Bearer u6iuAxZ0RG1Kcm5jVFI4eU4tZU9aVFEwT2F3")}; @@ -502,7 +530,7 @@ public HttpAsyncClientBuilder customizeHttpClient( (apiKeyId + ":" + apiKeySecret) .getBytes(StandardCharsets.UTF_8)); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("Authorization", "ApiKey " + apiKeyAuth)}; diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index b7cb0d87c02d9..eb3306cf2cea2 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -38,8 +38,8 @@ archivesBaseName = 'opensearch-rest-client-sniffer' dependencies { api project(":client:rest") - api "org.apache.httpcomponents:httpclient:${versions.httpclient}" - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" @@ -84,6 +84,7 @@ testingConventions { } thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', //commons-logging optional dependencies 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', diff --git a/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 b/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 new file mode 100644 index 0000000000000..b18cf050ac8df --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 @@ -0,0 +1 @@ +13c984b7b881afcff3a7f0bb95878724a48a4b66 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 b/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..c8981fb933736 --- /dev/null +++ b/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 @@ -0,0 +1 @@ 
+92538a62a4aacf96c9ea8992346a453e83da85fc \ No newline at end of file diff --git a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java index c1a0fcf9a8acf..e6696c1fc4039 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java @@ -37,8 +37,8 @@ import com.fasterxml.jackson.core.JsonToken; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.Node.Roles; import org.opensearch.client.Request; @@ -192,12 +192,12 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th publishAddressAsURI = URI.create(scheme + "://" + address); host = publishAddressAsURI.getHost(); } - publishedHost = new HttpHost(host, publishAddressAsURI.getPort(), publishAddressAsURI.getScheme()); + publishedHost = new HttpHost(publishAddressAsURI.getScheme(), host, publishAddressAsURI.getPort()); } else if (parser.currentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) { while (parser.nextToken() != JsonToken.END_ARRAY) { URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); boundHosts.add( - new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), boundAddressAsURI.getScheme()) + new HttpHost(boundAddressAsURI.getScheme(), boundAddressAsURI.getHost(), boundAddressAsURI.getPort()) ); } } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java index cbf349e534deb..9b5e89fbeb038 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import java.util.Collections; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java index 58b60ac13dee8..fd38eceee6224 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java @@ -33,10 +33,11 @@ package org.opensearch.client.sniff; import com.fasterxml.jackson.core.JsonFactory; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; import org.opensearch.client.Node; import org.opensearch.client.Node.Roles; import org.opensearch.client.RestClientTestCase; diff --git 
a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java index 1d06e9353726d..b678fb050e8f8 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java @@ -40,14 +40,13 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.Consts; -import org.apache.http.HttpHost; -import org.apache.http.client.methods.HttpGet; import org.opensearch.client.Node; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.HttpHost; import org.junit.After; import org.junit.Before; @@ -56,6 +55,7 @@ import java.io.StringWriter; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -181,7 +181,7 @@ public void handle(HttpExchange httpExchange) throws IOException { String nodesInfoBody = sniffResponse.nodesInfoBody; httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length()); try (OutputStream out = httpExchange.getResponseBody()) { - out.write(nodesInfoBody.getBytes(Consts.UTF_8)); + out.write(nodesInfoBody.getBytes(StandardCharsets.UTF_8)); return; } } @@ -210,14 +210,14 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10); String host = "host" + i; int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); - HttpHost publishHost = new HttpHost(host, port, scheme.toString()); + HttpHost publishHost = new HttpHost(scheme.toString(), host, port); Set boundHosts = new HashSet<>(); boundHosts.add(publishHost); if (randomBoolean()) { int bound = between(1, 5); for (int b = 0; b < bound; b++) { - boundHosts.add(new HttpHost(host + b, port, scheme.toString())); + boundHosts.add(new HttpHost(scheme.toString(), host + b, port)); } } diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java index e4d1058282f5c..faab6babcaca6 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java index 25a3162e238ed..24ee540aa6364 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java @@ -33,7 +33,8 @@ package org.opensearch.client.sniff; import 
com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java index 304243e73c078..36923281dde6b 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java @@ -32,12 +32,12 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; import org.opensearch.client.sniff.Sniffer.DefaultScheduler; import org.opensearch.client.sniff.Sniffer.Scheduler; +import org.apache.hc.core5.http.HttpHost; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java index 3b612aab80851..8f3e446d8aefb 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff.documentation; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.sniff.OpenSearchNodesSniffer; @@ -69,7 +69,7 @@ public void usage() throws IOException { { //tag::sniffer-init RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); Sniffer sniffer = Sniffer.builder(restClient).build(); //end::sniffer-init @@ -82,7 +82,7 @@ public void usage() throws IOException { { //tag::sniffer-interval RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); Sniffer sniffer = Sniffer.builder(restClient) .setSniffIntervalMillis(60000).build(); @@ -105,7 +105,7 @@ public void usage() throws IOException { { //tag::sniffer-https RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new OpenSearchNodesSniffer( restClient, @@ -118,7 +118,7 @@ public void usage() throws IOException { { //tag::sniff-request-timeout RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new OpenSearchNodesSniffer( restClient, @@ -131,7 +131,7 @@ public void usage() throws IOException { { //tag::custom-nodes-sniffer RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new NodesSniffer() { @Override diff --git a/client/test/build.gradle b/client/test/build.gradle index 07d874cf01ea7..13e9bd6b9e34a 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -35,7 +35,7 @@ sourceCompatibility = JavaVersion.VERSION_11 group = "${group}.client.test" 
dependencies { - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" api "junit:junit:${versions.junit}" api "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java b/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java index 2b3e867929e27..b4eacdbf88827 100644 --- a/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java +++ b/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java @@ -43,7 +43,8 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; -import org.apache.http.Header; + +import org.apache.hc.core5.http.Header; import java.util.ArrayList; import java.util.HashMap; diff --git a/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java index aeba9bde4bff4..6a01ed30e0c63 100644 --- a/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java +++ b/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java @@ -35,8 +35,9 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.apache.http.Header; -import org.apache.http.message.BasicHeader; + +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.message.BasicHeader; import java.util.ArrayList; import java.util.Arrays; diff --git a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java index 37ffe32d19509..07576dacffb03 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java @@ -32,13 +32,14 @@ package org.opensearch.test.rest; -import org.apache.http.util.EntityUtils; import org.opensearch.action.ActionFuture; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.client.ResponseListener; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.After; import org.junit.Before; @@ -145,6 +146,8 @@ public void onSuccess(Response response) { future.onResponse(EntityUtils.toString(response.getEntity())); } catch (IOException e) { future.onFailure(e); + } catch (ParseException e) { + future.onFailure(e); } } diff --git a/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java b/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java index 2584a9b41f14d..10ee9393b343f 100644 --- a/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java +++ 
b/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java @@ -34,7 +34,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.opensearch.OpenSearchParseException; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.common.xcontent.XContentHelper; @@ -73,7 +76,7 @@ public void testCreateIndex() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testAliases() throws IOException { + public void testAliases() throws IOException, ParseException { assumeFalse("In this test, .opensearch_dashboards is the alias name", ".opensearch_dashboards".equals(indexName)); Request request = new Request("PUT", "/_opensearch_dashboards/" + indexName); Response response = client().performRequest(request); @@ -96,7 +99,7 @@ public void testBulkToOpenSearchDashboardsIndex() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testRefresh() throws IOException { + public void testRefresh() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity("{ \"index\" : { \"_index\" : \"" + indexName + "\", \"_id\" : \"1\" } }\n{ \"foo\" : \"bar\" }\n"); Response response = client().performRequest(request); @@ -114,7 +117,7 @@ public void testRefresh() throws IOException { assertThat(responseBody, containsString("bar")); } - public void testGetFromOpenSearchDashboardsIndex() throws IOException { + public void testGetFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity("{ \"index\" : { \"_index\" : \"" + indexName + "\", \"_id\" : \"1\" } }\n{ \"foo\" : \"bar\" }\n"); request.addParameter("refresh", "true"); @@ -130,7 +133,7 @@ public void testGetFromOpenSearchDashboardsIndex() throws IOException { assertThat(responseBody, containsString("bar")); } - public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException { + public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" @@ -163,7 +166,7 @@ public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException { assertThat(responseBody, containsString("tag")); } - public void testSearchFromOpenSearchDashboardsIndex() throws IOException { + public void testSearchFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" @@ -241,7 +244,7 @@ public void testUpdateIndexSettings() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testGetIndex() throws IOException { + public void testGetIndex() throws IOException, ParseException { Request request = new Request("PUT", "/_opensearch_dashboards/" + indexName); Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); @@ -278,7 +281,7 @@ public void testIndexingAndUpdatingDocs() throws 
IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testScrollingDocs() throws IOException { + public void testScrollingDocs() throws IOException, OpenSearchParseException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 43adffc6f7671..bb1a9d190313f 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -33,7 +33,8 @@ package org.opensearch.index.reindex; import java.util.Optional; -import org.apache.http.HttpRequestInterceptor; + +import org.apache.hc.core5.http.HttpRequestInterceptor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java index 34fcd245289be..f8e9018bce6df 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java @@ -32,10 +32,10 @@ package org.opensearch.index.reindex; -import org.apache.http.conn.ssl.DefaultHostnameVerifier; -import org.apache.http.conn.ssl.NoopHostnameVerifier; -import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; -import org.opensearch.common.Strings; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.client5.http.ssl.DefaultHostnameVerifier; +import org.apache.hc.client5.http.ssl.NoopHostnameVerifier; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; import org.opensearch.common.settings.SecureSetting; import org.opensearch.common.settings.SecureString; import org.opensearch.common.settings.Setting; @@ -161,16 +161,24 @@ private void reload() { } /** - * Encapsulate the loaded SSL configuration as a HTTP-client {@link SSLIOSessionStrategy}. + * Encapsulate the loaded SSL configuration as a HTTP-client {@link TlsStrategy}. * The returned strategy is immutable, but successive calls will return different objects that may have different * configurations if the underlying key/certificate files are modified. */ - SSLIOSessionStrategy getStrategy() { + TlsStrategy getStrategy() { final HostnameVerifier hostnameVerifier = configuration.getVerificationMode().isHostnameVerificationEnabled() ? 
new DefaultHostnameVerifier() : new NoopHostnameVerifier(); - final String[] protocols = configuration.getSupportedProtocols().toArray(Strings.EMPTY_ARRAY); - final String[] cipherSuites = configuration.getCipherSuites().toArray(Strings.EMPTY_ARRAY); - return new SSLIOSessionStrategy(context, protocols, cipherSuites, hostnameVerifier); + + final String[] protocols = configuration.getSupportedProtocols().toArray(new String[0]); + final String[] cipherSuites = configuration.getCipherSuites().toArray(new String[0]); + + return ClientTlsStrategyBuilder.create() + .setSslContext(context) + .setHostnameVerifier(hostnameVerifier) + .setCiphers(cipherSuites) + .setTlsVersions(protocols) + .build(); + } } diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java index 8ade055d10f60..aa9accbd90e21 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java @@ -33,15 +33,18 @@ package org.opensearch.index.reindex; import java.util.Optional; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequestInterceptor; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.CredentialsProvider; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.nio.reactor.IOReactorConfig; -import org.apache.http.message.BasicHeader; + +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequestInterceptor; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.util.Timeout; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; @@ -202,21 +205,23 @@ static RestClient buildRestClient( for (Map.Entry header : remoteInfo.getHeaders().entrySet()) { clientHeaders[i++] = new BasicHeader(header.getKey(), header.getValue()); } - final RestClientBuilder builder = RestClient.builder( - new HttpHost(remoteInfo.getHost(), remoteInfo.getPort(), remoteInfo.getScheme()) - ).setDefaultHeaders(clientHeaders).setRequestConfigCallback(c -> { - c.setConnectTimeout(Math.toIntExact(remoteInfo.getConnectTimeout().millis())); - c.setSocketTimeout(Math.toIntExact(remoteInfo.getSocketTimeout().millis())); + final HttpHost httpHost = new HttpHost(remoteInfo.getScheme(), remoteInfo.getHost(), remoteInfo.getPort()); + final RestClientBuilder builder = RestClient.builder(httpHost).setDefaultHeaders(clientHeaders).setRequestConfigCallback(c -> { + c.setConnectTimeout(Timeout.ofMilliseconds(Math.toIntExact(remoteInfo.getConnectTimeout().millis()))); + c.setResponseTimeout(Timeout.ofMilliseconds(Math.toIntExact(remoteInfo.getSocketTimeout().millis()))); return c; }).setHttpClientConfigCallback(c -> { // Enable basic auth if it is configured if (remoteInfo.getUsername() != null) { - UsernamePasswordCredentials creds = new 
UsernamePasswordCredentials(remoteInfo.getUsername(), remoteInfo.getPassword()); - CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, creds); + UsernamePasswordCredentials creds = new UsernamePasswordCredentials( + remoteInfo.getUsername(), + remoteInfo.getPassword().toCharArray() + ); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(new AuthScope(httpHost, null, "Basic"), creds); c.setDefaultCredentialsProvider(credentialsProvider); } else { - restInterceptor.ifPresent(interceptor -> c.addInterceptorLast(interceptor)); + restInterceptor.ifPresent(interceptor -> c.addRequestInterceptorLast(interceptor)); } // Stick the task id in the thread name so we can track down tasks from stack traces AtomicInteger threads = new AtomicInteger(); @@ -227,8 +232,13 @@ static RestClient buildRestClient( return t; }); // Limit ourselves to one reactor thread because for now the search process is single threaded. - c.setDefaultIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(1).build()); - c.setSSLStrategy(sslConfig.getStrategy()); + c.setIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(1).build()); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(sslConfig.getStrategy()) + .build(); + + c.setConnectionManager(connectionManager); return c; }); if (Strings.hasLength(remoteInfo.getPathPrefix()) && "/".equals(remoteInfo.getPathPrefix()) == false) { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java index 8467fbdeacd0e..873bd7c3b48cb 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java @@ -32,8 +32,8 @@ package org.opensearch.index.reindex.remote; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.search.SearchRequest; @@ -240,7 +240,7 @@ static Request scroll(String scroll, TimeValue keepAlive, Version remoteVersion) if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 extract the plain scroll_id from the body - request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN)); + request.setEntity(new StringEntity(scroll, ContentType.TEXT_PLAIN)); return request; } @@ -258,7 +258,7 @@ static Request clearScroll(String scroll, Version remoteVersion) { if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 extract the plain scroll_id from the body - request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN)); + request.setEntity(new StringEntity(scroll, ContentType.TEXT_PLAIN)); return request; } try (XContentBuilder entity = JsonXContent.contentBuilder()) { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java index be691243ecf84..3a943450a1a89 100644 --- 
a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -32,10 +32,11 @@ package org.opensearch.index.reindex.remote; -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -199,7 +200,7 @@ public void onSuccess(org.opensearch.client.Response response) { InputStream content = responseEntity.getContent(); XContentType xContentType = null; if (responseEntity.getContentType() != null) { - final String mimeType = ContentType.parse(responseEntity.getContentType().getValue()).getMimeType(); + final String mimeType = ContentType.parse(responseEntity.getContentType()).getMimeType(); xContentType = XContentType.fromMediaType(mimeType); } if (xContentType == null) { @@ -284,7 +285,11 @@ private static String bodyMessage(@Nullable HttpEntity entity) throws IOExceptio if (entity == null) { return "No error body."; } else { - return "body=" + EntityUtils.toString(entity); + try { + return "body=" + EntityUtils.toString(entity); + } catch (final ParseException ex) { + throw new IOException(ex); + } } } } diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java index 034981c969b4b..0646c9b5d8705 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java @@ -6,7 +6,8 @@ package org.opensearch.index.reindex.spi; import java.util.Optional; -import org.apache.http.HttpRequestInterceptor; + +import org.apache.hc.core5.http.HttpRequestInterceptor; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.index.reindex.ReindexRequest; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java index c349bc54bcbd9..e7af54a0563d3 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -32,8 +32,8 @@ package org.opensearch.index.reindex.remote; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; import org.opensearch.Version; import org.opensearch.action.search.SearchRequest; import org.opensearch.client.Request; @@ -245,7 +245,7 @@ public void testInitialSearchEntity() throws IOException { searchRequest.source(new SearchSourceBuilder()); String query = "{\"match_all\":{}}"; HttpEntity entity = initialSearch(searchRequest, new BytesArray(query), remoteVersion).getEntity(); - 
assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType()); if (remoteVersion.onOrAfter(Version.fromId(1000099))) { assertEquals( "{\"query\":" + query + ",\"_source\":true}", @@ -261,7 +261,7 @@ public void testInitialSearchEntity() throws IOException { // Source filtering is included if set up searchRequest.source().fetchSource(new String[] { "in1", "in2" }, new String[] { "out" }); entity = initialSearch(searchRequest, new BytesArray(query), remoteVersion).getEntity(); - assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType()); assertEquals( "{\"query\":" + query + ",\"_source\":{\"includes\":[\"in1\",\"in2\"],\"excludes\":[\"out\"]}}", Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)) @@ -287,7 +287,7 @@ public void testScrollParams() { public void testScrollEntity() throws IOException { String scroll = randomAlphaOfLength(30); HttpEntity entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromString("5.0.0")).getEntity(); - assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType()); assertThat( Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"") @@ -295,14 +295,14 @@ public void testScrollEntity() throws IOException { // Test with version < 2.0.0 entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromId(1070499)).getEntity(); - assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType()); assertEquals(scroll, Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); } public void testClearScroll() throws IOException { String scroll = randomAlphaOfLength(30); Request request = clearScroll(scroll, Version.fromString("5.0.0")); - assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType()); assertThat( Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"") @@ -311,7 +311,7 @@ public void testClearScroll() throws IOException { // Test with version < 2.0.0 request = clearScroll(scroll, Version.fromId(1070499)); - assertEquals(ContentType.TEXT_PLAIN.toString(), request.getEntity().getContentType().getValue()); + assertEquals(ContentType.TEXT_PLAIN.toString(), request.getEntity().getContentType()); assertEquals(scroll, Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8))); assertThat(request.getParameters().keySet(), empty()); } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 337bc67796f8e..c0e2bd14f55bc 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -32,31 +32,14 @@ 
package org.opensearch.index.reindex.remote; -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchStatusException; import org.opensearch.Version; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.search.SearchRequest; -import org.opensearch.client.HeapBufferedAsyncResponseConsumer; import org.opensearch.client.RestClient; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; import org.opensearch.common.ParsingException; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.io.FileSystemUtils; @@ -74,13 +57,32 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.core5.concurrent.FutureCallback; +import org.apache.hc.core5.function.Supplier; +import org.apache.hc.core5.http.ClassicHttpRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncPushConsumer; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.HandlerFactory; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.apache.hc.core5.io.CloseMode; +import org.apache.hc.core5.reactor.IOReactorStatus; import org.junit.After; import org.junit.Before; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; import java.io.IOException; import java.io.InputStreamReader; +import java.io.UncheckedIOException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.Queue; @@ -97,7 +99,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.mockito.Mockito.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -444,24 +445,49 @@ public void testWrapExceptionToPreserveStatus() throws IOException { @SuppressWarnings({ 
"unchecked", "rawtypes" }) public void testTooLargeResponse() throws Exception { ContentTooLongException tooLong = new ContentTooLongException("too long!"); - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - any(FutureCallback.class) - ) - ).then(new Answer>() { + CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { + + @Override + public void close() throws IOException {} + + @Override + public void close(CloseMode closeMode) {} + + @Override + public void start() {} + + @Override + public void register(String hostname, String uriPattern, Supplier supplier) {} + + @Override + public void initiateShutdown() {} + @Override - public Future answer(InvocationOnMock invocationOnMock) throws Throwable { - HeapBufferedAsyncResponseConsumer consumer = (HeapBufferedAsyncResponseConsumer) invocationOnMock.getArguments()[1]; - FutureCallback callback = (FutureCallback) invocationOnMock.getArguments()[3]; - assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit()); + public IOReactorStatus getStatus() { + return null; + } + + @Override + protected Future doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ) { + assertEquals( + new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), + ((HeapBufferedAsyncResponseConsumer) responseConsumer).getBufferLimit() + ); callback.failed(tooLong); return null; } - }); + + @Override + public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) throws InterruptedException {} + }; + RemoteScrollableHitSource source = sourceWithMockedClient(true, httpClient); Throwable e = expectThrows(RuntimeException.class, source::start); @@ -539,46 +565,68 @@ private RemoteScrollableHitSource sourceWithMockedRemoteCall(boolean mockRemoteV } } - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - any(FutureCallback.class) - ) - ).thenAnswer(new Answer>() { - + final CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { int responseCount = 0; @Override - public Future answer(InvocationOnMock invocationOnMock) throws Throwable { - // Throw away the current thread context to simulate running async httpclient's thread pool - threadPool.getThreadContext().stashContext(); - HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; - FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; - HttpEntityEnclosingRequest request = (HttpEntityEnclosingRequest) requestProducer.generateRequest(); - URL resource = resources[responseCount]; - String path = paths[responseCount++]; - ProtocolVersion protocolVersion = new ProtocolVersion("http", 1, 1); - if (path.startsWith("fail:")) { - String body = Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)); - if (path.equals("fail:rejection.json")) { - StatusLine statusLine = new BasicStatusLine(protocolVersion, RestStatus.TOO_MANY_REQUESTS.getStatus(), ""); - BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); - futureCallback.completed(httpResponse); + 
public void close(CloseMode closeMode) {} + + @Override + public void close() throws IOException {} + + @Override + public void start() {} + + @Override + public IOReactorStatus getStatus() { + return null; + } + + @Override + public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) throws InterruptedException {} + + @Override + public void initiateShutdown() {} + + @Override + protected Future doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ) { + try { + // Throw away the current thread context to simulate running async httpclient's thread pool + threadPool.getThreadContext().stashContext(); + ClassicHttpRequest request = getRequest(requestProducer); + URL resource = resources[responseCount]; + String path = paths[responseCount++]; + if (path.startsWith("fail:")) { + String body = Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)); + if (path.equals("fail:rejection.json")) { + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.TOO_MANY_REQUESTS.getStatus(), ""); + callback.completed((T) httpResponse); + } else { + callback.failed(new RuntimeException(body)); + } } else { - futureCallback.failed(new RuntimeException(body)); + BasicClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, ""); + httpResponse.setEntity(new InputStreamEntity(FileSystemUtils.openFileURLStream(resource), contentType)); + callback.completed((T) httpResponse); } - } else { - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, ""); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); - httpResponse.setEntity(new InputStreamEntity(FileSystemUtils.openFileURLStream(resource), contentType)); - futureCallback.completed(httpResponse); + return null; + } catch (IOException ex) { + throw new UncheckedIOException(ex); } - return null; } - }); + + @Override + public void register(String hostname, String uriPattern, Supplier supplier) {} + + }; + return sourceWithMockedClient(mockRemoteVersion, httpClient); } @@ -649,4 +697,9 @@ private T expectListenerFailure(Class expectedExcept assertNotNull(exception.get()); return exception.get(); } + + private static ClassicHttpRequest getRequest(AsyncRequestProducer requestProducer) { + assertThat(requestProducer, instanceOf(HttpUriRequestProducer.class)); + return ((HttpUriRequestProducer) requestProducer).getRequest(); + } } diff --git a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java index 3d0c09fb2288c..cbadcba5ef6f0 100644 --- a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java +++ b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java @@ -34,9 +34,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.common.Strings; @@ -49,6 +46,9 @@ import org.opensearch.rest.RestStatus; import 
org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.Before; import java.io.IOException; @@ -144,7 +144,7 @@ private static HttpEntity buildRepositorySettings(final String type, final Setti builder.endObject(); } builder.endObject(); - return new NStringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + return new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); } } } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java index 96e21e0e05ff7..fbac1f1c52e95 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java @@ -32,7 +32,6 @@ package org.opensearch.rest.discovery; -import org.apache.http.HttpHost; import org.opensearch.OpenSearchNetty4IntegTestCase; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.client.Client; @@ -49,9 +48,11 @@ import org.opensearch.http.HttpServerTransport; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.apache.hc.core5.http.HttpHost; import org.hamcrest.Matchers; import java.io.IOException; +import java.net.URISyntaxException; import java.util.Collections; import java.util.List; @@ -124,6 +125,8 @@ public Settings onNodeStopped(String nodeName) throws IOException { .get(); assertFalse(nodeName, clusterHealthResponse.isTimedOut()); return Settings.EMPTY; + } catch (final URISyntaxException ex) { + throw new IOException(ex); } finally { restClient.setNodes(allNodes); } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index e8417f9ceaf2c..1478f48f16650 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -40,8 +40,6 @@ import fixture.azure.AzureHttpHandler; import reactor.core.scheduler.Schedulers; -import org.apache.http.HttpStatus; - import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; @@ -63,7 +61,7 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; - +import org.apache.hc.core5.http.HttpStatus; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index 616a1ae9feb4f..6850b204e0112 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ 
b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -37,8 +37,8 @@ import com.google.cloud.storage.StorageOptions; import com.sun.net.httpserver.HttpHandler; import fixture.gcs.FakeOAuth2HttpHandler; -import org.apache.http.HttpStatus; +import org.apache.hc.core5.http.HttpStatus; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java index 5691154882c9f..ebed71d90df9a 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -32,9 +32,9 @@ package org.opensearch.search; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.apache.lucene.search.TotalHits; import org.opensearch.OpenSearchException; import org.opensearch.Version; @@ -343,7 +343,7 @@ private static HttpEntity buildUpdateSettingsRequestBody(Map set builder.endObject(); requestBody = Strings.toString(builder); } - return new NStringEntity(requestBody, ContentType.APPLICATION_JSON); + return new StringEntity(requestBody, ContentType.APPLICATION_JSON); } private static class HighLevelClient extends RestHighLevelClient { diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index a3db7532c3a10..8eb4bc3230b41 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -32,7 +32,6 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.Request; @@ -53,6 +52,8 @@ import org.opensearch.test.XContentTestUtils; import org.opensearch.test.rest.OpenSearchRestTestCase; import org.opensearch.test.rest.yaml.ObjectPath; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.Before; import java.io.IOException; @@ -286,7 +287,7 @@ public void testClusterState() throws Exception { } - public void testShrink() throws IOException { + public void testShrink() throws IOException, NumberFormatException, ParseException { String shrunkenIndex = index + "_shrunk"; int numDocs; if (isRunningAgainstOldCluster()) { @@ -356,7 +357,7 @@ public void testShrink() throws IOException { assertEquals(numDocs, totalHits); } - public void testShrinkAfterUpgrade() throws IOException { + public void testShrinkAfterUpgrade() throws IOException, ParseException { String shrunkenIndex = index + "_shrunk"; int numDocs; if (isRunningAgainstOldCluster()) { @@ -444,7 +445,7 @@ public void testShrinkAfterUpgrade() throws IOException { *
  • Make sure the document count is correct * */ - public void testRollover() throws IOException { + public void testRollover() throws IOException, ParseException { if (isRunningAgainstOldCluster()) { Request createIndex = new Request("PUT", "/" + index + "-000001"); createIndex.setJsonEntity("{" @@ -526,7 +527,7 @@ void assertBasicSearchWorks(int count) throws IOException { } } - void assertAllSearchWorks(int count) throws IOException { + void assertAllSearchWorks(int count) throws IOException, ParseException { logger.info("--> testing _all search"); Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); assertNoFailures(response); @@ -623,14 +624,14 @@ void assertStoredBinaryFields(int count) throws Exception { } } - static String toStr(Response response) throws IOException { + static String toStr(Response response) throws IOException, ParseException { return EntityUtils.toString(response.getEntity()); } /** * Tests that a single document survives. Super basic smoke test. */ - public void testSingleDoc() throws IOException { + public void testSingleDoc() throws IOException, ParseException { String docLocation = "/" + index + "/" + type + "/1"; String doc = "{\"test\": \"test\"}"; @@ -792,7 +793,7 @@ public void testRecovery() throws Exception { * old and new versions. All of the snapshots include an index, a template, * and some routing configuration. */ - public void testSnapshotRestore() throws IOException { + public void testSnapshotRestore() throws IOException, ParseException { int count; if (isRunningAgainstOldCluster()) { // Create the index @@ -1060,7 +1061,7 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab } } - private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException { + private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException, ParseException { // Check the snapshot metadata, especially the version Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName); Map listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest)); @@ -1179,7 +1180,7 @@ private void indexDocument(String id) throws IOException { assertOK(client().performRequest(indexRequest)); } - private int countOfIndexedRandomDocuments() throws IOException { + private int countOfIndexedRandomDocuments() throws IOException, NumberFormatException, ParseException { return Integer.parseInt(loadInfoDocument(index + "_count")); } @@ -1194,7 +1195,7 @@ private void saveInfoDocument(String id, String value) throws IOException { client().performRequest(request); } - private String loadInfoDocument(String id) throws IOException { + private String loadInfoDocument(String id) throws IOException, ParseException { Request request = new Request("GET", "/info/_doc/" + id); request.addParameter("filter_path", "_source"); String doc = toStr(client().performRequest(request)); diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java index 856dc45d42203..44ed426e13782 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java @@ -32,7 +32,8 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; +import 
org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.client.Request; import org.opensearch.client.Response; @@ -235,7 +236,7 @@ public void testQueryBuilderBWC() throws Exception { } } - private static Map toMap(Response response) throws IOException { + private static Map toMap(Response response) throws IOException, ParseException { return toMap(EntityUtils.toString(response.getEntity())); } diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java index f85a94cc9f556..35f530f22a141 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java @@ -8,7 +8,9 @@ package org.opensearch.backwards; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.HttpStatus; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.Version; import org.opensearch.client.Node; import org.opensearch.client.Request; @@ -21,8 +23,6 @@ import java.util.Collections; import java.util.Map; -import static org.apache.http.HttpStatus.SC_NOT_FOUND; - public class ExceptionIT extends OpenSearchRestTestCase { public void testOpensearchException() throws Exception { logClusterNodes(); @@ -38,13 +38,13 @@ public void testOpensearchException() throws Exception { } catch (ResponseException e) { logger.debug(e.getMessage()); Response response = e.getResponse(); - assertEquals(SC_NOT_FOUND, response.getStatusLine().getStatusCode()); + assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatusLine().getStatusCode()); assertEquals("no_such_index", ObjectPath.createFromResponse(response).evaluate("error.index")); } } } - private void logClusterNodes() throws IOException { + private void logClusterNodes() throws IOException, ParseException { ObjectPath objectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "_nodes"))); Map nodes = objectPath.evaluate("nodes"); // As of 2.0, 'GET _cat/master' API is deprecated to promote inclusive language. 
diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java index 69c4f0110a3ff..4746ad35a9406 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java @@ -31,7 +31,7 @@ package org.opensearch.backwards; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.Request; @@ -50,6 +50,7 @@ import org.opensearch.test.rest.yaml.ObjectPath; import java.io.IOException; +import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -416,7 +417,7 @@ private List buildShards(String index, Nodes nodes, RestClient client) th return shards; } - private Nodes buildNodeAndVersions() throws IOException { + private Nodes buildNodeAndVersions() throws IOException, URISyntaxException { Response response = client().performRequest(new Request("GET", "_nodes")); ObjectPath objectPath = ObjectPath.createFromResponse(response); Map nodesAsMap = objectPath.evaluate("nodes"); @@ -426,7 +427,7 @@ private Nodes buildNodeAndVersions() throws IOException { id, objectPath.evaluate("nodes." + id + ".name"), Version.fromString(objectPath.evaluate("nodes." + id + ".version")), - HttpHost.create(objectPath.evaluate("nodes." + id + ".http.publish_address")))); + HttpHost.create((String)objectPath.evaluate("nodes." + id + ".http.publish_address")))); } response = client().performRequest(new Request("GET", "_cluster/state")); nodes.setClusterManagerNodeId(ObjectPath.createFromResponse(response).evaluate("master_node")); diff --git a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java index 61ccbab95850d..5f73144501f94 100644 --- a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java +++ b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java @@ -31,7 +31,7 @@ package org.opensearch.cluster.remote.test; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.Before; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -104,8 +104,8 @@ private HighLevelClient(RestClient restClient) { private RestHighLevelClient buildClient(final String url) throws IOException { int portSeparator = url.lastIndexOf(':'); - HttpHost httpHost = new HttpHost(url.substring(0, portSeparator), - Integer.parseInt(url.substring(portSeparator + 1)), getProtocol()); + HttpHost httpHost = new HttpHost(getProtocol(), url.substring(0, portSeparator), + Integer.parseInt(url.substring(portSeparator + 1))); return new HighLevelClient(buildClient(restAdminSettings(), new HttpHost[]{httpHost})); } diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index 888fa886c3c5e..ed4bf11041c88 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -31,7 +31,8 @@ package org.opensearch.upgrades; -import 
org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.Request; @@ -61,7 +62,7 @@ */ public class IndexingIT extends AbstractRollingTestCase { - public void testIndexing() throws IOException { + public void testIndexing() throws IOException, ParseException { switch (CLUSTER_TYPE) { case OLD: break; @@ -203,7 +204,7 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio client().performRequest(bulk); } - private void assertCount(String index, int count) throws IOException { + private void assertCount(String index, int count) throws IOException, ParseException { Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); searchTestIndexRequest.addParameter("filter_path", "hits.total"); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index dbb18aec19fca..4fd82af9603a9 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -31,7 +31,6 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; @@ -50,6 +49,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.RestStatus; import org.opensearch.test.rest.yaml.ObjectPath; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.hamcrest.Matcher; import org.hamcrest.Matchers; diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java index 6178167c98e98..0c845bb2d34e5 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java @@ -34,7 +34,8 @@ import java.io.IOException; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; @@ -60,7 +61,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - public void testThatErrorTraceParamReturns400() throws IOException { + public void testThatErrorTraceParamReturns400() throws IOException, ParseException { Request request = new Request("DELETE", "/"); request.addParameter("error_trace", "true"); ResponseException e = expectThrows(ResponseException.class, () -> diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java index 090a572ef0d6a..e2ccf86d31dbf 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java @@ -32,7 +32,8 @@ package org.opensearch.http; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; 
import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; @@ -47,7 +48,7 @@ */ public class DetailedErrorsEnabledIT extends HttpSmokeTestCase { - public void testThatErrorTraceWorksByDefault() throws IOException { + public void testThatErrorTraceWorksByDefault() throws IOException, ParseException { try { Request request = new Request("DELETE", "/"); request.addParameter("error_trace", "true"); diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java index 1925ecc5cd346..5514fae996a39 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java @@ -31,9 +31,10 @@ package org.opensearch.http; -import org.apache.http.HttpHeaders; -import org.apache.http.client.entity.GzipDecompressingEntity; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.entity.GzipDecompressingEntity; +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; @@ -56,7 +57,7 @@ public class HttpCompressionIT extends OpenSearchRestTestCase { " }\n" + "}"; - public void testCompressesResponseIfRequested() throws IOException { + public void testCompressesResponseIfRequested() throws IOException, ParseException { Request request = new Request("POST", "/company/_doc/2"); request.setJsonEntity(SAMPLE_DOCUMENT); Response response = client().performRequest(request); diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java index c3d766abe96ca..8e6dea7edd0f8 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java @@ -32,7 +32,8 @@ package org.opensearch.http; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; @@ -46,7 +47,7 @@ public class NoHandlerIT extends HttpSmokeTestCase { - public void testNoHandlerRespectsAcceptHeader() throws IOException { + public void testNoHandlerRespectsAcceptHeader() throws IOException, ParseException { runTestNoHandlerRespectsAcceptHeader( "application/json", "application/json; charset=UTF-8", @@ -58,7 +59,7 @@ public void testNoHandlerRespectsAcceptHeader() throws IOException { } private void runTestNoHandlerRespectsAcceptHeader( - final String accept, final String contentType, final String expect) throws IOException { + final String accept, final String contentType, final String expect) throws IOException, ParseException { Request request = new Request("GET", "/foo/bar/baz/qux/quux"); RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("Accept", accept); diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java index b8257272ba65b..74b85ace37b81 100644 --- 
a/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java @@ -30,7 +30,7 @@ package org.opensearch.http; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java index a13d406f7b133..42c7357de3f07 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java @@ -31,8 +31,8 @@ package org.opensearch.http; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.SetOnce; import org.opensearch.action.admin.cluster.node.info.NodeInfo; @@ -109,7 +109,7 @@ public void testAutomaticCancellationMultiSearchDuringQueryPhase() throws Except new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())))); Request restRequest = new Request("POST", "/_msearch"); byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); - restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType))); + restRequest.setEntity(new ByteArrayEntity(requestBody, createContentType(contentType))); verifyCancellationDuringQueryPhase(MultiSearchAction.NAME, restRequest); } @@ -158,7 +158,7 @@ public void testAutomaticCancellationMultiSearchDuringFetchPhase() throws Except new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())))); Request restRequest = new Request("POST", "/_msearch"); byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); - restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType))); + restRequest.setEntity(new ByteArrayEntity(requestBody, createContentType(contentType))); verifyCancellationDuringFetchPhase(MultiSearchAction.NAME, restRequest); } diff --git a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java index f85c3efcbb6e8..2b1abe45f7723 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java @@ -32,20 +32,22 @@ package org.opensearch.wildfly.transport; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.RestClient; import org.opensearch.client.RestHighLevelClient; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; import javax.enterprise.inject.Produces; + +import java.net.URISyntaxException; import java.nio.file.Path; @SuppressWarnings("unused") public final class RestHighLevelClientProducer { @Produces - public RestHighLevelClient createRestHighLevelClient() { + public RestHighLevelClient 
createRestHighLevelClient() throws URISyntaxException { String httpUri = System.getProperty("opensearch.uri"); return new RestHighLevelClient(RestClient.builder(HttpHost.create(httpUri))); diff --git a/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java index 7961ca69c2d29..2f2b355baedaf 100644 --- a/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java +++ b/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java @@ -32,14 +32,15 @@ package org.opensearch.wildfly; -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.impl.classic.CloseableHttpClient; +import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse; +import org.apache.hc.client5.http.impl.classic.HttpClientBuilder; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.LuceneTestCase; @@ -78,7 +79,7 @@ private String buildBaseUrl() { return "http://localhost:" + port + "/example-app/transport"; } - public void testRestClient() throws URISyntaxException, IOException { + public void testRestClient() throws URISyntaxException, IOException, ParseException { final String baseUrl = buildBaseUrl(); try (CloseableHttpClient client = HttpClientBuilder.create().build()) { @@ -100,7 +101,7 @@ public void testRestClient() throws URISyntaxException, IOException { put.setEntity(new StringEntity(body, ContentType.APPLICATION_JSON)); try (CloseableHttpResponse response = client.execute(put)) { - int status = response.getStatusLine().getStatusCode(); + int status = response.getCode(); assertThat( "expected a 201 response but got: " + status + " - body: " + EntityUtils.toString(response.getEntity()), status, diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index bcf0b704374c9..60b704dc12f0c 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -106,6 +106,11 @@ grant codeBase "${codebase.opensearch-rest-client}" { permission java.net.NetPermission "getProxySelector"; }; +grant codeBase "${codebase.httpcore5}" { + // httpcore makes socket connections for rest tests + permission java.net.SocketPermission "*", "connect"; +}; + grant codeBase "${codebase.httpcore-nio}" { // httpcore makes socket connections for rest tests permission java.net.SocketPermission "*", "connect"; @@ -118,6 +123,7 @@ grant codeBase "${codebase.httpasyncclient}" { permission java.net.NetPermission "getProxySelector"; }; + grant codeBase "${codebase.junit-rt.jar}" { // allows IntelliJ IDEA JUnit test runner to control number of test iterations permission java.lang.reflect.ReflectPermission 
"suppressAccessChecks"; diff --git a/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java b/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java index 69710b8e5c848..eb5177bc0f39b 100644 --- a/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java +++ b/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java @@ -36,7 +36,8 @@ import java.util.Map; import joptsimple.internal.Strings; -import org.apache.http.Header; + +import org.apache.hc.core5.http.Header; import org.opensearch.test.OpenSearchTestCase; /** diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java index d7b057e5479eb..adaf95ae67a8e 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java @@ -34,8 +34,6 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpServer; -import org.apache.http.ConnectionClosedException; -import org.apache.http.HttpStatus; import org.opensearch.common.Nullable; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.blobstore.BlobContainer; @@ -46,6 +44,8 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.HttpStatus; import org.junit.After; import org.junit.Before; diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java index f082c7a45a207..28dbcf478eb86 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java @@ -35,7 +35,8 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpStatus; + +import org.apache.hc.core5.http.HttpStatus; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 1ab7785b17f5e..b20154fff9256 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -36,7 +36,8 @@ import com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.util.LuceneTestCase; @@ -2344,7 +2345,7 @@ protected static 
RestClient createRestClient( if (node.getInfo(HttpInfo.class) != null) { TransportAddress publishAddress = node.getInfo(HttpInfo.class).address().publishAddress(); InetSocketAddress address = publishAddress.address(); - hosts.add(new HttpHost(NetworkAddress.format(address.getAddress()), address.getPort(), protocol)); + hosts.add(new HttpHost(protocol, NetworkAddress.format(address.getAddress()), address.getPort())); } } RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[hosts.size()])); diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index bbf7763551bcd..348ea0a924b70 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -32,15 +32,19 @@ package org.opensearch.test.rest; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.HttpStatus; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; -import org.apache.http.ssl.SSLContexts; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpStatus; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.ssl.SSLContexts; +import org.apache.hc.core5.util.Timeout; import org.apache.lucene.util.SetOnce; import org.opensearch.LegacyESVersion; import org.opensearch.Version; @@ -136,7 +140,7 @@ public abstract class OpenSearchRestTestCase extends OpenSearchTestCase { * Convert the entity from a {@link Response} into a map of maps. */ public static Map entityAsMap(Response response) throws IOException { - XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType()); // EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation try ( XContentParser parser = xContentType.xContent() @@ -154,7 +158,7 @@ public static Map entityAsMap(Response response) throws IOExcept * Convert the entity from a {@link Response} into a list of maps. 
*/ public static List entityAsList(Response response) throws IOException { - XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType()); // EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation try ( XContentParser parser = xContentType.xContent() @@ -344,7 +348,7 @@ public boolean warningsShouldFailRequest(List warnings) { * Construct an HttpHost from the given host and port */ protected HttpHost buildHttpHost(String host, int port) { - return new HttpHost(host, port, getProtocol()); + return new HttpHost(getProtocol(), host, port); } /** @@ -845,9 +849,16 @@ protected static void configureClient(RestClientBuilder builder, Settings settin try (InputStream is = Files.newInputStream(path)) { keyStore.load(is, keystorePass.toCharArray()); } - SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(keyStore, null).build(); - SSLIOSessionStrategy sessionStrategy = new SSLIOSessionStrategy(sslcontext); - builder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder.setSSLStrategy(sessionStrategy)); + final SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(keyStore, null).build(); + builder.setHttpClientConfigCallback(httpClientBuilder -> { + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(sslcontext).build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); + }); } catch (KeyStoreException | NoSuchAlgorithmException | KeyManagementException | CertificateException e) { throw new RuntimeException("Error setting up ssl", e); } @@ -864,7 +875,9 @@ protected static void configureClient(RestClientBuilder builder, Settings settin socketTimeoutString == null ? 
"60s" : socketTimeoutString, CLIENT_SOCKET_TIMEOUT ); - builder.setRequestConfigCallback(conf -> conf.setSocketTimeout(Math.toIntExact(socketTimeout.getMillis()))); + builder.setRequestConfigCallback( + conf -> conf.setResponseTimeout(Timeout.ofMilliseconds(Math.toIntExact(socketTimeout.getMillis()))) + ); if (settings.hasValue(CLIENT_PATH_PREFIX)) { builder.setPathPrefix(settings.get(CLIENT_PATH_PREFIX)); } @@ -1082,7 +1095,7 @@ protected static Map getAsMap(final String endpoint) throws IOEx } protected static Map responseAsMap(Response response) throws IOException { - XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType()); Map responseEntity = XContentHelper.convertToMap( entityContentType.xContent(), response.getEntity().getContent(), diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java index cd5f1fe168b12..da71d0e078dc0 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java @@ -32,8 +32,8 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.Version; import org.opensearch.client.NodeSelector; import org.opensearch.client.Request; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java index 56ccb91dc3331..13ede9d44f1ad 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java @@ -32,11 +32,13 @@ package org.opensearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; @@ -188,16 +190,20 @@ public ClientYamlTestResponse callApi( if (false == restApi.isBodySupported()) { throw new IllegalArgumentException("body is not supported by [" + restApi.getName() + "] api"); } - String contentType = entity.getContentType().getValue(); + String contentType = entity.getContentType(); // randomly test the GET with source param instead of GET/POST with body - if (sendBodyAsSourceParam(supportedMethods, contentType, entity.getContentLength())) { - logger.debug("sending the request body as source param with GET method"); - queryStringParams.put("source", EntityUtils.toString(entity)); - queryStringParams.put("source_content_type", contentType); - requestMethod = HttpGet.METHOD_NAME; - entity = null; - } else { - 
requestMethod = RandomizedTest.randomFrom(supportedMethods); + try { + if (sendBodyAsSourceParam(supportedMethods, contentType, entity.getContentLength())) { + logger.debug("sending the request body as source param with GET method"); + queryStringParams.put("source", EntityUtils.toString(entity)); + queryStringParams.put("source_content_type", contentType); + requestMethod = HttpGet.METHOD_NAME; + entity = null; + } else { + requestMethod = RandomizedTest.randomFrom(supportedMethods); + } + } catch (final ParseException ex) { + throw new IOException(ex); } } else { if (restApi.isBodyRequired()) { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 78818aefe44cc..780c43b6ccc11 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -33,9 +33,9 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.entity.ContentType; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java index 8fc0554e2b31e..1e441e01c5a69 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java @@ -31,9 +31,9 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.Header; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Response; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesArray; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java index 473511825ef60..aa70f7883c4b8 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java @@ -31,7 +31,7 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Response; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java index f228c87186afd..b5449480e38ff 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java @@ -34,7 +34,8 @@ 
import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.apache.lucene.tests.util.TimeUnits; import org.opensearch.Version; import org.opensearch.client.Node; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 36186ea330021..1947982f19247 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -32,7 +32,7 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.HttpEntity; +import org.apache.hc.core5.http.HttpEntity; import org.opensearch.Version; import org.opensearch.client.NodeSelector; import org.opensearch.test.OpenSearchTestCase; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java index 1fb08934c8b8b..eceb78a832710 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java @@ -32,7 +32,6 @@ package org.opensearch.test.rest.yaml.section; -import org.apache.http.HttpHost; import org.opensearch.Version; import org.opensearch.client.Node; import org.opensearch.client.NodeSelector; @@ -43,6 +42,7 @@ import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.opensearch.test.rest.yaml.ClientYamlTestResponse; +import org.apache.hc.core5.http.HttpHost; import org.hamcrest.MatcherAssert; import java.io.IOException; From 7c03f89d990da85d8c6ec95a88f865649a67a8a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Oct 2022 16:39:25 -0700 Subject: [PATCH 047/122] Bump woodstox-core from 6.3.0 to 6.3.1 in /plugins/repository-hdfs (#4737) * Bump woodstox-core from 6.3.0 to 6.3.1 in /plugins/repository-hdfs Bumps [woodstox-core](https://github.com/FasterXML/woodstox) from 6.3.0 to 6.3.1. - [Release notes](https://github.com/FasterXML/woodstox/releases) - [Commits](https://github.com/FasterXML/woodstox/compare/woodstox-core-6.3.0...woodstox-core-6.3.1) --- updated-dependencies: - dependency-name: com.fasterxml.woodstox:woodstox-core dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index e321efc39a2bf..8b29a5d38e7c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `protobuf-java` from 3.21.2 to 3.21.7 - Bumps `azure-core` from 1.31.0 to 1.33.0 - Bumps `avro` from 1.11.0 to 1.11.1 +- Bumps `woodstox-core` from 6.3.0 to 6.3.1 ### Added diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 3cdac4c2d2560..792bdc6bacd4a 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -85,7 +85,7 @@ dependencies { api 'net.minidev:json-smart:2.4.8' api 'org.apache.zookeeper:zookeeper:3.8.0' api "io.netty:netty-all:${versions.netty}" - implementation 'com.fasterxml.woodstox:woodstox-core:6.3.0' + implementation 'com.fasterxml.woodstox:woodstox-core:6.3.1' implementation 'org.codehaus.woodstox:stax2-api:4.2.1' hdfsFixture project(':test:fixtures:hdfs-fixture') diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 deleted file mode 100644 index ebd85df98b39e..0000000000000 --- a/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -03c1df4164b107ee22ad4f24bd453ec78a0efd95 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 new file mode 100644 index 0000000000000..fb4e67404cd42 --- /dev/null +++ b/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 @@ -0,0 +1 @@ +bf29b07ca4dd81ef3c0bc18c8bd5617510a81c5d \ No newline at end of file From 10f8ed8282579ed08d6dba89fce4fd589b75802d Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Tue, 11 Oct 2022 23:13:42 -0700 Subject: [PATCH 048/122] Format changelog as per plugin requirements (#4740) --- CHANGELOG.md | 36 ++++++------------------------------ 1 file changed, 6 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b29a5d38e7c6..7ef9454e6ddd4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,15 +3,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] -### Dependencies -- Bumps `gson` from 2.9.0 to 2.9.1 -- Bumps `protobuf-java` from 3.21.2 to 3.21.7 -- Bumps `azure-core` from 1.31.0 to 1.33.0 -- Bumps `avro` from 1.11.0 to 1.11.1 -- Bumps `woodstox-core` from 6.3.0 to 6.3.1 - ### Added - - Add support for s390x architecture ([#4001](https://github.com/opensearch-project/OpenSearch/pull/4001)) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) @@ -37,16 
+29,17 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) - Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) - Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) - ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 - Bumps `jettison` from 1.5.0 to 1.5.1 - Bumps `azure-storage-common` from 12.18.0 to 12.18.1 - Bumps `forbiddenapis` from 3.3 to 3.4 - - -### Dependencies +- Bumps `gson` from 2.9.0 to 2.9.1 +- Bumps `protobuf-java` from 3.21.2 to 3.21.7 +- Bumps `azure-core` from 1.31.0 to 1.33.0 +- Bumps `avro` from 1.11.0 to 1.11.1 +- Bumps `woodstox-core` from 6.3.0 to 6.3.1 - Bumps `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) - Bumps `azure-core-http-netty` from 1.12.0 to 1.12.4 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) - Bumps `azure-core` from 1.27.0 to 1.31.0 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) @@ -59,9 +52,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) - Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - ### Changed - - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) @@ -85,9 +76,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - ### Deprecated - ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) @@ -95,9 +84,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) - Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) - ### Fixed - - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run 
([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305)) @@ -138,15 +125,11 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) - ### Security - - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) ## [2.x] - ### Added - - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) - Added RestLayer Changes for PIT stats ([#4217](https://github.com/opensearch-project/OpenSearch/pull/4217)) @@ -154,19 +137,12 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Addition of Doc values on the GeoShape Field - Addition of GeoShape ValueSource level code interfaces for accessing the DocValues. - Addition of Missing Value feature in the GeoShape Aggregations. - ### Changed - ### Deprecated - ### Removed - ### Fixed - - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) - ### Security - [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x From c92846d0ae121db54ce740790df75b36a24f804e Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Wed, 12 Oct 2022 13:32:27 -0500 Subject: [PATCH 049/122] Better plural stemmer than minimal_english (#4738) Drops the trailing "e" in taxes, dresses, watches, dishes etc that otherwise cause mismatches with plural and singular forms. 
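As a rough usage sketch (not part of this change): the filter plugs into a normal Lucene analysis chain and is exposed to index settings as the "stemmer" token filter with language "plural_english" / "pluralEnglish", mirroring the new tests. In the snippet below the PluralStemDemo class name and the sample input are invented for illustration; EnglishPluralStemFilter is the class introduced by this patch and everything else is standard Lucene API.

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.opensearch.analysis.common.EnglishPluralStemFilter;

    import java.io.StringReader;

    public class PluralStemDemo {
        public static void main(String[] args) throws Exception {
            Tokenizer tokenizer = new WhitespaceTokenizer();
            tokenizer.setReader(new StringReader("taxes dresses watches dishes boxes"));
            // Wrap the tokenizer with the new plural-aware stemmer.
            try (TokenStream stream = new EnglishPluralStemFilter(tokenizer)) {
                CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
                stream.reset();
                while (stream.incrementToken()) {
                    System.out.println(term); // prints: tax, dress, watch, dish, box
                }
                stream.end();
            }
        }
    }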
Signed-off-by: Nicholas Walter Knize Co-authored-by: Mark Harwood Co-authored-by: Nicholas Walter Knize --- CHANGELOG.md | 2 + .../common/EnglishPluralStemFilter.java | 182 ++++++++++++++++++ .../common/StemmerTokenFilterFactory.java | 2 + .../StemmerTokenFilterFactoryTests.java | 77 ++++++++ 4 files changed, 263 insertions(+) create mode 100644 modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ef9454e6ddd4..3cdedbab9cef0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -143,6 +143,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Fixed - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) +- Better plural stemmer than minimal_english ([#4738](https://github.com/opensearch-project/OpenSearch/pull/4738)) + ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD [2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java new file mode 100644 index 0000000000000..c30318a31527b --- /dev/null +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.analysis.common; + +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.en.EnglishMinimalStemFilter; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.KeywordAttribute; + +import java.io.IOException; + +public final class EnglishPluralStemFilter extends TokenFilter { + private final EnglishPluralStemmer stemmer = new EnglishPluralStemmer(); + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class); + + public EnglishPluralStemFilter(TokenStream input) { + super(input); + } + + @Override + public boolean incrementToken() throws IOException { + if (input.incrementToken()) { + if (!keywordAttr.isKeyword()) { + final int newlen = stemmer.stem(termAtt.buffer(), termAtt.length()); + termAtt.setLength(newlen); + } + return true; + } else { + return false; + } + } + + /** + * Plural stemmer for English based on the {@link EnglishMinimalStemFilter} + *

+ * This stemmer removes plurals but beyond EnglishMinimalStemFilter adds
+ * four new suffix rules to remove dangling e characters:
+ * <ul>
+ * <li>xes - "boxes" becomes "box"</li>
+ * <li>sses - "dresses" becomes "dress"</li>
+ * <li>shes - "dishes" becomes "dish"</li>
+ * <li>tches - "watches" becomes "watch"</li>
+ * </ul>
+ * See https://github.com/elastic/elasticsearch/issues/42892
+ * <p>
+ * In addition the s stemmer logic is amended so that
+ * <ul>
+ * <li>ees->ee so that bees matches bee</li>
+ * <li>ies->y only on longer words so that ties matches tie</li>
+ * <li>oes->o rule so that tomatoes matches tomato but retains e for some words eg shoes to shoe</li>
+ * </ul>
    + */ + public static class EnglishPluralStemmer { + + // Words ending in oes that retain the e when stemmed + public static final char[][] oesExceptions = { "shoes".toCharArray(), "canoes".toCharArray(), "oboes".toCharArray() }; + // Words ending in ches that retain the e when stemmed + public static final char[][] chesExceptions = { + "cliches".toCharArray(), + "avalanches".toCharArray(), + "mustaches".toCharArray(), + "moustaches".toCharArray(), + "quiches".toCharArray(), + "headaches".toCharArray(), + "heartaches".toCharArray(), + "porsches".toCharArray(), + "tranches".toCharArray(), + "caches".toCharArray() }; + + @SuppressWarnings("fallthrough") + public int stem(char s[], int len) { + if (len < 3 || s[len - 1] != 's') return len; + + switch (s[len - 2]) { + case 'u': + case 's': + return len; + case 'e': + // Modified ies->y logic from original s-stemmer - only work on strings > 4 + // so spies -> spy still but pies->pie. + // The original code also special-cased aies and eies for no good reason as far as I can tell. + // ( no words of consequence - eg http://www.thefreedictionary.com/words-that-end-in-aies ) + if (len > 4 && s[len - 3] == 'i') { + s[len - 3] = 'y'; + return len - 2; + } + + // Suffix rules to remove any dangling "e" + if (len > 3) { + // xes (but >1 prefix so we can stem "boxes->box" but keep "axes->axe") + if (len > 4 && s[len - 3] == 'x') { + return len - 2; + } + // oes + if (len > 3 && s[len - 3] == 'o') { + if (isException(s, len, oesExceptions)) { + // Only remove the S + return len - 1; + } + // Remove the es + return len - 2; + } + if (len > 4) { + // shes/sses + if (s[len - 4] == 's' && (s[len - 3] == 'h' || s[len - 3] == 's')) { + return len - 2; + } + + // ches + if (len > 4) { + if (s[len - 4] == 'c' && s[len - 3] == 'h') { + if (isException(s, len, chesExceptions)) { + // Only remove the S + return len - 1; + } + // Remove the es + return len - 2; + + } + } + } + } + + default: + return len - 1; + } + } + + private boolean isException(char[] s, int len, char[][] exceptionsList) { + for (char[] oesRule : exceptionsList) { + int rulePos = oesRule.length - 1; + int sPos = len - 1; + boolean matched = true; + while (rulePos >= 0 && sPos >= 0) { + if (oesRule[rulePos] != s[sPos]) { + matched = false; + break; + } + rulePos--; + sPos--; + } + if (matched) { + return true; + } + } + return false; + } + } + +} diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java index 5d96f01265cf6..fc045447e159e 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java @@ -154,6 +154,8 @@ public TokenStream create(TokenStream tokenStream) { return new SnowballFilter(tokenStream, new EnglishStemmer()); } else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) { return new EnglishMinimalStemFilter(tokenStream); + } else if ("plural_english".equalsIgnoreCase(language) || "pluralEnglish".equalsIgnoreCase(language)) { + return new EnglishPluralStemFilter(tokenStream); } else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) { return new EnglishPossessiveFilter(tokenStream); diff --git 
a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java index 2cd7b74cd8c35..18d3727475065 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java @@ -111,6 +111,83 @@ public void testPorter2FilterFactory() throws IOException { } } + public void testEnglishPluralFilter() throws IOException { + int iters = scaledRandomIntBetween(20, 100); + for (int i = 0; i < iters; i++) { + + Version v = VersionUtils.randomVersion(random()); + Settings settings = Settings.builder() + .put("index.analysis.filter.my_plurals.type", "stemmer") + .put("index.analysis.filter.my_plurals.language", "plural_english") + .put("index.analysis.analyzer.my_plurals.tokenizer", "whitespace") + .put("index.analysis.analyzer.my_plurals.filter", "my_plurals") + .put(SETTING_VERSION_CREATED, v) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, PLUGIN); + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_plurals"); + assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class)); + Tokenizer tokenizer = new WhitespaceTokenizer(); + tokenizer.setReader(new StringReader("dresses")); + TokenStream create = tokenFilter.create(tokenizer); + IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers; + NamedAnalyzer analyzer = indexAnalyzers.get("my_plurals"); + assertThat(create, instanceOf(EnglishPluralStemFilter.class)); + + // Check old EnglishMinimalStemmer ("S" stemmer) logic + assertAnalyzesTo(analyzer, "phones", new String[] { "phone" }); + assertAnalyzesTo(analyzer, "horses", new String[] { "horse" }); + assertAnalyzesTo(analyzer, "cameras", new String[] { "camera" }); + + // The orginal s stemmer gives up on stemming oes words because English has no fixed rule for the stem + // (see https://howtospell.co.uk/making-O-words-plural ) + // This stemmer removes the es but retains e for a small number of exceptions + assertAnalyzesTo(analyzer, "mosquitoes", new String[] { "mosquito" }); + assertAnalyzesTo(analyzer, "heroes", new String[] { "hero" }); + // oes exceptions that retain the e. 
+ assertAnalyzesTo(analyzer, "shoes", new String[] { "shoe" }); + assertAnalyzesTo(analyzer, "horseshoes", new String[] { "horseshoe" }); + assertAnalyzesTo(analyzer, "canoes", new String[] { "canoe" }); + assertAnalyzesTo(analyzer, "oboes", new String[] { "oboe" }); + + // Check improved EnglishPluralStemFilter logic + // sses + assertAnalyzesTo(analyzer, "dresses", new String[] { "dress" }); + assertAnalyzesTo(analyzer, "possess", new String[] { "possess" }); + assertAnalyzesTo(analyzer, "possesses", new String[] { "possess" }); + // xes + assertAnalyzesTo(analyzer, "boxes", new String[] { "box" }); + assertAnalyzesTo(analyzer, "axes", new String[] { "axe" }); + // shes + assertAnalyzesTo(analyzer, "dishes", new String[] { "dish" }); + assertAnalyzesTo(analyzer, "washes", new String[] { "wash" }); + // ees + assertAnalyzesTo(analyzer, "employees", new String[] { "employee" }); + assertAnalyzesTo(analyzer, "bees", new String[] { "bee" }); + // tch + assertAnalyzesTo(analyzer, "watches", new String[] { "watch" }); + assertAnalyzesTo(analyzer, "itches", new String[] { "itch" }); + // ies->y but only for length >4 + assertAnalyzesTo(analyzer, "spies", new String[] { "spy" }); + assertAnalyzesTo(analyzer, "ties", new String[] { "tie" }); + assertAnalyzesTo(analyzer, "lies", new String[] { "lie" }); + assertAnalyzesTo(analyzer, "pies", new String[] { "pie" }); + assertAnalyzesTo(analyzer, "dies", new String[] { "die" }); + + assertAnalyzesTo(analyzer, "lunches", new String[] { "lunch" }); + assertAnalyzesTo(analyzer, "avalanches", new String[] { "avalanche" }); + assertAnalyzesTo(analyzer, "headaches", new String[] { "headache" }); + assertAnalyzesTo(analyzer, "caches", new String[] { "cache" }); + assertAnalyzesTo(analyzer, "beaches", new String[] { "beach" }); + assertAnalyzesTo(analyzer, "britches", new String[] { "britch" }); + assertAnalyzesTo(analyzer, "cockroaches", new String[] { "cockroach" }); + assertAnalyzesTo(analyzer, "cliches", new String[] { "cliche" }); + assertAnalyzesTo(analyzer, "quiches", new String[] { "quiche" }); + + } + } + public void testMultipleLanguagesThrowsException() throws IOException { Version v = VersionUtils.randomVersion(random()); Settings settings = Settings.builder() From beb99150a84d54bb26182d50e363beea79d1a09a Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Wed, 12 Oct 2022 14:49:01 -0500 Subject: [PATCH 050/122] Install and configure Log4j JUL adapter for Lucene 9.4 (#4754) Adds log4j-jul dependency (along with license and notice files) and configures the java.util.logging log4j adapter to route all JUL logs to the proper opensearch locations set up through the LogConfigurator for Lucene 9.4 compatibility. 
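For illustration, the routing works by installing the log4j-jul implementation of the java.util.logging LogManager before any JUL loggers are created; the following is only a standalone sketch (not part of this patch), using the property and class names of the log4j-jul adapter:

    // Install the Log4j 2 backed LogManager. This must happen before the first
    // call to java.util.logging.LogManager.getLogManager(), i.e. before any JUL
    // logger (such as the ones created by Lucene 9.4) is instantiated.
    System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager");
    // JUL loggers created afterwards are handled by the configured Log4j appenders.
    java.util.logging.Logger lucene = java.util.logging.Logger.getLogger("org.apache.lucene");
    lucene.info("this record is routed through Log4j");

In this patch the property is set in LogConfigurator (see the diff below) so it is applied together with the other opensearch.logs.* system properties.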
Signed-off-by: Nicholas Knize --- CHANGELOG.md | 1 + client/rest/build.gradle | 1 + qa/os/build.gradle | 1 + server/build.gradle | 1 + server/licenses/log4j-jul-2.17.1.jar.sha1 | 1 + server/licenses/log4j-jul-LICENSE.txt | 202 ++++++++++++++++++ server/licenses/log4j-jul-NOTICE.txt | 20 ++ .../common/logging/LogConfigurator.java | 1 + 8 files changed, 228 insertions(+) create mode 100644 server/licenses/log4j-jul-2.17.1.jar.sha1 create mode 100644 server/licenses/log4j-jul-LICENSE.txt create mode 100644 server/licenses/log4j-jul-NOTICE.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cdedbab9cef0..28a542206eb5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -137,6 +137,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Addition of Doc values on the GeoShape Field - Addition of GeoShape ValueSource level code interfaces for accessing the DocValues. - Addition of Missing Value feature in the GeoShape Aggregations. +- Install and configure Log4j JUL Adapter for Lucene 9.4 ([#4754](https://github.com/opensearch-project/OpenSearch/pull/4754)) ### Changed ### Deprecated ### Removed diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 178359948f32e..eacef14d17ce2 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -56,6 +56,7 @@ dependencies { testImplementation "net.bytebuddy:byte-buddy:${versions.bytebuddy}" testImplementation "org.apache.logging.log4j:log4j-api:${versions.log4j}" testImplementation "org.apache.logging.log4j:log4j-core:${versions.log4j}" + testImplementation "org.apache.logging.log4j:log4j-jul:${versions.log4j}" testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" } diff --git a/qa/os/build.gradle b/qa/os/build.gradle index 92c5e4f154ad8..9a1e6f781faec 100644 --- a/qa/os/build.gradle +++ b/qa/os/build.gradle @@ -42,6 +42,7 @@ dependencies { api "org.apache.httpcomponents:fluent-hc:${versions.httpclient}" api "org.apache.logging.log4j:log4j-api:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" + api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" api "org.apache.logging.log4j:log4j-jcl:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/server/build.gradle b/server/build.gradle index 9d9d12e798eab..d50be48afc023 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -129,6 +129,7 @@ dependencies { // logging api "org.apache.logging.log4j:log4j-api:${versions.log4j}" + api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional // jna diff --git a/server/licenses/log4j-jul-2.17.1.jar.sha1 b/server/licenses/log4j-jul-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..4afb381a696e9 --- /dev/null +++ b/server/licenses/log4j-jul-2.17.1.jar.sha1 @@ -0,0 +1 @@ +881333b463d47828eda7443b19811763367b1916 \ No newline at end of file diff --git a/server/licenses/log4j-jul-LICENSE.txt b/server/licenses/log4j-jul-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/server/licenses/log4j-jul-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/server/licenses/log4j-jul-NOTICE.txt b/server/licenses/log4j-jul-NOTICE.txt new file mode 100644 index 0000000000000..243a0391fb574 --- /dev/null +++ b/server/licenses/log4j-jul-NOTICE.txt @@ -0,0 +1,20 @@ +Apache Log4j +Copyright 1999-2021 Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +ResolverUtil.java +Copyright 2005-2006 Tim Fennell + +Dumbster SMTP test server +Copyright 2004 Jason Paul Kitchen + +TypeUtil.java +Copyright 2002-2012 Ramnivas Laddad, Juergen Hoeller, Chris Beams + +picocli (http://picocli.info) +Copyright 2017 Remko Popma + +TimeoutBlockingWaitStrategy.java and parts of Util.java +Copyright 2011 LMAX Ltd. diff --git a/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java b/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java index e42727b2aa2af..c0405f9e52b77 100644 --- a/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java +++ b/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java @@ -259,6 +259,7 @@ private static void configureLoggerLevels(final Settings settings) { */ @SuppressForbidden(reason = "sets system property for logging configuration") private static void setLogConfigurationSystemProperty(final Path logsPath, final Settings settings) { + System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager"); System.setProperty("opensearch.logs.base_path", logsPath.toString()); System.setProperty("opensearch.logs.cluster_name", ClusterName.CLUSTER_NAME_SETTING.get(settings).value()); System.setProperty("opensearch.logs.node_name", Node.NODE_NAME_SETTING.get(settings)); From d15795a7aca488c1fadb04b3c8d9f1a3b02e4056 Mon Sep 17 00:00:00 2001 From: Anshu Agarwal Date: Thu, 13 Oct 2022 01:35:36 +0530 Subject: [PATCH 051/122] Fix weighted routing metadata deserialization error during node restart (#4691) * Fix weighted routing metadata deserialization error during node restart Signed-off-by: Anshu Agarwal --- CHANGELOG.md | 1 + .../cluster/routing/WeightedRoutingIT.java | 42 +++++++++++++++++++ .../metadata/WeightedRoutingMetadata.java | 2 - 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 28a542206eb5a..09d7cb8ff79df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -74,6 +74,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) - Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) +- Fix weighted routing metadata deserialization error on process restart ([#4691](https://github.com/opensearch-project/OpenSearch/pull/4691)) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) ### Deprecated diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java index 6cf8292095c6a..61f82877bf12b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java @@ -242,4 +242,46 @@ public void testGetWeightedRouting_WeightsAreSet() throws IOException { assertEquals(weightedRouting, weightedRoutingResponse.weights()); assertEquals("3.0", weightedRoutingResponse.getLocalNodeWeight()); } + + public void testWeightedRoutingMetadataOnOSProcessRestart() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + // put api call to set weights + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertEquals(response.isAcknowledged(), true); + + ensureStableCluster(3); + + // routing weights are set in cluster metadata + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + ensureGreen(); + + // Restart a random data node and check that OS process comes healthy + internalCluster().restartRandomDataNode(); + ensureGreen(); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java index 27beb21f28f7c..07cdc949c4529 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java @@ -90,8 +90,6 @@ public static WeightedRoutingMetadata fromXContent(XContentParser parser) throws Map weights = new HashMap<>(); WeightedRouting weightedRouting = null; XContentParser.Token token; - // move to the first alias - parser.nextToken(); String awarenessField = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { From 431bdeb1bb17090206ed2e26a486b0ddd00f08bb Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 12 Oct 2022 21:03:49 -0700 Subject: [PATCH 052/122] Introduce experimental searchable snapshot API (#4680) This commit adds a new parameter to the snapshot restore API to restore to a new type of "remote snapshot" index where, unlike traditional snapshot restores, the index data is not all downloaded to disk and instead is read on-demand at search time. The feature is functional with this commit, and includes a simple end-to-end integration test, but is far from complete. See tracking issue #2919 for the rest of the work planned/underway. All new capabilities are gated behind a new searchable snapshot feature flag. 
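As an illustration of the intended usage, the sketch below mirrors the end-to-end integration test added in this change (repository and snapshot names are placeholders, and the opensearch.experimental.feature.searchable_snapshot.enabled system property must be set to true for the new code path to be available):

    // Restore an existing snapshot as a "remote snapshot" index: segment data stays
    // in the snapshot repository and is fetched on demand at search time instead of
    // being copied to the node's local disk during restore.
    client.admin()
        .cluster()
        .prepareRestoreSnapshot("test-repo", "test-snap")
        .setRenamePattern("(.+)")
        .setRenameReplacement("$1-copy")
        .setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT)
        .setWaitForCompletion(true)
        .execute()
        .actionGet();

The equivalent REST restore request carries the new storage_type field in its body, which is parsed in RestoreSnapshotRequest.source() below.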
Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 2 + .../snapshots/SearchableSnapshotIT.java | 135 ++++++++++++++ .../restore/RestoreSnapshotRequest.java | 78 +++++++- .../RestoreSnapshotRequestBuilder.java | 8 + .../cluster/routing/RecoverySource.java | 33 +++- .../common/settings/IndexScopedSettings.java | 11 +- .../opensearch/common/util/FeatureFlags.java | 8 + ...ransportNodesListGatewayStartedShards.java | 3 +- .../org/opensearch/index/IndexModule.java | 54 ++++-- .../org/opensearch/index/IndexSettings.java | 24 +++ .../opensearch/index/shard/IndexShard.java | 35 ++-- .../RemoveCorruptedShardDataCommand.java | 3 +- .../index/shard/ShardStateMetadata.java | 47 ++++- .../opensearch/index/shard/StoreRecovery.java | 3 - .../InMemoryRemoteSnapshotDirectory.java | 169 ++++++++++++++++++ .../store/RemoteSnapshotDirectoryFactory.java | 49 +++++ .../opensearch/indices/IndicesService.java | 12 +- .../main/java/org/opensearch/node/Node.java | 20 ++- .../opensearch/snapshots/RestoreService.java | 42 ++++- .../index/shard/IndexShardTests.java | 10 +- .../index/shard/ShardPathTests.java | 15 +- .../index/store/FsDirectoryFactoryTests.java | 4 +- .../test/store/MockFSDirectoryFactory.java | 7 +- 23 files changed, 705 insertions(+), 67 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java create mode 100644 server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 09d7cb8ff79df..4310103803fbe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) - Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) - Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) +- Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680)) + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java new file mode 100644 index 0000000000000..96fcf0053c9ab --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -0,0 +1,135 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.snapshots; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; + +import org.hamcrest.MatcherAssert; +import org.junit.BeforeClass; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.GroupShardsIterator; +import 
org.opensearch.cluster.routing.ShardIterator; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.io.PathUtils; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.Index; +import org.opensearch.monitor.fs.FsInfo; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; +import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; + +public final class SearchableSnapshotIT extends AbstractSnapshotIntegTestCase { + + @BeforeClass + public static void assumeFeatureFlag() { + assumeTrue( + "Searchable snapshot feature flag is enabled", + Boolean.parseBoolean(System.getProperty(FeatureFlags.SEARCHABLE_SNAPSHOT)) + ); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + public void testCreateSearchableSnapshot() throws Exception { + final Client client = client(); + createRepository("test-repo", "fs"); + createIndex( + "test-idx-1", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + ); + createIndex( + "test-idx-2", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + ); + ensureGreen(); + indexRandomDocs("test-idx-1", 100); + indexRandomDocs("test-idx-2", 100); + + logger.info("--> snapshot"); + final CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test-idx-1", "test-idx-2") + .get(); + MatcherAssert.assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + MatcherAssert.assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + + assertTrue(client.admin().indices().prepareDelete("test-idx-1", "test-idx-2").get().isAcknowledged()); + + logger.info("--> restore indices as 'remote_snapshot'"); + client.admin() + .cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setRenamePattern("(.+)") + .setRenameReplacement("$1-copy") + .setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT) + .setWaitForCompletion(true) + .execute() + .actionGet(); + ensureGreen(); + + assertDocCount("test-idx-1-copy", 100L); + assertDocCount("test-idx-2-copy", 100L); + assertIndexDirectoryDoesNotExist("test-idx-1-copy", "test-idx-2-copy"); + } + + /** + * Picks a shard out of the cluster state for each given index and asserts + * that the 'index' directory does not exist in the node's file system. + * This assertion is digging a bit into the implementation details to + * verify that the Lucene segment files are not copied from the snapshot + * repository to the node's local disk for a remote snapshot index. + */ + private void assertIndexDirectoryDoesNotExist(String... 
indexNames) { + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + for (String indexName : indexNames) { + final Index index = state.metadata().index(indexName).getIndex(); + // Get the primary shards for the given index + final GroupShardsIterator shardIterators = state.getRoutingTable() + .activePrimaryShardsGrouped(new String[] { indexName }, false); + // Randomly pick one of the shards + final List iterators = iterableAsArrayList(shardIterators); + final ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators); + final ShardRouting shardRouting = shardIterator.nextOrNull(); + assertNotNull(shardRouting); + assertTrue(shardRouting.primary()); + assertTrue(shardRouting.assignedToNode()); + // Get the file system stats for the assigned node + final String nodeId = shardRouting.currentNodeId(); + final NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats(nodeId).addMetric(FS.metricName()).get(); + for (FsInfo.Path info : nodeStats.getNodes().get(0).getFs()) { + // Build the expected path for the index data for a "normal" + // index and assert it does not exist + final String path = info.getPath(); + final Path file = PathUtils.get(path) + .resolve("indices") + .resolve(index.getUUID()) + .resolve(Integer.toString(shardRouting.getId())) + .resolve("index"); + MatcherAssert.assertThat("Expect file not to exist: " + file, Files.exists(file), is(false)); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 1b673217a248b..3ecf5ab19c0e4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.restore; import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; @@ -42,6 +43,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; @@ -68,6 +70,38 @@ public class RestoreSnapshotRequest extends ClusterManagerNodeRequest source) { } else { throw new IllegalArgumentException("malformed ignore_index_settings section, should be an array of strings"); } + } else if (name.equals("storage_type")) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT)) { + if (entry.getValue() instanceof String) { + storageType(StorageType.fromString((String) entry.getValue())); + } else { + throw new IllegalArgumentException("malformed storage_type"); + } + } else { + throw new IllegalArgumentException( + "Unsupported parameter " + name + ". 
Feature flag is not enabled for this experimental feature" + ); + } } else { if (IndicesOptions.isIndicesOptions(name) == false) { throw new IllegalArgumentException("Unknown parameter " + name); @@ -579,6 +648,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(ignoreIndexSetting); } builder.endArray(); + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && storageType != null) { + storageType.toXContent(builder); + } builder.endObject(); return builder; } @@ -605,7 +677,8 @@ public boolean equals(Object o) { && Objects.equals(renameReplacement, that.renameReplacement) && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) - && Objects.equals(snapshotUuid, that.snapshotUuid); + && Objects.equals(snapshotUuid, that.snapshotUuid) + && Objects.equals(storageType, that.storageType); } @Override @@ -621,7 +694,8 @@ public int hashCode() { partial, includeAliases, indexSettings, - snapshotUuid + snapshotUuid, + storageType ); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 68397851699fb..0104637a00035 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -248,4 +248,12 @@ public RestoreSnapshotRequestBuilder setIgnoreIndexSettings(List ignoreI request.ignoreIndexSettings(ignoreIndexSettings); return this; } + + /** + * Sets the storage type + */ + public RestoreSnapshotRequestBuilder setStorageType(RestoreSnapshotRequest.StorageType storageType) { + request.storageType(storageType); + return this; + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index 539773296ed74..728bf9d1ae90e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -38,6 +38,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; @@ -257,12 +258,24 @@ public static class SnapshotRecoverySource extends RecoverySource { private final Snapshot snapshot; private final IndexId index; private final Version version; + private final boolean isSearchableSnapshot; public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version version, IndexId indexId) { + this(restoreUUID, snapshot, version, indexId, false); + } + + public SnapshotRecoverySource( + String restoreUUID, + Snapshot snapshot, + Version version, + IndexId indexId, + boolean isSearchableSnapshot + ) { this.restoreUUID = restoreUUID; this.snapshot = Objects.requireNonNull(snapshot); this.version = Objects.requireNonNull(version); this.index = Objects.requireNonNull(indexId); + this.isSearchableSnapshot = isSearchableSnapshot; } 
SnapshotRecoverySource(StreamInput in) throws IOException { @@ -274,6 +287,11 @@ public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version ver } else { index = new IndexId(in.readString(), IndexMetadata.INDEX_UUID_NA_VALUE); } + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_3_0_0)) { + isSearchableSnapshot = in.readBoolean(); + } else { + isSearchableSnapshot = false; + } } public String restoreUUID() { @@ -298,6 +316,10 @@ public Version version() { return version; } + public boolean isSearchableSnapshot() { + return isSearchableSnapshot; + } + @Override protected void writeAdditionalFields(StreamOutput out) throws IOException { out.writeString(restoreUUID); @@ -308,6 +330,9 @@ protected void writeAdditionalFields(StreamOutput out) throws IOException { } else { out.writeString(index.getName()); } + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(isSearchableSnapshot); + } } @Override @@ -321,7 +346,8 @@ public void addAdditionalFields(XContentBuilder builder, ToXContent.Params param .field("snapshot", snapshot.getSnapshotId().getName()) .field("version", version.toString()) .field("index", index.getName()) - .field("restoreUUID", restoreUUID); + .field("restoreUUID", restoreUUID) + .field("isSearchableSnapshot", isSearchableSnapshot); } @Override @@ -342,12 +368,13 @@ public boolean equals(Object o) { return restoreUUID.equals(that.restoreUUID) && snapshot.equals(that.snapshot) && index.equals(that.index) - && version.equals(that.version); + && version.equals(that.version) + && isSearchableSnapshot == that.isSearchableSnapshot; } @Override public int hashCode() { - return Objects.hash(restoreUUID, snapshot, index, version); + return Objects.hash(restoreUUID, snapshot, index, version, isSearchableSnapshot); } } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 7be9adc786f24..079fc38415328 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -221,12 +221,19 @@ public final class IndexScopedSettings extends AbstractScopedSettings { */ public static final Map> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( FeatureFlags.REPLICATION_TYPE, - Collections.singletonList(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING), + List.of(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING), FeatureFlags.REMOTE_STORE, - Arrays.asList( + List.of( IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING + ), + FeatureFlags.SEARCHABLE_SNAPSHOT, + List.of( + IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY, + IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID, + IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME, + IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID ) ); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index fa39dc9ac5aa0..7297479776da9 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -29,6 +29,14 @@ public class FeatureFlags { */ public static final String REMOTE_STORE = "opensearch.experimental.feature.remote_store.enabled"; + /** + * Gates 
the functionality of a new parameter to the snapshot restore API + * that allows for creation of a new index type that searches a snapshot + * directly in a remote repository without restoring all index data to disk + * ahead of time. + */ + public static final String SEARCHABLE_SNAPSHOT = "opensearch.experimental.feature.searchable_snapshot.enabled"; + /** * Used to test feature flags whose values are expected to be booleans. * This method returns true if the value is "true" (case-insensitive), diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index 5b79ca5970e63..fb114bff9aa12 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -159,7 +159,8 @@ protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { nodeEnv.availableShardPaths(request.shardId) ); if (shardStateMetadata != null) { - if (indicesService.getShardOrNull(shardId) == null) { + if (indicesService.getShardOrNull(shardId) == null + && shardStateMetadata.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL) { final String customDataPath; if (request.getCustomDataPath() != null) { customDataPath = request.getCustomDataPath(); diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index e52a2ba39ed52..2a462f6165184 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -70,12 +70,14 @@ import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; +import org.opensearch.index.store.RemoteSnapshotDirectoryFactory; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.threadpool.ThreadPool; @@ -94,6 +96,7 @@ import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; /** * IndexModule represents the central extension point for index level custom implementations like: @@ -390,15 +393,6 @@ IndexEventListener freeze() { // pkg private for testing } } - public static boolean isBuiltinType(String storeType) { - for (Type type : Type.values()) { - if (type.match(storeType)) { - return true; - } - } - return false; - } - /** * Type of file system * @@ -409,7 +403,8 @@ public enum Type { NIOFS("niofs"), MMAPFS("mmapfs"), SIMPLEFS("simplefs"), - FS("fs"); + FS("fs"), + REMOTE_SNAPSHOT("remote_snapshot"); private final String settingsKey; private final boolean deprecated; @@ -426,7 +421,7 @@ public enum Type { private static final Map TYPES; static { - final Map types = new HashMap<>(4); + final Map types = new HashMap<>(values().length); for (final Type type : values()) { types.put(type.settingsKey, type); } @@ 
-441,6 +436,10 @@ public boolean isDeprecated() { return deprecated; } + static boolean containsSettingsKey(String key) { + return TYPES.containsKey(key); + } + public static Type fromSettingsKey(final String key) { final Type type = TYPES.get(key); if (type == null) { @@ -459,6 +458,13 @@ public boolean match(String setting) { return getSettingsKey().equals(setting); } + /** + * Convenience method to check whether the given IndexSettings contains + * an {@link #INDEX_STORE_TYPE_SETTING} set to the value of this type. + */ + public boolean match(IndexSettings settings) { + return match(INDEX_STORE_TYPE_SETTING.get(settings.getSettings())); + } } public static Type defaultStoreType(final boolean allowMmap) { @@ -562,7 +568,7 @@ private static IndexStorePlugin.DirectoryFactory getDirectoryFactory( if (storeType.isEmpty() || Type.FS.getSettingsKey().equals(storeType)) { type = defaultStoreType(allowMmap); } else { - if (isBuiltinType(storeType)) { + if (Type.containsSettingsKey(storeType)) { type = Type.fromSettingsKey(storeType); } else { type = null; @@ -572,7 +578,7 @@ private static IndexStorePlugin.DirectoryFactory getDirectoryFactory( throw new IllegalArgumentException("store type [" + storeType + "] is not allowed because mmap is disabled"); } final IndexStorePlugin.DirectoryFactory factory; - if (storeType.isEmpty() || isBuiltinType(storeType)) { + if (storeType.isEmpty()) { factory = DEFAULT_DIRECTORY_FACTORY; } else { factory = indexStoreFactories.get(storeType); @@ -641,4 +647,26 @@ private void ensureNotFrozen() { } } + public static Map createBuiltInDirectoryFactories( + Supplier repositoriesService + ) { + final Map factories = new HashMap<>(); + for (Type type : Type.values()) { + switch (type) { + case HYBRIDFS: + case NIOFS: + case FS: + case MMAPFS: + case SIMPLEFS: + factories.put(type.getSettingsKey(), DEFAULT_DIRECTORY_FACTORY); + break; + case REMOTE_SNAPSHOT: + factories.put(type.getSettingsKey(), new RemoteSnapshotDirectoryFactory(repositoriesService)); + break; + default: + throw new IllegalStateException("No directory factory mapping for built-in type " + type); + } + } + return factories; + } } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 00daea147f16f..7648f0a192ce7 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -550,6 +550,30 @@ public final class IndexSettings { Property.Dynamic ); + public static final Setting SEARCHABLE_SNAPSHOT_REPOSITORY = Setting.simpleString( + "index.searchable_snapshot.repository", + Property.IndexScope, + Property.InternalIndex + ); + + public static final Setting SEARCHABLE_SNAPSHOT_ID_UUID = Setting.simpleString( + "index.searchable_snapshot.snapshot_id.uuid", + Property.IndexScope, + Property.InternalIndex + ); + + public static final Setting SEARCHABLE_SNAPSHOT_ID_NAME = Setting.simpleString( + "index.searchable_snapshot.snapshot_id.name", + Property.IndexScope, + Property.InternalIndex + ); + + public static final Setting SEARCHABLE_SNAPSHOT_INDEX_ID = Setting.simpleString( + "index.searchable_snapshot.index.id", + Property.IndexScope, + Property.InternalIndex + ); + private final Index index; private final Version version; private final Logger logger; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 52ecc5bc66607..3a3c4b19a02f6 100644 --- 
a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -678,7 +678,7 @@ public void onFailure(Exception e) { this.shardRouting = newRouting; assert this.shardRouting.primary() == false || this.shardRouting.started() == false || // note that we use started and not - // active to avoid relocating shards + // active to avoid relocating shards this.indexShardOperationPermits.isBlocked() || // if permits are blocked, we are still transitioning this.replicationTracker.isPrimaryMode() : "a started primary with non-pending operation term must be in primary mode " + this.shardRouting; @@ -1990,7 +1990,12 @@ public void openEngineAndRecoverFromTranslog() throws IOException { translogRecoveryStats::incrementRecoveredOperations ); }; - loadGlobalCheckpointToReplicationTracker(); + + // Do not load the global checkpoint if this is a remote snapshot index + if (IndexModule.Type.REMOTE_SNAPSHOT.match(indexSettings) == false) { + loadGlobalCheckpointToReplicationTracker(); + } + innerOpenEngineAndTranslog(replicationTracker); getEngine().translogManager() .recoverFromTranslog(translogRecoveryRunner, getEngine().getProcessedLocalCheckpoint(), Long.MAX_VALUE); @@ -3089,13 +3094,18 @@ public void startRecovery( } break; case SNAPSHOT: - final String repo = ((SnapshotRecoverySource) recoveryState.getRecoverySource()).snapshot().getRepository(); - executeRecovery( - "from snapshot", - recoveryState, - recoveryListener, - l -> restoreFromRepository(repositoriesService.repository(repo), l) - ); + final SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) recoveryState.getRecoverySource(); + if (recoverySource.isSearchableSnapshot()) { + executeRecovery("from snapshot (remote)", recoveryState, recoveryListener, this::recoverFromStore); + } else { + final String repo = recoverySource.snapshot().getRepository(); + executeRecovery( + "from snapshot", + recoveryState, + recoveryListener, + l -> restoreFromRepository(repositoriesService.repository(repo), l) + ); + } break; case LOCAL_SHARDS: final IndexMetadata indexMetadata = indexSettings().getIndexMetadata(); @@ -3256,10 +3266,15 @@ private static void persistMetadata( writeReason = "routing changed from " + currentRouting + " to " + newRouting; } logger.trace("{} writing shard state, reason [{}]", shardId, writeReason); + + final ShardStateMetadata.IndexDataLocation indexDataLocation = IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.exists( + indexSettings.getSettings() + ) ? 
ShardStateMetadata.IndexDataLocation.REMOTE : ShardStateMetadata.IndexDataLocation.LOCAL; final ShardStateMetadata newShardStateMetadata = new ShardStateMetadata( newRouting.primary(), indexSettings.getUUID(), - newRouting.allocationId() + newRouting.allocationId(), + indexDataLocation ); ShardStateMetadata.FORMAT.writeAndCleanup(newShardStateMetadata, shardPath.getShardStatePath()); } else { diff --git a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java index ccc620fc8cf64..c7e380f842fa0 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -484,7 +484,8 @@ private void newAllocationId(ShardPath shardPath, Terminal terminal) throws IOEx final ShardStateMetadata newShardStateMetadata = new ShardStateMetadata( shardStateMetadata.primary, shardStateMetadata.indexUUID, - newAllocationId + newAllocationId, + ShardStateMetadata.IndexDataLocation.LOCAL ); ShardStateMetadata.FORMAT.writeAndCleanup(newShardStateMetadata, shardStatePath); diff --git a/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java b/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java index 9cd9149cda913..9e334bc6ffd54 100644 --- a/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java +++ b/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java @@ -56,17 +56,39 @@ public final class ShardStateMetadata { private static final String PRIMARY_KEY = "primary"; private static final String INDEX_UUID_KEY = "index_uuid"; private static final String ALLOCATION_ID_KEY = "allocation_id"; + private static final String INDEX_DATA_LOCATION_KEY = "index_data_location"; + + /** + * Enumeration of types of data locations for an index + */ + public enum IndexDataLocation { + /** + * Indicates index data is on the local disk + */ + LOCAL, + /** + * Indicates index data is remote, such as for a searchable snapshot + * index + */ + REMOTE + } public final String indexUUID; public final boolean primary; @Nullable public final AllocationId allocationId; // can be null if we read from legacy format (see fromXContent and MultiDataPathUpgrader) + public final IndexDataLocation indexDataLocation; public ShardStateMetadata(boolean primary, String indexUUID, AllocationId allocationId) { + this(primary, indexUUID, allocationId, IndexDataLocation.LOCAL); + } + + public ShardStateMetadata(boolean primary, String indexUUID, AllocationId allocationId, IndexDataLocation indexDataLocation) { assert indexUUID != null; this.primary = primary; this.indexUUID = indexUUID; this.allocationId = allocationId; + this.indexDataLocation = Objects.requireNonNull(indexDataLocation); } @Override @@ -89,6 +111,9 @@ public boolean equals(Object o) { if (Objects.equals(allocationId, that.allocationId) == false) { return false; } + if (Objects.equals(indexDataLocation, that.indexDataLocation) == false) { + return false; + } return true; } @@ -98,17 +123,16 @@ public int hashCode() { int result = indexUUID.hashCode(); result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0); result = 31 * result + (primary ? 
1 : 0); + result = 31 * result + indexDataLocation.hashCode(); return result; } @Override public String toString() { - return "primary [" + primary + "], allocation [" + allocationId + "]"; + return "primary [" + primary + "], allocation [" + allocationId + "], index data location [" + indexDataLocation + "]"; } - public static final MetadataStateFormat FORMAT = new MetadataStateFormat( - SHARD_STATE_FILE_PREFIX - ) { + public static final MetadataStateFormat FORMAT = new MetadataStateFormat<>(SHARD_STATE_FILE_PREFIX) { @Override protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException { @@ -124,6 +148,11 @@ public void toXContent(XContentBuilder builder, ShardStateMetadata shardStateMet if (shardStateMetadata.allocationId != null) { builder.field(ALLOCATION_ID_KEY, shardStateMetadata.allocationId); } + // Omit the index data location field if it is LOCAL (the implicit default) + // to maintain compatibility for local indices + if (shardStateMetadata.indexDataLocation != IndexDataLocation.LOCAL) { + builder.field(INDEX_DATA_LOCATION_KEY, shardStateMetadata.indexDataLocation); + } } @Override @@ -136,6 +165,7 @@ public ShardStateMetadata fromXContent(XContentParser parser) throws IOException String currentFieldName = null; String indexUUID = IndexMetadata.INDEX_UUID_NA_VALUE; AllocationId allocationId = null; + IndexDataLocation indexDataLocation = IndexDataLocation.LOCAL; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -144,6 +174,13 @@ public ShardStateMetadata fromXContent(XContentParser parser) throws IOException primary = parser.booleanValue(); } else if (INDEX_UUID_KEY.equals(currentFieldName)) { indexUUID = parser.text(); + } else if (INDEX_DATA_LOCATION_KEY.equals(currentFieldName)) { + final String stringValue = parser.text(); + try { + indexDataLocation = IndexDataLocation.valueOf(stringValue); + } catch (IllegalArgumentException e) { + throw new CorruptStateException("unexpected value for data location [" + stringValue + "]"); + } } else { throw new CorruptStateException("unexpected field in shard state [" + currentFieldName + "]"); } @@ -160,7 +197,7 @@ public ShardStateMetadata fromXContent(XContentParser parser) throws IOException if (primary == null) { throw new CorruptStateException("missing value for [primary] in shard state"); } - return new ShardStateMetadata(primary, indexUUID, allocationId); + return new ShardStateMetadata(primary, indexUUID, allocationId, indexDataLocation); } }; } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 06916c4cc87fe..6ca5036808818 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -104,9 +104,6 @@ final class StoreRecovery { */ void recoverFromStore(final IndexShard indexShard, ActionListener listener) { if (canRecover(indexShard)) { - RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType(); - assert recoveryType == RecoverySource.Type.EMPTY_STORE || recoveryType == RecoverySource.Type.EXISTING_STORE - : "expected store recovery type but was: " + recoveryType; ActionListener.completeWith(recoveryListener(indexShard, listener), () -> { logger.debug("starting recovery from store ..."); internalRecoverFromStore(indexShard); diff --git 
a/server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java b/server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java new file mode 100644 index 0000000000000..0757d88a4099a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java @@ -0,0 +1,169 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.index.store; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.NoLockFactory; +import org.apache.lucene.util.SetOnce; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.SnapshotId; + +/** + * Trivial in-memory implementation of a Directory that reads from a snapshot + * in a repository. This is functional but is only temporary to demonstrate + * functional searchable snapshot functionality. The proper implementation will + * be implemented per https://github.com/opensearch-project/OpenSearch/issues/3114. + * + * @opensearch.internal + */ +public final class InMemoryRemoteSnapshotDirectory extends Directory { + + private final BlobStoreRepository blobStoreRepository; + private final SnapshotId snapshotId; + private final BlobPath blobPath; + private final SetOnce blobContainer = new SetOnce<>(); + private final SetOnce> fileInfoMap = new SetOnce<>(); + + public InMemoryRemoteSnapshotDirectory(BlobStoreRepository blobStoreRepository, BlobPath blobPath, SnapshotId snapshotId) { + this.blobStoreRepository = blobStoreRepository; + this.snapshotId = snapshotId; + this.blobPath = blobPath; + } + + @Override + public String[] listAll() throws IOException { + return fileInfoMap().keySet().toArray(new String[0]); + } + + @Override + public void deleteFile(String name) throws IOException {} + + @Override + public IndexOutput createOutput(String name, IOContext context) { + return NoopIndexOutput.INSTANCE; + } + + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + final BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfoMap().get(name); + + // Virtual files are contained entirely in the metadata hash field + if (fileInfo.name().startsWith("v__")) { + return new ByteArrayIndexInput(name, fileInfo.metadata().hash().bytes); + } + + try (InputStream is = blobContainer().readBlob(fileInfo.name())) { + return new ByteArrayIndexInput(name, is.readAllBytes()); + } + } + + @Override + public void close() throws IOException {} + + @Override + public long fileLength(String name) throws IOException { + initializeIfNecessary(); + return fileInfoMap.get().get(name).length(); + } + + @Override + public Set getPendingDeletions() throws IOException { + return Collections.emptySet(); + } + + @Override + public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) { + throw new 
UnsupportedOperationException(); + } + + @Override + public void sync(Collection names) throws IOException {} + + @Override + public void syncMetaData() {} + + @Override + public void rename(String source, String dest) throws IOException {} + + @Override + public Lock obtainLock(String name) throws IOException { + return NoLockFactory.INSTANCE.obtainLock(null, null); + } + + static class NoopIndexOutput extends IndexOutput { + + final static NoopIndexOutput INSTANCE = new NoopIndexOutput(); + + NoopIndexOutput() { + super("noop", "noop"); + } + + @Override + public void close() throws IOException { + + } + + @Override + public long getFilePointer() { + return 0; + } + + @Override + public long getChecksum() throws IOException { + return 0; + } + + @Override + public void writeByte(byte b) throws IOException { + + } + + @Override + public void writeBytes(byte[] b, int offset, int length) throws IOException { + + } + } + + private BlobContainer blobContainer() { + initializeIfNecessary(); + return blobContainer.get(); + } + + private Map fileInfoMap() { + initializeIfNecessary(); + return fileInfoMap.get(); + } + + /** + * Bit of a hack to lazily initialize the blob store to avoid running afoul + * of the assertion in {@code BlobStoreRepository#assertSnapshotOrGenericThread}. + */ + private void initializeIfNecessary() { + if (blobContainer.get() == null || fileInfoMap.get() == null) { + blobContainer.set(blobStoreRepository.blobStore().blobContainer(blobPath)); + final BlobStoreIndexShardSnapshot snapshot = blobStoreRepository.loadShardSnapshot(blobContainer.get(), snapshotId); + fileInfoMap.set( + snapshot.indexFiles().stream().collect(Collectors.toMap(BlobStoreIndexShardSnapshot.FileInfo::physicalName, f -> f)) + ); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java new file mode 100644 index 0000000000000..bf7806b836b65 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java @@ -0,0 +1,49 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.index.store; + +import java.io.IOException; +import java.util.function.Supplier; + +import org.apache.lucene.store.Directory; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.SnapshotId; + +/** + * Factory for a Directory implementation that can read directly from index + * data stored remotely in a repository. 
+ * + * @opensearch.internal + */ +public final class RemoteSnapshotDirectoryFactory implements IndexStorePlugin.DirectoryFactory { + private final Supplier repositoriesService; + + public RemoteSnapshotDirectoryFactory(Supplier repositoriesService) { + this.repositoriesService = repositoriesService; + } + + @Override + public Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException { + final String repositoryName = IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.get(indexSettings.getSettings()); + final Repository repository = repositoriesService.get().repository(repositoryName); + assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; + final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; + final BlobPath blobPath = new BlobPath().add("indices") + .add(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.get(indexSettings.getSettings())) + .add(Integer.toString(shardPath.getShardId().getId())); + final SnapshotId snapshotId = new SnapshotId( + IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.get(indexSettings.getSettings()), + IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.get(indexSettings.getSettings()) + ); + return new InMemoryRemoteSnapshotDirectory(blobStoreRepository, blobPath, snapshotId); + } +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index f2961c0f3b13d..b2f48ccdd389c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -109,6 +109,7 @@ import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.engine.NoOpEngine; +import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.flush.FlushStats; import org.opensearch.index.get.GetStats; @@ -130,6 +131,7 @@ import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; import org.opensearch.index.shard.ShardId; +import org.opensearch.index.translog.TranslogStats; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -336,13 +338,6 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.metaStateService = metaStateService; this.engineFactoryProviders = engineFactoryProviders; - // do not allow any plugin-provided index store type to conflict with a built-in type - for (final String indexStoreType : directoryFactories.keySet()) { - if (IndexModule.isBuiltinType(indexStoreType)) { - throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); - } - } - this.directoryFactories = directoryFactories; this.recoveryStateFactories = recoveryStateFactories; // doClose() is called when shutting down a node, yet there might still be ongoing requests @@ -769,6 +764,9 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { if (idxSettings.isSegRepEnabled()) { return new NRTReplicationEngineFactory(); } + if (IndexModule.Type.REMOTE_SNAPSHOT.match(idxSettings)) { + return config -> new ReadOnlyEngine(config, new SeqNoStats(0, 0, 0), new TranslogStats(), true, Function.identity(), false); + } return 
new InternalEngineFactory(); } else if (engineFactories.size() == 1) { assert engineFactories.get(0).isPresent(); diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 24300c884d194..504550378e14f 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -38,6 +38,7 @@ import org.apache.lucene.util.SetOnce; import org.opensearch.common.util.FeatureFlags; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexingPressureService; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; @@ -213,6 +214,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -604,11 +606,23 @@ protected Node( .map(plugin -> (Function>) plugin::getEngineFactory) .collect(Collectors.toList()); - final Map indexStoreFactories = pluginsService.filterPlugins(IndexStorePlugin.class) + final Map builtInDirectoryFactories = IndexModule.createBuiltInDirectoryFactories( + repositoriesServiceReference::get + ); + final Map directoryFactories = new HashMap<>(); + pluginsService.filterPlugins(IndexStorePlugin.class) .stream() .map(IndexStorePlugin::getDirectoryFactories) .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) + .forEach((k, v) -> { + // do not allow any plugin-provided index store type to conflict with a built-in type + if (builtInDirectoryFactories.containsKey(k)) { + throw new IllegalStateException("registered index store type [" + k + "] conflicts with a built-in type"); + } + directoryFactories.put(k, v); + }); + directoryFactories.putAll(builtInDirectoryFactories); final Map recoveryStateFactories = pluginsService.filterPlugins( IndexStorePlugin.class @@ -653,7 +667,7 @@ protected Node( client, metaStateService, engineFactoryProviders, - indexStoreFactories, + Map.copyOf(directoryFactories), searchModule.getValuesSourceRegistry(), recoveryStateFactories, remoteDirectoryFactory diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index ca5078c4d1c56..60c01d0b04639 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -84,7 +84,9 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.Index; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; @@ -421,20 +423,30 @@ public ClusterState execute(ClusterState currentState) { .getMaxNodeVersion() .minimumIndexCompatibilityVersion(); for (Map.Entry indexEntry : indices.entrySet()) { + String renamedIndexName = indexEntry.getKey(); String index = indexEntry.getValue(); boolean partial = checkPartial(index); - SnapshotRecoverySource recoverySource = new SnapshotRecoverySource( + + IndexMetadata 
snapshotIndexMetadata = updateIndexSettings( + metadata.index(index), + request.indexSettings(), + request.ignoreIndexSettings() + ); + final boolean isSearchableSnapshot = FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) + && IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey().equals(request.storageType().toString()); + if (isSearchableSnapshot) { + snapshotIndexMetadata = addSnapshotToIndexSettings( + snapshotIndexMetadata, + snapshot, + repositoryData.resolveIndexId(index) + ); + } + final SnapshotRecoverySource recoverySource = new SnapshotRecoverySource( restoreUUID, snapshot, snapshotInfo.version(), - repositoryData.resolveIndexId(index) - ); - String renamedIndexName = indexEntry.getKey(); - IndexMetadata snapshotIndexMetadata = metadata.index(index); - snapshotIndexMetadata = updateIndexSettings( - snapshotIndexMetadata, - request.indexSettings(), - request.ignoreIndexSettings() + repositoryData.resolveIndexId(index), + isSearchableSnapshot ); try { snapshotIndexMetadata = metadataIndexUpgradeService.upgradeIndexMetadata( @@ -1207,4 +1219,16 @@ public void applyClusterState(ClusterChangedEvent event) { logger.warn("Failed to update restore state ", t); } } + + private static IndexMetadata addSnapshotToIndexSettings(IndexMetadata metadata, Snapshot snapshot, IndexId indexId) { + final Settings newSettings = Settings.builder() + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.getKey(), snapshot.getRepository()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.getKey(), snapshot.getSnapshotId().getUUID()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.getKey(), snapshot.getSnapshotId().getName()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.getKey(), indexId.getId()) + .put(metadata.getSettings()) + .build(); + return IndexMetadata.builder(metadata).settings(newSettings).build(); + } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 27c0437236f63..470eeea771f2b 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -327,7 +327,8 @@ public void testShardStateMetaHashCodeEquals() { ShardStateMetadata meta = new ShardStateMetadata( randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), - allocationId + allocationId, + randomFrom(ShardStateMetadata.IndexDataLocation.values()) ); assertEquals(meta, new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId)); @@ -339,7 +340,12 @@ public void testShardStateMetaHashCodeEquals() { Set hashCodes = new HashSet<>(); for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode allocationId = randomBoolean() ? 
null : randomAllocationId(); - meta = new ShardStateMetadata(randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); + meta = new ShardStateMetadata( + randomBoolean(), + randomRealisticUnicodeOfCodepointLengthBetween(1, 10), + allocationId, + randomFrom(ShardStateMetadata.IndexDataLocation.values()) + ); hashCodes.add(meta.hashCode()); } assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1); diff --git a/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java b/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java index beda468b45fb0..25ec7c7987855 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java @@ -35,6 +35,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; +import org.opensearch.gateway.WriteStateException; import org.opensearch.index.Index; import org.opensearch.test.OpenSearchTestCase; @@ -50,7 +51,7 @@ public void testLoadShardPath() throws IOException { ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, "0xDEADBEEF", AllocationId.newInitializing()), path); + writeShardStateMetadata("0xDEADBEEF", path); ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, ""); assertEquals(path, shardPath.getDataPath()); assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID()); @@ -66,7 +67,7 @@ public void testFailLoadShardPathOnMultiState() throws IOException { ShardId shardId = new ShardId("foo", indexUUID, 0); Path[] paths = env.availableShardPaths(shardId); assumeTrue("This test tests multi data.path but we only got one", paths.length > 1); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, indexUUID, AllocationId.newInitializing()), paths); + writeShardStateMetadata(indexUUID, paths); Exception e = expectThrows(IllegalStateException.class, () -> ShardPath.loadShardPath(logger, env, shardId, "")); assertThat(e.getMessage(), containsString("more than one shard state found")); } @@ -77,7 +78,7 @@ public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException { ShardId shardId = new ShardId("foo", "foobar", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, "0xDEADBEEF", AllocationId.newInitializing()), path); + writeShardStateMetadata("0xDEADBEEF", path); Exception e = expectThrows(IllegalStateException.class, () -> ShardPath.loadShardPath(logger, env, shardId, "")); assertThat(e.getMessage(), containsString("expected: foobar on shard path")); } @@ -121,7 +122,7 @@ public void testGetRootPaths() throws IOException { ShardId shardId = new ShardId("foo", indexUUID, 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, indexUUID, AllocationId.newInitializing()), path); + writeShardStateMetadata(indexUUID, path); ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, customDataPath); boolean found = false; for (Path p : env.nodeDataPaths()) { @@ -148,4 +149,10 @@ public void testGetRootPaths() throws IOException { } } + private static void writeShardStateMetadata(String 
indexUUID, Path... paths) throws WriteStateException { + ShardStateMetadata.FORMAT.writeAndCleanup( + new ShardStateMetadata(true, indexUUID, AllocationId.newInitializing(), ShardStateMetadata.IndexDataLocation.LOCAL), + paths + ); + } } diff --git a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java index cf8d6677b4227..ce40de0e9aa71 100644 --- a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java @@ -57,6 +57,8 @@ import java.util.Arrays; import java.util.Locale; +import static org.opensearch.test.store.MockFSDirectoryFactory.FILE_SYSTEM_BASED_STORE_TYPES; + public class FsDirectoryFactoryTests extends OpenSearchTestCase { public void testPreload() throws IOException { @@ -170,7 +172,7 @@ public void testStoreDirectory() throws IOException { // default doTestStoreDirectory(tempDir, null, IndexModule.Type.FS); // explicit directory impls - for (IndexModule.Type type : IndexModule.Type.values()) { + for (IndexModule.Type type : FILE_SYSTEM_BASED_STORE_TYPES) { doTestStoreDirectory(tempDir, type.name().toLowerCase(Locale.ROOT), type); } } diff --git a/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java b/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java index 47952af1cd06c..e38b62c419334 100644 --- a/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java +++ b/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java @@ -63,10 +63,15 @@ import java.io.PrintStream; import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.List; import java.util.Random; import java.util.Set; +import java.util.stream.Collectors; public class MockFSDirectoryFactory implements IndexStorePlugin.DirectoryFactory { + public static final List FILE_SYSTEM_BASED_STORE_TYPES = Arrays.stream(IndexModule.Type.values()) + .filter(t -> (t == IndexModule.Type.REMOTE_SNAPSHOT) == false) + .collect(Collectors.toUnmodifiableList()); public static final Setting RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING = Setting.doubleSetting( "index.store.mock.random.io_exception_rate_on_open", @@ -168,7 +173,7 @@ private Directory randomDirectoryService(Random random, IndexSettings indexSetti .put(indexSettings.getIndexMetadata().getSettings()) .put( IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), - RandomPicks.randomFrom(random, IndexModule.Type.values()).getSettingsKey() + RandomPicks.randomFrom(random, FILE_SYSTEM_BASED_STORE_TYPES).getSettingsKey() ) ) .build(); From 89550c0a9123ab808779449839d6f63560d0412d Mon Sep 17 00:00:00 2001 From: pranikum <109206473+pranikum@users.noreply.github.com> Date: Thu, 13 Oct 2022 17:24:38 +0530 Subject: [PATCH 053/122] Recommission api level support (#4604) * Add changes for Recommission API Signed-off-by: pranikum <109206473+pranikum@users.noreply.github.com> --- CHANGELOG.md | 2 +- .../client/RestHighLevelClientTests.java | 3 +- ...cluster.delete_decommission_awareness.json | 19 ++++ .../org/opensearch/action/ActionModule.java | 5 ++ .../delete/DeleteDecommissionStateAction.java | 25 ++++++ .../DeleteDecommissionStateRequest.java | 40 +++++++++ ...DeleteDecommissionStateRequestBuilder.java | 27 ++++++ .../DeleteDecommissionStateResponse.java | 36 ++++++++ ...ransportDeleteDecommissionStateAction.java | 86 +++++++++++++++++++ 
.../awareness/delete/package-info.java | 10 +++ .../opensearch/client/ClusterAdminClient.java | 17 ++++ .../java/org/opensearch/client/Requests.java | 8 ++ .../client/support/AbstractClient.java | 21 +++++ .../decommission/DecommissionService.java | 8 +- .../RestDeleteDecommissionStateAction.java | 52 +++++++++++ .../DeleteDecommissionStateRequestTests.java | 32 +++++++ .../DeleteDecommissionStateResponseTests.java | 29 +++++++ .../DecommissionServiceTests.java | 10 +-- ...estDeleteDecommissionStateActionTests.java | 40 +++++++++ 19 files changed, 459 insertions(+), 11 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java create mode 100644 server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 4310103803fbe..346bf432e663b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,7 +30,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) - Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) - Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680)) - +- Recommissioning of zone. REST layer support. 
([#4604](https://github.com/opensearch-project/OpenSearch/pull/4604)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 9086fc9563c57..ab9982657a410 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -890,7 +890,8 @@ public void testApiNamingConventions() throws Exception { "cluster.put_weighted_routing", "cluster.get_weighted_routing", "cluster.put_decommission_awareness", - "cluster.get_decommission_awareness", }; + "cluster.get_decommission_awareness", + "cluster.delete_decommission_awareness", }; List<String> booleanReturnMethods = Arrays.asList("security.enable_user", "security.disable_user", "security.change_password"); Set<String> deprecatedMethods = new HashSet<>(); deprecatedMethods.add("indices.force_merge"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json new file mode 100644 index 0000000000000..13ea101169e60 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json @@ -0,0 +1,19 @@ +{ + "cluster.delete_decommission_awareness": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "description": "Delete any existing decommission." + }, + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_cluster/decommission/awareness/", + "methods": [ + "DELETE" + ] + } + ] + } + } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index d959d6828a46b..c2c11d69f134c 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -42,6 +42,8 @@ import org.opensearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.get.TransportGetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.TransportDeleteDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; import org.opensearch.action.admin.cluster.decommission.awareness.put.TransportDecommissionAction; import org.opensearch.action.admin.cluster.health.ClusterHealthAction; @@ -310,6 +312,7 @@ import org.opensearch.rest.action.admin.cluster.RestClusterStatsAction; import org.opensearch.rest.action.admin.cluster.RestClusterUpdateSettingsAction; import org.opensearch.rest.action.admin.cluster.RestCreateSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestDeleteDecommissionStateAction; import org.opensearch.rest.action.admin.cluster.RestDeleteRepositoryAction; import org.opensearch.rest.action.admin.cluster.RestDeleteSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestDeleteStoredScriptAction; @@ 
-699,6 +702,7 @@ public void reg // Decommission actions actions.register(DecommissionAction.INSTANCE, TransportDecommissionAction.class); actions.register(GetDecommissionStateAction.INSTANCE, TransportGetDecommissionStateAction.class); + actions.register(DeleteDecommissionStateAction.INSTANCE, TransportDeleteDecommissionStateAction.class); return unmodifiableMap(actions.getRegistry()); } @@ -879,6 +883,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestDeletePitAction()); registerHandler.accept(new RestGetAllPitsAction(nodesInCluster)); registerHandler.accept(new RestPitSegmentsAction(nodesInCluster)); + registerHandler.accept(new RestDeleteDecommissionStateAction()); for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java new file mode 100644 index 0000000000000..3aff666d388be --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.ActionType; + +/** + * Delete decommission state action. + * + * @opensearch.internal + */ +public class DeleteDecommissionStateAction extends ActionType { + public static final DeleteDecommissionStateAction INSTANCE = new DeleteDecommissionStateAction(); + public static final String NAME = "cluster:admin/decommission/awareness/delete"; + + private DeleteDecommissionStateAction() { + super(NAME, DeleteDecommissionStateResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java new file mode 100644 index 0000000000000..205be54a36c33 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request for deleting decommission request. 
+ * + * @opensearch.internal + */ +public class DeleteDecommissionStateRequest extends ClusterManagerNodeRequest { + + public DeleteDecommissionStateRequest() {} + + public DeleteDecommissionStateRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java new file mode 100644 index 0000000000000..08f194c53f18e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.client.OpenSearchClient; + +/** + * Builder for Delete decommission request. + * + * @opensearch.internal + */ +public class DeleteDecommissionStateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< + DeleteDecommissionStateRequest, + DeleteDecommissionStateResponse, + DeleteDecommissionStateRequestBuilder> { + + public DeleteDecommissionStateRequestBuilder(OpenSearchClient client, DeleteDecommissionStateAction action) { + super(client, action, new DeleteDecommissionStateRequest()); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java new file mode 100644 index 0000000000000..2ff634966586a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Response returned after deletion of decommission request. 
+ * + * @opensearch.internal + */ +public class DeleteDecommissionStateResponse extends AcknowledgedResponse { + + public DeleteDecommissionStateResponse(StreamInput in) throws IOException { + super(in); + } + + public DeleteDecommissionStateResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java new file mode 100644 index 0000000000000..7d8f4bdd8304c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.decommission.DecommissionService; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for delete decommission. 
+ * + * @opensearch.internal + */ +public class TransportDeleteDecommissionStateAction extends TransportClusterManagerNodeAction< + DeleteDecommissionStateRequest, + DeleteDecommissionStateResponse> { + + private static final Logger logger = LogManager.getLogger(TransportDeleteDecommissionStateAction.class); + private final DecommissionService decommissionService; + + @Inject + public TransportDeleteDecommissionStateAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + DecommissionService decommissionService + ) { + super( + DeleteDecommissionStateAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + DeleteDecommissionStateRequest::new, + indexNameExpressionResolver + ); + this.decommissionService = decommissionService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected DeleteDecommissionStateResponse read(StreamInput in) throws IOException { + return new DeleteDecommissionStateResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(DeleteDecommissionStateRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void clusterManagerOperation( + DeleteDecommissionStateRequest request, + ClusterState state, + ActionListener listener + ) { + logger.info("Received delete decommission Request [{}]", request); + this.decommissionService.startRecommissionAction(listener); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java new file mode 100644 index 0000000000000..c2cfc03baa45e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Delete decommission transport handlers. 
*/ +package org.opensearch.action.admin.cluster.decommission.awareness.delete; diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index c811b788d9cf6..5551c45ba9e57 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -37,6 +37,9 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; @@ -864,4 +867,18 @@ public interface ClusterAdminClient extends OpenSearchClient { */ GetDecommissionStateRequestBuilder prepareGetDecommission(); + /** + * Deletes the decommission metadata. + */ + ActionFuture deleteDecommissionState(DeleteDecommissionStateRequest request); + + /** + * Deletes the decommission metadata. + */ + void deleteDecommissionState(DeleteDecommissionStateRequest request, ActionListener listener); + + /** + * Deletes the decommission metadata. + */ + DeleteDecommissionStateRequestBuilder prepareDeleteDecommissionRequest(); } diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java index c8729a0d498f4..87219dda49825 100644 --- a/server/src/main/java/org/opensearch/client/Requests.java +++ b/server/src/main/java/org/opensearch/client/Requests.java @@ -32,6 +32,7 @@ package org.opensearch.client; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -588,4 +589,11 @@ public static DecommissionRequest decommissionRequest() { public static GetDecommissionStateRequest getDecommissionStateRequest() { return new GetDecommissionStateRequest(); } + + /** + * Creates a new delete decommission request. 
+ */ + public static DeleteDecommissionStateRequest deleteDecommissionStateRequest() { + return new DeleteDecommissionStateRequest(); + } } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 65199b22e8e72..bd82299725435 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -43,6 +43,10 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; @@ -1405,6 +1409,23 @@ public GetDecommissionStateRequestBuilder prepareGetDecommission() { return new GetDecommissionStateRequestBuilder(this, GetDecommissionStateAction.INSTANCE); } + @Override + public ActionFuture deleteDecommissionState(DeleteDecommissionStateRequest request) { + return execute(DeleteDecommissionStateAction.INSTANCE, request); + } + + @Override + public void deleteDecommissionState( + DeleteDecommissionStateRequest request, + ActionListener listener + ) { + execute(DeleteDecommissionStateAction.INSTANCE, request, listener); + } + + @Override + public DeleteDecommissionStateRequestBuilder prepareDeleteDecommissionRequest() { + return new DeleteDecommissionStateRequestBuilder(this, DeleteDecommissionStateAction.INSTANCE); + } } static class IndicesAdmin implements IndicesAdminClient { diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index 5def2733b5ded..b2c8bfbc0cdc8 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -13,8 +13,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -479,7 +479,7 @@ public void onFailure(Exception e) { }; } - public void startRecommissionAction(final ActionListener listener) { + public void startRecommissionAction(final ActionListener listener) { /* * For abandoned requests, we 
might not really know if it actually restored the exclusion list. * And can land up in cases where even after recommission, exclusions are set(which is unexpected). @@ -502,7 +502,7 @@ public void onFailure(Exception e) { }, false); } - void deleteDecommissionState(ActionListener listener) { + void deleteDecommissionState(ActionListener listener) { clusterService.submitStateUpdateTask("delete_decommission_state", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { @@ -523,7 +523,7 @@ public void onFailure(String source, Exception e) { public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { // Cluster state processed for deleting the decommission attribute. assert newState.metadata().decommissionAttributeMetadata() == null; - listener.onResponse(new AcknowledgedResponse(true)); + listener.onResponse(new DeleteDecommissionStateResponse(true)); } }); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java new file mode 100644 index 0000000000000..9fd7ae2248c30 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.client.Requests; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Clears the decommission metadata. 
+ * + * @opensearch.api + */ +public class RestDeleteDecommissionStateAction extends BaseRestHandler { + + @Override + public List routes() { + return singletonList(new Route(DELETE, "/_cluster/decommission/awareness")); + } + + @Override + public String getName() { + return "delete_decommission_state_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + DeleteDecommissionStateRequest deleteDecommissionStateRequest = createRequest(); + return channel -> client.admin() + .cluster() + .deleteDecommissionState(deleteDecommissionStateRequest, new RestToXContentListener<>(channel)); + } + + DeleteDecommissionStateRequest createRequest() { + return Requests.deleteDecommissionStateRequest(); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java new file mode 100644 index 0000000000000..1a95b77cc1024 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness; + +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DeleteDecommissionStateRequestTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + final DeleteDecommissionStateRequest originalRequest = new DeleteDecommissionStateRequest(); + + final DeleteDecommissionStateRequest cloneRequest; + try (BytesStreamOutput out = new BytesStreamOutput()) { + originalRequest.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + cloneRequest = new DeleteDecommissionStateRequest(in); + } + } + assertEquals(cloneRequest.clusterManagerNodeTimeout(), originalRequest.clusterManagerNodeTimeout()); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java new file mode 100644 index 0000000000000..085eda3e9d0e7 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness; + +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DeleteDecommissionStateResponseTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + final DeleteDecommissionStateResponse originalResponse = new DeleteDecommissionStateResponse(true); + + final DeleteDecommissionStateResponse deserialized = copyWriteable( + originalResponse, + writableRegistry(), + DeleteDecommissionStateResponse::new + ); + assertEquals(deserialized, originalResponse); + + } +} diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 840ce1634a68e..7dee51b7713f9 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -15,9 +15,9 @@ import org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -217,9 +217,9 @@ public void testClearClusterDecommissionState() throws InterruptedException { .metadata(Metadata.builder().putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata).build()) .build(); - ActionListener listener = new ActionListener() { + ActionListener listener = new ActionListener<>() { @Override - public void onResponse(AcknowledgedResponse decommissionResponse) { + public void onResponse(DeleteDecommissionStateResponse decommissionResponse) { DecommissionAttributeMetadata metadata = clusterService.state().metadata().custom(DecommissionAttributeMetadata.TYPE); assertNull(metadata); countDownLatch.countDown(); @@ -268,9 +268,9 @@ public void testDeleteDecommissionAttributeClearVotingExclusion() { public void testClusterUpdateTaskForDeletingDecommission() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); - ActionListener listener = new ActionListener<>() { + ActionListener listener = new ActionListener<>() { @Override - public void onResponse(AcknowledgedResponse response) { + public void onResponse(DeleteDecommissionStateResponse response) { assertTrue(response.isAcknowledged()); assertNull(clusterService.state().metadata().decommissionAttributeMetadata()); countDownLatch.countDown(); diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java new file mode 100644 index 0000000000000..01f988efdf6eb --- /dev/null +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch 
Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.cluster; + +import org.junit.Before; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.rest.RestActionTestCase; + +import java.util.List; + +public class RestDeleteDecommissionStateActionTests extends RestActionTestCase { + + private RestDeleteDecommissionStateAction action; + + @Before + public void setupAction() { + action = new RestDeleteDecommissionStateAction(); + controller().registerHandler(action); + } + + public void testRoutes() { + List<RestHandler.Route> routes = action.routes(); + RestHandler.Route route = routes.get(0); + assertEquals(route.getMethod(), RestRequest.Method.DELETE); + assertEquals("/_cluster/decommission/awareness", route.getPath()); + } + + public void testCreateRequest() { + DeleteDecommissionStateRequest request = action.createRequest(); + assertNotNull(request); + } +} From 1a7aabaf6c28e9e633090673ba4c55a1cdd904b5 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 13 Oct 2022 10:03:26 -0700 Subject: [PATCH 054/122] Fix assertion in test of ShardStateMetadata (#4774) This test randomly fails if the enum selected isn't the same as the one chosen by the legacy constructor. Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../index/shard/IndexShardTests.java | 25 ++++++++++++++----- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 346bf432e663b..95aa3e10a39fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -128,6 +128,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) +- Fixed randomly failing test ([4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 470eeea771f2b..72b77bb706065 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -331,12 +331,26 @@ public void testShardStateMetaHashCodeEquals() { randomFrom(ShardStateMetadata.IndexDataLocation.values()) ); - assertEquals(meta, new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId)); - assertEquals(meta.hashCode(), new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId).hashCode()); + assertEquals(meta, new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId, meta.indexDataLocation)); + assertEquals( + meta.hashCode(), + new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId, meta.indexDataLocation).hashCode() + ); - assertFalse(meta.equals(new 
ShardStateMetadata(!meta.primary, meta.indexUUID, meta.allocationId))); - assertFalse(meta.equals(new ShardStateMetadata(!meta.primary, meta.indexUUID + "foo", meta.allocationId))); - assertFalse(meta.equals(new ShardStateMetadata(!meta.primary, meta.indexUUID + "foo", randomAllocationId()))); + assertNotEquals(meta, new ShardStateMetadata(!meta.primary, meta.indexUUID, meta.allocationId, meta.indexDataLocation)); + assertNotEquals(meta, new ShardStateMetadata(!meta.primary, meta.indexUUID + "foo", meta.allocationId, meta.indexDataLocation)); + assertNotEquals(meta, new ShardStateMetadata(!meta.primary, meta.indexUUID, randomAllocationId(), meta.indexDataLocation)); + assertNotEquals( + meta, + new ShardStateMetadata( + !meta.primary, + meta.indexUUID, + randomAllocationId(), + meta.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL + ? ShardStateMetadata.IndexDataLocation.REMOTE + : ShardStateMetadata.IndexDataLocation.LOCAL + ) + ); Set hashCodes = new HashSet<>(); for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode allocationId = randomBoolean() ? null : randomAllocationId(); @@ -349,7 +363,6 @@ public void testShardStateMetaHashCodeEquals() { hashCodes.add(meta.hashCode()); } assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1); - } public void testClosesPreventsNewOperations() throws Exception { From 18f1fa36a7722391c88db80e41ded2027271f6d9 Mon Sep 17 00:00:00 2001 From: Anshu Agarwal Date: Thu, 13 Oct 2022 23:42:16 +0530 Subject: [PATCH 055/122] Delete API for weighted round robin search routing (#4400) * Delete API for weighted round robin search routing Signed-off-by: Anshu Agarwal --- CHANGELOG.md | 1 + .../client/RestHighLevelClientTests.java | 1 + .../api/cluster.delete_weighted_routing.json | 19 +++ .../cluster/routing/WeightedRoutingIT.java | 64 +++++++ .../search/SearchWeightedRoutingIT.java | 161 ++++++++++++++++++ .../org/opensearch/action/ActionModule.java | 6 + .../ClusterDeleteWeightedRoutingAction.java | 25 +++ .../ClusterDeleteWeightedRoutingRequest.java | 44 +++++ ...erDeleteWeightedRoutingRequestBuilder.java | 27 +++ .../ClusterDeleteWeightedRoutingResponse.java | 37 ++++ .../TransportDeleteWeightedRoutingAction.java | 86 ++++++++++ .../routing/weighted/delete/package-info.java | 10 ++ .../opensearch/client/ClusterAdminClient.java | 18 ++ .../java/org/opensearch/client/Requests.java | 10 ++ .../client/support/AbstractClient.java | 22 +++ .../routing/WeightedRoutingService.java | 30 ++++ ...estClusterDeleteWeightedRoutingAction.java | 53 ++++++ .../routing/WeightedRoutingServiceTests.java | 28 +++ 18 files changed, 642 insertions(+) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json create mode 100644 server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java create mode 100644 
server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/package-info.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingAction.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 95aa3e10a39fd..8e580b2b101af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - GET api for weighted shard routing([#4275](https://github.com/opensearch-project/OpenSearch/pull/4275/)) - Unmute test RelocationIT.testRelocationWhileIndexingRandom ([#4580](https://github.com/opensearch-project/OpenSearch/pull/4580)) - Add DecommissionService and helper to execute awareness attribute decommissioning ([#4084](https://github.com/opensearch-project/OpenSearch/pull/4084)) +- Delete api for weighted shard routing([#4400](https://github.com/opensearch-project/OpenSearch/pull/4400/)) - Further simplification of the ZIP publication implementation ([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() ([#4582](https://github.com/opensearch-project/OpenSearch/pull/4582)) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index ab9982657a410..d522bf8c4d005 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -889,6 +889,7 @@ public void testApiNamingConventions() throws Exception { "remote_store.restore", "cluster.put_weighted_routing", "cluster.get_weighted_routing", + "cluster.delete_weighted_routing", "cluster.put_decommission_awareness", "cluster.get_decommission_awareness", "cluster.delete_decommission_awareness", }; diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json new file mode 100644 index 0000000000000..2cd4081b645e8 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json @@ -0,0 +1,19 @@ +{ + "cluster.delete_weighted_routing": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/weighted-routing/delete", + "description": "Delete weighted shard routing weights" + }, + "stability": "stable", + "url": { + "paths": [ + { + "path": "/_cluster/routing/awareness/weights", + "methods": [ + "DELETE" + ] + } + ] + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java index 61f82877bf12b..bba07d878a42c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java 
@@ -9,6 +9,7 @@ package org.opensearch.cluster.routing; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; import org.opensearch.common.settings.Settings; @@ -284,4 +285,67 @@ public void testWeightedRoutingMetadataOnOSProcessRestart() throws Exception { ensureGreen(); assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); } + + public void testDeleteWeightedRouting_WeightsNotSet() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + assertNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + // delete weighted routing metadata + ClusterDeleteWeightedRoutingResponse deleteResponse = client().admin().cluster().prepareDeleteWeightedRouting().get(); + assertTrue(deleteResponse.isAcknowledged()); + assertNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + } + + public void testDeleteWeightedRouting_WeightsAreSet() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + // put api call to set weights + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertEquals(response.isAcknowledged(), true); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + // delete weighted routing metadata + ClusterDeleteWeightedRoutingResponse deleteResponse = client().admin().cluster().prepareDeleteWeightedRouting().get(); + assertTrue(deleteResponse.isAcknowledged()); + assertNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + } } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java new file mode 100644 index 0000000000000..097775b7ab4ac --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search; + +import org.junit.Assert; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.search.stats.SearchStats; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 3) +public class SearchWeightedRoutingIT extends OpenSearchIntegTestCase { + @Override + protected int numberOfReplicas() { + return 2; + } + + public void testSearchWithWRRShardRouting() throws IOException { + Settings commonSettings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone" + ".values", "a,b,c") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .build(); + + logger.info("--> starting 6 nodes on different zones"); + List nodes = internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + String A_0 = nodes.get(0); + String B_0 = nodes.get(1); + String B_1 = nodes.get(2); + String A_1 = nodes.get(3); + String C_0 = nodes.get(4); + String C_1 = nodes.get(5); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("6").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + assertAcked( + 
prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 10).put("index.number_of_replicas", 2)) + ); + ensureGreen(); + logger.info("--> creating indices for test"); + for (int i = 0; i < 100; i++) { + client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + } + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertEquals(response.isAcknowledged(), true); + + Set hitNodes = new HashSet<>(); + // making search requests + for (int i = 0; i < 50; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(A_0, A_1, B_0, B_1)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + for (int j = 0; j < searchResponse.getHits().getHits().length; j++) { + hitNodes.add(searchResponse.getHits().getAt(j).getShard().getNodeId()); + } + } + // search should not go to nodes in zone c + assertThat(hitNodes.size(), lessThanOrEqualTo(4)); + DiscoveryNodes dataNodes = internalCluster().clusterService().state().nodes(); + List nodeIdsFromZoneWithWeightZero = new ArrayList<>(); + for (DiscoveryNode node : dataNodes) { + if (node.getAttributes().get("zone").equals("c")) { + nodeIdsFromZoneWithWeightZero.add(node.getId()); + } + } + for (String nodeId : nodeIdsFromZoneWithWeightZero) { + assertFalse(hitNodes.contains(nodeId)); + } + + NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet(); + for (NodeStats stat : nodeStats.getNodes()) { + SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); + if (stat.getNode().getAttributes().get("zone").equals("c")) { + assertEquals(0, searchStats.getQueryCount()); + assertEquals(0, searchStats.getFetchCount()); + + } else { + Assert.assertTrue(searchStats.getQueryCount() > 0L); + Assert.assertTrue(searchStats.getFetchCount() > 0L); + } + } + + logger.info("--> deleted shard routing weights for weighted round robin"); + + ClusterDeleteWeightedRoutingResponse deleteResponse = client().admin().cluster().prepareDeleteWeightedRouting().get(); + assertEquals(deleteResponse.isAcknowledged(), true); + + hitNodes = new HashSet<>(); + // making search requests + for (int i = 0; i < 100; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(A_0, A_1, B_0, B_1)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + for (int j = 0; j < searchResponse.getHits().getHits().length; j++) { + hitNodes.add(searchResponse.getHits().getAt(j).getShard().getNodeId()); + } + } + + // Check shard routing requests hit data nodes in zone c + for (String nodeId : nodeIdsFromZoneWithWeightZero) { + assertFalse(!hitNodes.contains(nodeId)); + } + nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet(); + + for (NodeStats stat : nodeStats.getNodes()) { + SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); + Assert.assertTrue(searchStats.getQueryCount() > 0L); + Assert.assertTrue(searchStats.getFetchCount() > 0L); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java 
b/server/src/main/java/org/opensearch/action/ActionModule.java index c2c11d69f134c..84bc9b395c5dc 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -85,6 +85,8 @@ import org.opensearch.action.admin.cluster.settings.TransportClusterUpdateSettingsAction; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsAction; import org.opensearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.TransportDeleteWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.TransportGetWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterAddWeightedRoutingAction; @@ -302,6 +304,7 @@ import org.opensearch.rest.action.admin.cluster.RestClearVotingConfigExclusionsAction; import org.opensearch.rest.action.admin.cluster.RestCloneSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestClusterAllocationExplainAction; +import org.opensearch.rest.action.admin.cluster.RestClusterDeleteWeightedRoutingAction; import org.opensearch.rest.action.admin.cluster.RestClusterGetSettingsAction; import org.opensearch.rest.action.admin.cluster.RestClusterGetWeightedRoutingAction; import org.opensearch.rest.action.admin.cluster.RestClusterHealthAction; @@ -580,6 +583,7 @@ public void reg actions.register(ClusterAddWeightedRoutingAction.INSTANCE, TransportAddWeightedRoutingAction.class); actions.register(ClusterGetWeightedRoutingAction.INSTANCE, TransportGetWeightedRoutingAction.class); + actions.register(ClusterDeleteWeightedRoutingAction.INSTANCE, TransportDeleteWeightedRoutingAction.class); actions.register(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); @@ -766,8 +770,10 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestCloseIndexAction()); registerHandler.accept(new RestOpenIndexAction()); registerHandler.accept(new RestAddIndexBlockAction()); + registerHandler.accept(new RestClusterPutWeightedRoutingAction()); registerHandler.accept(new RestClusterGetWeightedRoutingAction()); + registerHandler.accept(new RestClusterDeleteWeightedRoutingAction()); registerHandler.accept(new RestUpdateSettingsAction()); registerHandler.accept(new RestGetSettingsAction()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingAction.java new file mode 100644 index 0000000000000..aa438cd31b934 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; + +import org.opensearch.action.ActionType; + +/** + * Action to delete weights for weighted round-robin shard routing policy. + * + * @opensearch.internal + */ +public class ClusterDeleteWeightedRoutingAction extends ActionType<ClusterDeleteWeightedRoutingResponse> { + public static final ClusterDeleteWeightedRoutingAction INSTANCE = new ClusterDeleteWeightedRoutingAction(); + public static final String NAME = "cluster:admin/routing/awareness/weights/delete"; + + private ClusterDeleteWeightedRoutingAction() { + super(NAME, ClusterDeleteWeightedRoutingResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java new file mode 100644 index 0000000000000..71eab8ff35a2d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request to delete weights for weighted round-robin shard routing policy. + * + * @opensearch.internal + */ +public class ClusterDeleteWeightedRoutingRequest extends ClusterManagerNodeRequest<ClusterDeleteWeightedRoutingRequest> { + public ClusterDeleteWeightedRoutingRequest() {} + + public ClusterDeleteWeightedRoutingRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + + @Override + public String toString() { + return "ClusterDeleteWeightedRoutingRequest"; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java new file mode 100644 index 0000000000000..19976ac6b07aa --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ * + * @opensearch.internal + */ +public class ClusterDeleteWeightedRoutingRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< + ClusterDeleteWeightedRoutingRequest, + ClusterDeleteWeightedRoutingResponse, + ClusterDeleteWeightedRoutingRequestBuilder> { + + public ClusterDeleteWeightedRoutingRequestBuilder(OpenSearchClient client, ClusterDeleteWeightedRoutingAction action) { + super(client, action, new ClusterDeleteWeightedRoutingRequest()); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java new file mode 100644 index 0000000000000..b98ac6c0c55be --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Response from deleting weights for weighted round-robin search routing policy. + * + * @opensearch.internal + */ +public class ClusterDeleteWeightedRoutingResponse extends AcknowledgedResponse { + + ClusterDeleteWeightedRoutingResponse(StreamInput in) throws IOException { + super(in); + } + + public ClusterDeleteWeightedRoutingResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java new file mode 100644 index 0000000000000..8f88d8af71b70 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.routing.WeightedRoutingService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for deleting weights for weighted round-robin search routing policy + * + * @opensearch.internal + */ +public class TransportDeleteWeightedRoutingAction extends TransportClusterManagerNodeAction< + ClusterDeleteWeightedRoutingRequest, + ClusterDeleteWeightedRoutingResponse> { + + private static final Logger logger = LogManager.getLogger(TransportDeleteWeightedRoutingAction.class); + + private final WeightedRoutingService weightedRoutingService; + + @Inject + public TransportDeleteWeightedRoutingAction( + TransportService transportService, + ClusterService clusterService, + WeightedRoutingService weightedRoutingService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + ClusterDeleteWeightedRoutingAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + ClusterDeleteWeightedRoutingRequest::new, + indexNameExpressionResolver + ); + this.weightedRoutingService = weightedRoutingService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected ClusterDeleteWeightedRoutingResponse read(StreamInput in) throws IOException { + return new ClusterDeleteWeightedRoutingResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(ClusterDeleteWeightedRoutingRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void clusterManagerOperation( + ClusterDeleteWeightedRoutingRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + weightedRoutingService.deleteWeightedRoutingMetadata(request, listener); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/package-info.java new file mode 100644 index 0000000000000..d24c88ec674f3 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** delete weighted-round robin shard routing weights. 
*/ +package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index 5551c45ba9e57..77ddb5e17c742 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -95,6 +95,9 @@ import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequestBuilder; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequestBuilder; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; @@ -837,6 +840,21 @@ public interface ClusterAdminClient extends OpenSearchClient { */ ClusterGetWeightedRoutingRequestBuilder prepareGetWeightedRouting(); + /** + * Deletes weights for weighted round-robin search routing policy. + */ + ActionFuture<ClusterDeleteWeightedRoutingResponse> deleteWeightedRouting(ClusterDeleteWeightedRoutingRequest request); + + /** + * Deletes weights for weighted round-robin search routing policy. + */ + void deleteWeightedRouting(ClusterDeleteWeightedRoutingRequest request, ActionListener<ClusterDeleteWeightedRoutingResponse> listener); + + /** + * Deletes weights for weighted round-robin search routing policy. + */ + ClusterDeleteWeightedRoutingRequestBuilder prepareDeleteWeightedRouting(); + /** * Decommission awareness attribute */ diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java index 87219dda49825..21f2a2d906602 100644 --- a/server/src/main/java/org/opensearch/client/Requests.java +++ b/server/src/main/java/org/opensearch/client/Requests.java @@ -52,6 +52,7 @@ import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; @@ -572,6 +573,15 @@ public static ClusterGetWeightedRoutingRequest getWeightedRoutingRequest(String return new ClusterGetWeightedRoutingRequest(attributeName); } + /** + * Deletes weights for weighted round-robin search routing policy + * + * @return delete weight request + */ + public static ClusterDeleteWeightedRoutingRequest deleteWeightedRoutingRequest() { + return new ClusterDeleteWeightedRoutingRequest(); + } + /** * Creates a new decommission request.
* diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index bd82299725435..b42010d4253d5 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -122,6 +122,10 @@ import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingAction; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequestBuilder; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequestBuilder; @@ -1328,6 +1332,24 @@ public ClusterGetWeightedRoutingRequestBuilder prepareGetWeightedRouting() { return new ClusterGetWeightedRoutingRequestBuilder(this, ClusterGetWeightedRoutingAction.INSTANCE); } + @Override + public ActionFuture deleteWeightedRouting(ClusterDeleteWeightedRoutingRequest request) { + return execute(ClusterDeleteWeightedRoutingAction.INSTANCE, request); + } + + @Override + public void deleteWeightedRouting( + ClusterDeleteWeightedRoutingRequest request, + ActionListener listener + ) { + execute(ClusterDeleteWeightedRoutingAction.INSTANCE, request, listener); + } + + @Override + public ClusterDeleteWeightedRoutingRequestBuilder prepareDeleteWeightedRouting() { + return new ClusterDeleteWeightedRoutingRequestBuilder(this, ClusterDeleteWeightedRoutingAction.INSTANCE); + } + @Override public void deleteDanglingIndex(DeleteDanglingIndexRequest request, ActionListener listener) { execute(DeleteDanglingIndexAction.INSTANCE, request, listener); diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java index 7ff2b23630a3c..54f2c81aea384 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java @@ -12,7 +12,9 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -106,6 +108,34 @@ private boolean checkIfSameWeightsInMetadata( return 
newWeightedRoutingMetadata.getWeightedRouting().equals(oldWeightedRoutingMetadata.getWeightedRouting()); } + public void deleteWeightedRoutingMetadata( + final ClusterDeleteWeightedRoutingRequest request, + final ActionListener<ClusterDeleteWeightedRoutingResponse> listener + ) { + clusterService.submitStateUpdateTask("delete_weighted_routing", new ClusterStateUpdateTask(Priority.URGENT) { + @Override + public ClusterState execute(ClusterState currentState) { + logger.info("Deleting weighted routing metadata from the cluster state"); + Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); + mdBuilder.removeCustom(WeightedRoutingMetadata.TYPE); + return ClusterState.builder(currentState).metadata(mdBuilder).build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error("failed to remove weighted routing metadata from cluster state", e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + logger.debug("cluster weighted routing metadata change is processed by all the nodes"); + assert newState.metadata().weightedRoutingMetadata() == null; + listener.onResponse(new ClusterDeleteWeightedRoutingResponse(true)); + } + }); + } + List<String> getAwarenessAttributes() { return awarenessAttributes; } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingAction.java new file mode 100644 index 0000000000000..9742cc373d520 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingAction.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.rest.action.admin.cluster; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; +import org.opensearch.client.Requests; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Delete Weighted Round Robin based shard routing weights + * + * @opensearch.api + * + */ +public class RestClusterDeleteWeightedRoutingAction extends BaseRestHandler { + + private static final Logger logger = LogManager.getLogger(RestClusterDeleteWeightedRoutingAction.class); + + @Override + public List routes() { + return singletonList(new Route(DELETE, "/_cluster/routing/awareness/weights")); + } + + @Override + public String getName() { + return "delete_weighted_routing_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + ClusterDeleteWeightedRoutingRequest clusterDeleteWeightedRoutingRequest = Requests.deleteWeightedRoutingRequest(); + return channel -> client.admin() + .cluster() + .deleteWeightedRouting(clusterDeleteWeightedRoutingRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java index 557c5c7ac910d..fc5d46ef84c79 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java @@ -12,7 +12,9 @@ import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterAddWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequestBuilder; import org.opensearch.client.node.NodeClient; @@ -233,6 +235,32 @@ public void onFailure(Exception e) { assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } + public void testDeleteWeightedRoutingMetadata() throws InterruptedException { + Map weights = Map.of("zone_A", 1.0, "zone_B", 1.0, "zone_C", 1.0); + ClusterState state = clusterService.state(); + state = setWeightedRoutingWeights(state, weights); + ClusterState.Builder builder = ClusterState.builder(state); + ClusterServiceUtils.setState(clusterService, builder); + + ClusterDeleteWeightedRoutingRequest clusterDeleteWeightedRoutingRequest = new ClusterDeleteWeightedRoutingRequest(); + final CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ClusterDeleteWeightedRoutingResponse clusterDeleteWeightedRoutingResponse) { + assertTrue(clusterDeleteWeightedRoutingResponse.isAcknowledged()); + 
assertNull(clusterService.state().metadata().weightedRoutingMetadata()); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("on failure shouldn't have been called"); + } + }; + weightedRoutingService.deleteWeightedRoutingMetadata(clusterDeleteWeightedRoutingRequest, listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + public void testVerifyAwarenessAttribute_InvalidAttributeName() { assertThrows( "invalid awareness attribute %s requested for updating weighted routing", From 12f26d3d10c413aae6b1abffbe384169fcaea0f7 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 13 Oct 2022 16:36:59 -0400 Subject: [PATCH 056/122] Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) (#4779) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + .../tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - .../ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - .../discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - .../licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + .../repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - .../repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + .../repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - .../repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 | 1 + plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 | 1 - 14 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 create mode 100644 plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e580b2b101af..f3d6c2e05553e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) - Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) +- Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) 
([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a779389b3ca82..08784c82a4cc4 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -10,7 +10,7 @@ bundled_jdk = 17.0.4+8 spatial4j = 0.7 jts = 1.15.0 jackson = 2.13.4 -jackson_databind = 2.13.4 +jackson_databind = 2.13.4.2 snakeyaml = 1.32 icu4j = 70.1 supercsv = 2.4.0 diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file From 6bae823cefad7373a748ce833ce75d80f48b380b Mon Sep 17 00:00:00 2001 From: Ayush Kataria <31301636+ayushKataria@users.noreply.github.com> Date: Fri, 14 Oct 2022 04:30:50 +0530 Subject: [PATCH 057/122] =?UTF-8?q?Use=20ReplicationFailedException=20inst?= =?UTF-8?q?ead=20of=20OpensearchException=20in=20Repl=E2=80=A6=20(#4725)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use ReplicationFailedException instead of OpensearchException in ReplicationTarget Signed-off-by: Ayush Kataria * CHANGELOG.md updated Signed-off-by: Ayush Kataria * test fixes Signed-off-by: Ayush Kataria * spotless fix Signed-off-by: Ayush Kataria * spotless fix Signed-off-by: Ayush Kataria * fixes for failing test as suggested in PR comments Signed-off-by: Ayush Kataria Signed-off-by: Ayush Kataria --- CHANGELOG.md | 1 + .../indices/recovery/RecoveryFailedException.java | 4 ++-- .../indices/recovery/RecoveryListener.java | 4 ++-- .../opensearch/indices/recovery/RecoveryTarget.java | 8 ++++---- .../replication/SegmentReplicationTarget.java | 9 +++------ .../SegmentReplicationTargetService.java | 13 +++++++------ .../replication/common/ReplicationCollection.java | 8 +++----- .../common/ReplicationFailedException.java | 12 ++++++++++++ .../replication/common/ReplicationListener.java | 4 +--- .../replication/common/ReplicationTarget.java | 7 +++---- .../shard/SegmentReplicationIndexShardTests.java | 5 ++--- .../opensearch/indices/recovery/RecoveryTests.java | 4 ++-- .../SegmentReplicationTargetServiceTests.java | 5 +++-- .../replication/SegmentReplicationTargetTests.java | 12 ++++++------ .../recovery/ReplicationCollectionTests.java | 5 +++-- .../opensearch/index/shard/IndexShardTestCase.java | 10 +++++++--- 16 files changed, 61 insertions(+), 50 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3d6c2e05553e..3a8bee55d881d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,7 @@ Inspired from [Keep a 
Changelog](https://keepachangelog.com/en/1.0.0/) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) - Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) +- Use ReplicationFailedException instead of OpensearchException in ReplicationTarget ([#4725](https://github.com/opensearch-project/OpenSearch/pull/4725)) - Fix weighted routing metadata deserialization error on process restart ([#4691](https://github.com/opensearch-project/OpenSearch/pull/4691)) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFailedException.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFailedException.java index f2665e8bbb1a7..12393ab12c95d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFailedException.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFailedException.java @@ -32,11 +32,11 @@ package org.opensearch.indices.recovery; -import org.opensearch.OpenSearchException; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationFailedException; import java.io.IOException; @@ -45,7 +45,7 @@ * * @opensearch.internal */ -public class RecoveryFailedException extends OpenSearchException { +public class RecoveryFailedException extends ReplicationFailedException { public RecoveryFailedException(StartRecoveryRequest request, Throwable cause) { this(request, null, cause); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java index b93c054ffa4bf..c8c2fbfc896eb 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java @@ -8,9 +8,9 @@ package org.opensearch.indices.recovery; -import org.opensearch.OpenSearchException; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.indices.cluster.IndicesClusterStateService; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; @@ -49,7 +49,7 @@ public void onDone(ReplicationState state) { } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { indicesClusterStateService.handleRecoveryFailure(shardRouting, sendShardFailure, e); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 7acc6b8b54fdd..c1e29e0d866d8 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ 
b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -37,7 +37,6 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.opensearch.Assertions; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.node.DiscoveryNode; @@ -56,10 +55,11 @@ import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationFailedException; +import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTarget; -import org.opensearch.indices.replication.common.ReplicationListener; -import org.opensearch.indices.replication.common.ReplicationCollection; import java.io.IOException; import java.nio.channels.FileChannel; @@ -135,7 +135,7 @@ public String description() { } @Override - public void notifyListener(OpenSearchException e, boolean sendShardFailure) { + public void notifyListener(ReplicationFailedException e, boolean sendShardFailure) { listener.onFailure(state(), new RecoveryFailedException(state(), e.getMessage(), e), sendShardFailure); } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 26bec2203c599..c12d9b7165ae0 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -18,7 +18,6 @@ import org.apache.lucene.store.ByteBuffersIndexInput; import org.apache.lucene.store.ChecksumIndexInput; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.common.UUIDs; @@ -105,16 +104,14 @@ public String description() { } @Override - public void notifyListener(OpenSearchException e, boolean sendShardFailure) { + public void notifyListener(ReplicationFailedException e, boolean sendShardFailure) { // Cancellations still are passed to our SegmentReplicationListner as failures, if we have failed because of cancellation // update the stage. final Throwable cancelledException = ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class); if (cancelledException != null) { state.setStage(SegmentReplicationState.Stage.CANCELLED); - listener.onFailure(state(), (CancellableThreads.ExecutionCancelledException) cancelledException, sendShardFailure); - } else { - listener.onFailure(state(), e, sendShardFailure); } + listener.onFailure(state(), e, sendShardFailure); } @Override @@ -150,7 +147,7 @@ public void startReplication(ActionListener listener) { // SegmentReplicationSource does not share CancellableThreads. 
final CancellableThreads.ExecutionCancelledException executionCancelledException = new CancellableThreads.ExecutionCancelledException("replication was canceled reason [" + reason + "]"); - notifyListener(executionCancelledException, false); + notifyListener(new ReplicationFailedException("Segment replication failed", executionCancelledException), false); throw executionCancelledException; }); state.setStage(SegmentReplicationState.Stage.REPLICATING); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 8fc53ccd3bc08..b633f0fa3b9a0 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; @@ -27,6 +26,7 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.indices.replication.common.ReplicationCollection.ReplicationRef; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.tasks.Task; @@ -196,7 +196,7 @@ public void onReplicationDone(SegmentReplicationState state) { } @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { logger.trace( () -> new ParameterizedMessage( "[shardId {}] [replication id {}] Replication failed, timing data: {}", @@ -249,13 +249,13 @@ default void onDone(ReplicationState state) { } @Override - default void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + default void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { onReplicationFailure((SegmentReplicationState) state, e, sendShardFailure); } void onReplicationDone(SegmentReplicationState state); - void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure); + void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure); } /** @@ -293,13 +293,14 @@ public void onFailure(Exception e) { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof CancellableThreads.ExecutionCancelledException) { if (onGoingReplications.getTarget(replicationId) != null) { + IndexShard indexShard = onGoingReplications.getTarget(replicationId).indexShard(); // if the target still exists in our collection, the primary initiated the cancellation, fail the replication // but do not fail the shard. Cancellations initiated by this node from Index events will be removed with // onGoingReplications.cancel and not appear in the collection when this listener resolves. 
- onGoingReplications.fail(replicationId, (CancellableThreads.ExecutionCancelledException) cause, false); + onGoingReplications.fail(replicationId, new ReplicationFailedException(indexShard, cause), false); } } else { - onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + onGoingReplications.fail(replicationId, new ReplicationFailedException("Segment Replication failed", e), true); } } }); diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java index 20600856c9444..e918ac0a79691 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java @@ -34,8 +34,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.OpenSearchException; -import org.opensearch.OpenSearchTimeoutException; import org.opensearch.common.concurrent.AutoCloseableRefCounted; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; @@ -134,7 +132,7 @@ public T reset(final long id, final TimeValue activityTimeout) { } catch (Exception e) { // fail shard to be safe assert oldTarget != null; - oldTarget.notifyListener(new OpenSearchException("Unable to reset target", e), true); + oldTarget.notifyListener(new ReplicationFailedException("Unable to reset target", e), true); return null; } } @@ -187,7 +185,7 @@ public boolean cancel(long id, String reason) { * @param e exception with reason for the failure * @param sendShardFailure true a shard failed message should be sent to the master */ - public void fail(long id, OpenSearchException e, boolean sendShardFailure) { + public void fail(long id, ReplicationFailedException e, boolean sendShardFailure) { T removed = onGoingTargetEvents.remove(id); if (removed != null) { logger.trace("failing {}. 
Send shard failure: [{}]", removed.description(), sendShardFailure); @@ -299,7 +297,7 @@ protected void doRun() throws Exception { String message = "no activity after [" + checkInterval + "]"; fail( id, - new OpenSearchTimeoutException(message), + new ReplicationFailedException(message), true // to be safe, we don't know what go stuck ); return; diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java index afdd0ce466f9b..23ad4d0e096b5 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java @@ -38,4 +38,16 @@ public ReplicationFailedException(ShardId shardId, @Nullable String extraInfo, T public ReplicationFailedException(StreamInput in) throws IOException { super(in); } + + public ReplicationFailedException(Exception e) { + super(e); + } + + public ReplicationFailedException(String msg) { + super(msg); + } + + public ReplicationFailedException(String msg, Throwable cause) { + super(msg, cause); + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java index 0666f475d496a..1d3c48d084f60 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java @@ -8,8 +8,6 @@ package org.opensearch.indices.replication.common; -import org.opensearch.OpenSearchException; - /** * Interface for listeners that run when there's a change in {@link ReplicationState} * @@ -19,5 +17,5 @@ public interface ReplicationListener { void onDone(ReplicationState state); - void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure); + void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure); } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 42f4572fef3e4..1e2dcbb46256f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.RateLimiter; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ChannelActionListener; import org.opensearch.common.CheckedFunction; @@ -78,7 +77,7 @@ public CancellableThreads cancellableThreads() { return cancellableThreads; } - public abstract void notifyListener(OpenSearchException e, boolean sendShardFailure); + public abstract void notifyListener(ReplicationFailedException e, boolean sendShardFailure); public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIndex stateIndex, ReplicationListener listener) { super(name); @@ -170,7 +169,7 @@ public void cancel(String reason) { * @param e exception that encapsulates the failure * @param sendShardFailure indicates whether to notify the master of the shard failure */ - public void fail(OpenSearchException e, boolean 
sendShardFailure) { + public void fail(ReplicationFailedException e, boolean sendShardFailure) { if (finished.compareAndSet(false, true)) { try { notifyListener(e, sendShardFailure); @@ -187,7 +186,7 @@ public void fail(OpenSearchException e, boolean sendShardFailure) { protected void ensureRefCount() { if (refCount() <= 0) { - throw new OpenSearchException( + throw new ReplicationFailedException( "ReplicationTarget is used but it's refcount is 0. Probably a mismatch between incRef/decRef calls" ); } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index da04ea1b9914b..b3cbd9b00f0b1 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -11,7 +11,6 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; import org.junit.Assert; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; @@ -44,6 +43,7 @@ import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -670,8 +670,7 @@ public void onReplicationDone(SegmentReplicationState state) { } @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - assertTrue(e instanceof CancellableThreads.ExecutionCancelledException); + public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { assertFalse(sendShardFailure); assertEquals(SegmentReplicationState.Stage.CANCELLED, state.getStage()); latch.countDown(); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index cc5100fba9010..3d04f808bc30c 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -41,7 +41,6 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.bulk.BulkShardRequest; @@ -70,6 +69,7 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.SnapshotMatchers; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationType; @@ -471,7 +471,7 @@ public void onDone(ReplicationState state) { } @Override - public void onFailure(ReplicationState state, 
OpenSearchException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { assertThat(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), equalTo("simulated")); } })) diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 7437cb22e44d1..9a9c5b989dea9 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -22,6 +22,7 @@ import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; @@ -104,7 +105,7 @@ public void onReplicationDone(SegmentReplicationState state) { } @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { logger.error("Unexpected error", e); Assert.fail("Test should succeed"); } @@ -149,7 +150,7 @@ public void onReplicationDone(SegmentReplicationState state) { } @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { // failures leave state object in last entered stage. 
assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); assertEquals(expectedError, e.getCause()); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 34ca94bba02f3..ffea4aaf6b7c4 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -27,7 +27,6 @@ import org.junit.Assert; import org.mockito.Mockito; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -40,6 +39,7 @@ import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.store.StoreTests; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; @@ -199,7 +199,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause().getCause()); - segrepTarget.fail(new OpenSearchException(e), false); + segrepTarget.fail(new ReplicationFailedException(e), false); } }); } @@ -242,7 +242,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause().getCause()); - segrepTarget.fail(new OpenSearchException(e), false); + segrepTarget.fail(new ReplicationFailedException(e), false); } }); } @@ -287,7 +287,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause()); - segrepTarget.fail(new OpenSearchException(e), false); + segrepTarget.fail(new ReplicationFailedException(e), false); } }); } @@ -332,7 +332,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause()); - segrepTarget.fail(new OpenSearchException(e), false); + segrepTarget.fail(new ReplicationFailedException(e), false); } }); } @@ -374,7 +374,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assert (e instanceof IllegalStateException); - segrepTarget.fail(new OpenSearchException(e), false); + segrepTarget.fail(new ReplicationFailedException(e), false); } }); } diff --git a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java index 1789dd3b2a288..75ac1075e8ee0 100644 --- a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java +++ b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java @@ -39,6 +39,7 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import 
org.opensearch.indices.recovery.RecoveryState; @@ -59,7 +60,7 @@ public void onDone(ReplicationState state) { } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { } }; @@ -93,7 +94,7 @@ public void onDone(ReplicationState state) { } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { failed.set(true); latch.countDown(); } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 1fcdfd79c544e..f520206e0f866 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -39,7 +39,6 @@ import org.apache.lucene.store.IndexInput; import org.junit.Assert; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; @@ -119,6 +118,7 @@ import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.IndexId; @@ -181,7 +181,7 @@ public void onDone(ReplicationState state) { } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { throw new AssertionError(e); } }; @@ -1282,7 +1282,11 @@ public void onReplicationDone(SegmentReplicationState state) { } @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { logger.error("Unexpected replication failure in test", e); Assert.fail("test replication should not fail: " + e); } From 5d7d83e5852ab38095ea02a1248ea40b4416cd4b Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 13 Oct 2022 16:48:50 -0700 Subject: [PATCH 058/122] Update version check after backport (#4786) Now that the searchable snapshot API change has been backported to 2.x, the version guard has to change from 3.0 to 2.4. 
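(For illustration only, a minimal sketch of the wire-compatibility guard being adjusted; the FeatureFlags/Version pattern below is taken directly from the hunks in this patch, nothing new is introduced:

    // Read the optional field only from peers new enough to have written it.
    // After the 2.x backport, the guard drops from Version.V_3_0_0 to Version.V_2_4_0.
    if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_2_4_0)) {
        storageType = in.readEnum(StorageType.class);
    }

The write path in writeTo applies the same guard before out.writeEnum(storageType), so both sides of the stream stay in agreement.)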
Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../cluster/snapshots/restore/RestoreSnapshotRequest.java | 4 ++-- .../java/org/opensearch/cluster/routing/RecoverySource.java | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a8bee55d881d..69fb6b42fe29f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -132,6 +132,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) - Fixed randomly failing test ([4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) +- Update version check after backport ([4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 3ecf5ab19c0e4..5fd83244f3dea 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -152,7 +152,7 @@ public RestoreSnapshotRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { snapshotUuid = in.readOptionalString(); } - if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_2_4_0)) { storageType = in.readEnum(StorageType.class); } } @@ -182,7 +182,7 @@ public void writeTo(StreamOutput out) throws IOException { "restricting the snapshot UUID is forbidden in a cluster with version [" + out.getVersion() + "] nodes" ); } - if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && out.getVersion().onOrAfter(Version.V_2_4_0)) { out.writeEnum(storageType); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index 728bf9d1ae90e..3e4feb02686d6 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -287,7 +287,7 @@ public SnapshotRecoverySource( } else { index = new IndexId(in.readString(), IndexMetadata.INDEX_UUID_NA_VALUE); } - if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_2_4_0)) { isSearchableSnapshot = in.readBoolean(); } else { isSearchableSnapshot = false; @@ -330,7 +330,7 @@ protected void writeAdditionalFields(StreamOutput out) throws IOException { } else { out.writeString(index.getName()); } - if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && 
out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && out.getVersion().onOrAfter(Version.V_2_4_0)) { out.writeBoolean(isSearchableSnapshot); } } From d70886091042c12978acbecced80736c4f0284a9 Mon Sep 17 00:00:00 2001 From: Ketan Verma Date: Fri, 14 Oct 2022 15:50:54 +0530 Subject: [PATCH 059/122] Added in-flight cancellation of SearchShardTask based on resource consumption (#4575) * Added in-flight cancellation of SearchShardTask based on resource consumption This feature aims to identify and cancel resource intensive SearchShardTasks if they have breached certain thresholds. This will help in terminating problematic queries which can put nodes in duress and degrade the cluster performance. Signed-off-by: Ketan Verma --- CHANGELOG.md | 2 + .../org/opensearch/cluster/ClusterModule.java | 2 - .../common/settings/ClusterSettings.java | 20 +- .../opensearch/common/util/MovingAverage.java | 57 ++++ .../org/opensearch/common/util/Streak.java | 33 ++ .../opensearch/common/util/TokenBucket.java | 124 +++++++ .../main/java/org/opensearch/node/Node.java | 27 +- .../java/org/opensearch/node/NodeService.java | 10 +- .../SearchBackpressureService.java | 311 ++++++++++++++++++ .../backpressure/SearchBackpressureState.java | 57 ++++ .../search/backpressure/package-info.java | 12 + .../settings/NodeDuressSettings.java | 99 ++++++ .../settings/SearchBackpressureSettings.java | 213 ++++++++++++ .../settings/SearchShardTaskSettings.java | 160 +++++++++ .../backpressure/settings/package-info.java | 12 + .../trackers/NodeDuressTracker.java | 41 +++ .../trackers/TaskResourceUsageTracker.java | 50 +++ .../backpressure/trackers/package-info.java | 12 + .../org/opensearch/tasks/CancellableTask.java | 2 +- .../opensearch/tasks/TaskCancellation.java | 108 ++++++ .../tasks/TaskResourceTrackingService.java | 22 ++ .../org/opensearch/common/StreakTests.java | 34 ++ .../common/util/MovingAverageTests.java | 49 +++ .../common/util/TokenBucketTests.java | 77 +++++ .../SearchBackpressureServiceTests.java | 213 ++++++++++++ .../trackers/NodeDuressTrackerTests.java | 35 ++ .../tasks/TaskCancellationTests.java | 72 ++++ .../SearchBackpressureTestHelpers.java | 47 +++ 28 files changed, 1895 insertions(+), 6 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/util/MovingAverage.java create mode 100644 server/src/main/java/org/opensearch/common/util/Streak.java create mode 100644 server/src/main/java/org/opensearch/common/util/TokenBucket.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureState.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/settings/NodeDuressSettings.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/settings/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTracker.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java create mode 100644 
server/src/main/java/org/opensearch/search/backpressure/trackers/package-info.java create mode 100644 server/src/main/java/org/opensearch/tasks/TaskCancellation.java create mode 100644 server/src/test/java/org/opensearch/common/StreakTests.java create mode 100644 server/src/test/java/org/opensearch/common/util/MovingAverageTests.java create mode 100644 server/src/test/java/org/opensearch/common/util/TokenBucketTests.java create mode 100644 server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java create mode 100644 server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackerTests.java create mode 100644 server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java create mode 100644 test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 69fb6b42fe29f..15dedbde22d41 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) - Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680)) - Recommissioning of zone. REST layer support. ([#4624](https://github.com/opensearch-project/OpenSearch/pull/4604)) +- Added in-flight cancellation of SearchShardTask based on resource consumption ([#4565](https://github.com/opensearch-project/OpenSearch/pull/4565)) + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index 3ac3a08d4848a..6279447485348 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -96,7 +96,6 @@ import org.opensearch.script.ScriptMetadata; import org.opensearch.snapshots.SnapshotsInfoService; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResultsService; import java.util.ArrayList; @@ -420,7 +419,6 @@ protected void configure() { bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); bind(TaskResultsService.class).asEagerSingleton(); - bind(TaskResourceTrackingService.class).asEagerSingleton(); bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 54579031aac08..320cb5457b21c 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -41,6 +41,9 @@ import org.opensearch.index.ShardIndexingPressureMemoryManager; import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.ShardIndexingPressureStore; +import org.opensearch.search.backpressure.settings.NodeDuressSettings; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.search.backpressure.settings.SearchShardTaskSettings; import org.opensearch.tasks.TaskManager; import 
org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.watcher.ResourceWatcherService; @@ -581,7 +584,22 @@ public void apply(Settings value, Settings current, Settings previous) { ShardIndexingPressureMemoryManager.MAX_OUTSTANDING_REQUESTS, IndexingPressure.MAX_INDEXING_BYTES, TaskResourceTrackingService.TASK_RESOURCE_TRACKING_ENABLED, - TaskManager.TASK_RESOURCE_CONSUMERS_ENABLED + TaskManager.TASK_RESOURCE_CONSUMERS_ENABLED, + + // Settings related to search backpressure + SearchBackpressureSettings.SETTING_ENABLED, + SearchBackpressureSettings.SETTING_ENFORCED, + SearchBackpressureSettings.SETTING_CANCELLATION_RATIO, + SearchBackpressureSettings.SETTING_CANCELLATION_RATE, + SearchBackpressureSettings.SETTING_CANCELLATION_BURST, + NodeDuressSettings.SETTING_NUM_SUCCESSIVE_BREACHES, + NodeDuressSettings.SETTING_CPU_THRESHOLD, + NodeDuressSettings.SETTING_HEAP_THRESHOLD, + SearchShardTaskSettings.SETTING_TOTAL_HEAP_THRESHOLD, + SearchShardTaskSettings.SETTING_HEAP_THRESHOLD, + SearchShardTaskSettings.SETTING_HEAP_VARIANCE_THRESHOLD, + SearchShardTaskSettings.SETTING_CPU_TIME_THRESHOLD, + SearchShardTaskSettings.SETTING_ELAPSED_TIME_THRESHOLD ) ) ); diff --git a/server/src/main/java/org/opensearch/common/util/MovingAverage.java b/server/src/main/java/org/opensearch/common/util/MovingAverage.java new file mode 100644 index 0000000000000..650ba62ecd8c8 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/MovingAverage.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +/** + * MovingAverage is used to calculate the moving average of last 'n' observations. + * + * @opensearch.internal + */ +public class MovingAverage { + private final int windowSize; + private final long[] observations; + + private long count = 0; + private long sum = 0; + private double average = 0; + + public MovingAverage(int windowSize) { + if (windowSize <= 0) { + throw new IllegalArgumentException("window size must be greater than zero"); + } + + this.windowSize = windowSize; + this.observations = new long[windowSize]; + } + + /** + * Records a new observation and evicts the n-th last observation. + */ + public synchronized double record(long value) { + long delta = value - observations[(int) (count % observations.length)]; + observations[(int) (count % observations.length)] = value; + + count++; + sum += delta; + average = (double) sum / Math.min(count, observations.length); + return average; + } + + public double getAverage() { + return average; + } + + public long getCount() { + return count; + } + + public boolean isReady() { + return count >= windowSize; + } +} diff --git a/server/src/main/java/org/opensearch/common/util/Streak.java b/server/src/main/java/org/opensearch/common/util/Streak.java new file mode 100644 index 0000000000000..5f6ad3021659e --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/Streak.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Streak is a data structure that keeps track of the number of successive successful events. 
+ * + * @opensearch.internal + */ +public class Streak { + private final AtomicInteger successiveSuccessfulEvents = new AtomicInteger(); + + public int record(boolean isSuccessful) { + if (isSuccessful) { + return successiveSuccessfulEvents.incrementAndGet(); + } else { + successiveSuccessfulEvents.set(0); + return 0; + } + } + + public int length() { + return successiveSuccessfulEvents.get(); + } +} diff --git a/server/src/main/java/org/opensearch/common/util/TokenBucket.java b/server/src/main/java/org/opensearch/common/util/TokenBucket.java new file mode 100644 index 0000000000000..e47f152d71363 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/TokenBucket.java @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongSupplier; + +/** + * TokenBucket is used to limit the number of operations at a constant rate while allowing for short bursts. + * + * @opensearch.internal + */ +public class TokenBucket { + /** + * Defines a monotonically increasing counter. + * + * Usage examples: + * 1. clock = System::nanoTime can be used to perform rate-limiting per unit time + * 2. clock = AtomicLong::get can be used to perform rate-limiting per unit number of operations + */ + private final LongSupplier clock; + + /** + * Defines the number of tokens added to the bucket per clock cycle. + */ + private final double rate; + + /** + * Defines the capacity and the maximum number of operations that can be performed per clock cycle before + * the bucket runs out of tokens. + */ + private final double burst; + + /** + * Defines the current state of the token bucket. + */ + private final AtomicReference state; + + public TokenBucket(LongSupplier clock, double rate, double burst) { + this(clock, rate, burst, burst); + } + + public TokenBucket(LongSupplier clock, double rate, double burst, double initialTokens) { + if (rate <= 0.0) { + throw new IllegalArgumentException("rate must be greater than zero"); + } + + if (burst <= 0.0) { + throw new IllegalArgumentException("burst must be greater than zero"); + } + + this.clock = clock; + this.rate = rate; + this.burst = burst; + this.state = new AtomicReference<>(new State(Math.min(initialTokens, burst), clock.getAsLong())); + } + + /** + * If there are enough tokens in the bucket, it requests/deducts 'n' tokens and returns true. + * Otherwise, returns false and leaves the bucket untouched. 
+ */ + public boolean request(double n) { + if (n <= 0) { + throw new IllegalArgumentException("requested tokens must be greater than zero"); + } + + // Refill tokens + State currentState, updatedState; + do { + currentState = state.get(); + long now = clock.getAsLong(); + double incr = (now - currentState.lastRefilledAt) * rate; + updatedState = new State(Math.min(currentState.tokens + incr, burst), now); + } while (state.compareAndSet(currentState, updatedState) == false); + + // Deduct tokens + do { + currentState = state.get(); + if (currentState.tokens < n) { + return false; + } + updatedState = new State(currentState.tokens - n, currentState.lastRefilledAt); + } while (state.compareAndSet(currentState, updatedState) == false); + + return true; + } + + public boolean request() { + return request(1.0); + } + + /** + * Represents an immutable token bucket state. + */ + private static class State { + final double tokens; + final double lastRefilledAt; + + public State(double tokens, double lastRefilledAt) { + this.tokens = tokens; + this.lastRefilledAt = lastRefilledAt; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + State state = (State) o; + return Double.compare(state.tokens, tokens) == 0 && Double.compare(state.lastRefilledAt, lastRefilledAt) == 0; + } + + @Override + public int hashCode() { + return Objects.hash(tokens, lastRefilledAt); + } + } +} diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 504550378e14f..54696b1f6b118 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -43,6 +43,8 @@ import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; +import org.opensearch.search.backpressure.SearchBackpressureService; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; @@ -796,6 +798,23 @@ protected Node( // development. Then we can deprecate Getter and Setter for IndexingPressureService in ClusterService (#478). 
clusterService.setIndexingPressureService(indexingPressureService); + final TaskResourceTrackingService taskResourceTrackingService = new TaskResourceTrackingService( + settings, + clusterService.getClusterSettings(), + threadPool + ); + + final SearchBackpressureSettings searchBackpressureSettings = new SearchBackpressureSettings( + settings, + clusterService.getClusterSettings() + ); + + final SearchBackpressureService searchBackpressureService = new SearchBackpressureService( + searchBackpressureSettings, + taskResourceTrackingService, + threadPool + ); + final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); RepositoriesModule repositoriesModule = new RepositoriesModule( this.environment, @@ -883,7 +902,8 @@ protected Node( responseCollectorService, searchTransportService, indexingPressureService, - searchModule.getValuesSourceRegistry().getUsageService() + searchModule.getValuesSourceRegistry().getUsageService(), + searchBackpressureService ); final SearchService searchService = newSearchService( @@ -941,6 +961,8 @@ protected Node( b.bind(AnalysisRegistry.class).toInstance(analysisModule.getAnalysisRegistry()); b.bind(IngestService.class).toInstance(ingestService); b.bind(IndexingPressureService.class).toInstance(indexingPressureService); + b.bind(TaskResourceTrackingService.class).toInstance(taskResourceTrackingService); + b.bind(SearchBackpressureService.class).toInstance(searchBackpressureService); b.bind(UsageService.class).toInstance(usageService); b.bind(AggregationUsageService.class).toInstance(searchModule.getValuesSourceRegistry().getUsageService()); b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); @@ -1104,6 +1126,7 @@ public Node start() throws NodeValidationException { injector.getInstance(SearchService.class).start(); injector.getInstance(FsHealthService.class).start(); nodeService.getMonitorService().start(); + nodeService.getSearchBackpressureService().start(); final ClusterService clusterService = injector.getInstance(ClusterService.class); @@ -1259,6 +1282,7 @@ private Node stop() { injector.getInstance(NodeConnectionsService.class).stop(); injector.getInstance(FsHealthService.class).stop(); nodeService.getMonitorService().stop(); + nodeService.getSearchBackpressureService().stop(); injector.getInstance(GatewayService.class).stop(); injector.getInstance(SearchService.class).stop(); injector.getInstance(TransportService.class).stop(); @@ -1318,6 +1342,7 @@ public synchronized void close() throws IOException { toClose.add(injector.getInstance(Discovery.class)); toClose.add(() -> stopWatch.stop().start("monitor")); toClose.add(nodeService.getMonitorService()); + toClose.add(nodeService.getSearchBackpressureService()); toClose.add(() -> stopWatch.stop().start("fsHealth")); toClose.add(injector.getInstance(FsHealthService.class)); toClose.add(() -> stopWatch.stop().start("gateway")); diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index ab98b47c7287b..f4e037db66ca2 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -53,6 +53,7 @@ import org.opensearch.plugins.PluginsService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.AggregationUsageService; +import org.opensearch.search.backpressure.SearchBackpressureService; import org.opensearch.threadpool.ThreadPool; import 
org.opensearch.transport.TransportService; @@ -81,6 +82,7 @@ public class NodeService implements Closeable { private final SearchTransportService searchTransportService; private final IndexingPressureService indexingPressureService; private final AggregationUsageService aggregationUsageService; + private final SearchBackpressureService searchBackpressureService; private final Discovery discovery; @@ -101,7 +103,8 @@ public class NodeService implements Closeable { ResponseCollectorService responseCollectorService, SearchTransportService searchTransportService, IndexingPressureService indexingPressureService, - AggregationUsageService aggregationUsageService + AggregationUsageService aggregationUsageService, + SearchBackpressureService searchBackpressureService ) { this.settings = settings; this.threadPool = threadPool; @@ -119,6 +122,7 @@ public class NodeService implements Closeable { this.searchTransportService = searchTransportService; this.indexingPressureService = indexingPressureService; this.aggregationUsageService = aggregationUsageService; + this.searchBackpressureService = searchBackpressureService; clusterService.addStateApplier(ingestService); } @@ -203,6 +207,10 @@ public MonitorService getMonitorService() { return monitorService; } + public SearchBackpressureService getSearchBackpressureService() { + return searchBackpressureService; + } + @Override public void close() throws IOException { IOUtils.close(indicesService); diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java new file mode 100644 index 0000000000000..885846a177d60 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java @@ -0,0 +1,311 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.util.TokenBucket; +import org.opensearch.monitor.jvm.JvmStats; +import org.opensearch.monitor.process.ProcessProbe; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.search.backpressure.trackers.NodeDuressTracker; +import org.opensearch.search.backpressure.trackers.TaskResourceUsageTracker; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.tasks.TaskResourceTrackingService.TaskCompletionListener; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongSupplier; +import java.util.stream.Collectors; + +/** + * SearchBackpressureService is responsible for monitoring and cancelling in-flight search tasks if they are + * breaching resource usage limits when the node is in duress. 
+ * + * @opensearch.internal + */ +public class SearchBackpressureService extends AbstractLifecycleComponent + implements + TaskCompletionListener, + SearchBackpressureSettings.Listener { + private static final Logger logger = LogManager.getLogger(SearchBackpressureService.class); + + private volatile Scheduler.Cancellable scheduledFuture; + + private final SearchBackpressureSettings settings; + private final TaskResourceTrackingService taskResourceTrackingService; + private final ThreadPool threadPool; + private final LongSupplier timeNanosSupplier; + + private final List nodeDuressTrackers; + private final List taskResourceUsageTrackers; + + private final AtomicReference taskCancellationRateLimiter = new AtomicReference<>(); + private final AtomicReference taskCancellationRatioLimiter = new AtomicReference<>(); + + // Currently, only the state of SearchShardTask is being tracked. + // This can be generalized to Map once we start supporting cancellation of SearchTasks as well. + private final SearchBackpressureState state = new SearchBackpressureState(); + + public SearchBackpressureService( + SearchBackpressureSettings settings, + TaskResourceTrackingService taskResourceTrackingService, + ThreadPool threadPool + ) { + this( + settings, + taskResourceTrackingService, + threadPool, + System::nanoTime, + List.of( + new NodeDuressTracker( + () -> ProcessProbe.getInstance().getProcessCpuPercent() / 100.0 >= settings.getNodeDuressSettings().getCpuThreshold() + ), + new NodeDuressTracker( + () -> JvmStats.jvmStats().getMem().getHeapUsedPercent() / 100.0 >= settings.getNodeDuressSettings().getHeapThreshold() + ) + ), + Collections.emptyList() + ); + } + + public SearchBackpressureService( + SearchBackpressureSettings settings, + TaskResourceTrackingService taskResourceTrackingService, + ThreadPool threadPool, + LongSupplier timeNanosSupplier, + List nodeDuressTrackers, + List taskResourceUsageTrackers + ) { + this.settings = settings; + this.settings.setListener(this); + this.taskResourceTrackingService = taskResourceTrackingService; + this.taskResourceTrackingService.addTaskCompletionListener(this); + this.threadPool = threadPool; + this.timeNanosSupplier = timeNanosSupplier; + this.nodeDuressTrackers = nodeDuressTrackers; + this.taskResourceUsageTrackers = taskResourceUsageTrackers; + + this.taskCancellationRateLimiter.set( + new TokenBucket(timeNanosSupplier, getSettings().getCancellationRateNanos(), getSettings().getCancellationBurst()) + ); + + this.taskCancellationRatioLimiter.set( + new TokenBucket(state::getCompletionCount, getSettings().getCancellationRatio(), getSettings().getCancellationBurst()) + ); + } + + void doRun() { + if (getSettings().isEnabled() == false) { + return; + } + + if (isNodeInDuress() == false) { + return; + } + + // We are only targeting in-flight cancellation of SearchShardTask for now. + List searchShardTasks = getSearchShardTasks(); + + // Force-refresh usage stats of these tasks before making a cancellation decision. + taskResourceTrackingService.refreshResourceStats(searchShardTasks.toArray(new Task[0])); + + // Skip cancellation if the increase in heap usage is not due to search requests. 
+ if (isHeapUsageDominatedBySearch(searchShardTasks) == false) { + return; + } + + for (TaskCancellation taskCancellation : getTaskCancellations(searchShardTasks)) { + logger.debug( + "cancelling task [{}] due to high resource consumption [{}]", + taskCancellation.getTask().getId(), + taskCancellation.getReasonString() + ); + + if (getSettings().isEnforced() == false) { + continue; + } + + // Independently remove tokens from both token buckets. + boolean rateLimitReached = taskCancellationRateLimiter.get().request() == false; + boolean ratioLimitReached = taskCancellationRatioLimiter.get().request() == false; + + // Stop cancelling tasks if there are no tokens in either of the two token buckets. + if (rateLimitReached && ratioLimitReached) { + logger.debug("task cancellation limit reached"); + state.incrementLimitReachedCount(); + break; + } + + taskCancellation.cancel(); + state.incrementCancellationCount(); + } + } + + /** + * Returns true if the node is in duress consecutively for the past 'n' observations. + */ + boolean isNodeInDuress() { + boolean isNodeInDuress = false; + int numSuccessiveBreaches = getSettings().getNodeDuressSettings().getNumSuccessiveBreaches(); + + for (NodeDuressTracker tracker : nodeDuressTrackers) { + if (tracker.check() >= numSuccessiveBreaches) { + isNodeInDuress = true; // not breaking the loop so that each tracker's streak gets updated. + } + } + + return isNodeInDuress; + } + + /** + * Returns true if the increase in heap usage is due to search requests. + */ + boolean isHeapUsageDominatedBySearch(List searchShardTasks) { + long runningTasksHeapUsage = searchShardTasks.stream().mapToLong(task -> task.getTotalResourceStats().getMemoryInBytes()).sum(); + long searchTasksHeapThreshold = getSettings().getSearchShardTaskSettings().getTotalHeapThresholdBytes(); + if (runningTasksHeapUsage < searchTasksHeapThreshold) { + logger.debug("heap usage not dominated by search requests [{}/{}]", runningTasksHeapUsage, searchTasksHeapThreshold); + return false; + } + + return true; + } + + /** + * Filters and returns the list of currently running SearchShardTasks. + */ + List getSearchShardTasks() { + return taskResourceTrackingService.getResourceAwareTasks() + .values() + .stream() + .filter(task -> task instanceof SearchShardTask) + .map(task -> (CancellableTask) task) + .collect(Collectors.toUnmodifiableList()); + } + + /** + * Returns a TaskCancellation wrapper containing the list of reasons (possibly zero), along with an overall + * cancellation score for the given task. Cancelling a task with a higher score has better chance of recovering the + * node from duress. + */ + TaskCancellation getTaskCancellation(CancellableTask task) { + List reasons = new ArrayList<>(); + List callbacks = new ArrayList<>(); + + for (TaskResourceUsageTracker tracker : taskResourceUsageTrackers) { + Optional reason = tracker.cancellationReason(task); + if (reason.isPresent()) { + reasons.add(reason.get()); + callbacks.add(tracker::incrementCancellations); + } + } + + return new TaskCancellation(task, reasons, callbacks); + } + + /** + * Returns a list of TaskCancellations sorted by descending order of their cancellation scores. 
+     */
+    List<TaskCancellation> getTaskCancellations(List<CancellableTask> tasks) {
+        return tasks.stream()
+            .map(this::getTaskCancellation)
+            .filter(TaskCancellation::isEligibleForCancellation)
+            .sorted(Comparator.reverseOrder())
+            .collect(Collectors.toUnmodifiableList());
+    }
+
+    SearchBackpressureSettings getSettings() {
+        return settings;
+    }
+
+    SearchBackpressureState getState() {
+        return state;
+    }
+
+    @Override
+    public void onTaskCompleted(Task task) {
+        if (getSettings().isEnabled() == false) {
+            return;
+        }
+
+        if (task instanceof SearchShardTask == false) {
+            return;
+        }
+
+        SearchShardTask searchShardTask = (SearchShardTask) task;
+        if (searchShardTask.isCancelled() == false) {
+            state.incrementCompletionCount();
+        }
+
+        List<Exception> exceptions = new ArrayList<>();
+        for (TaskResourceUsageTracker tracker : taskResourceUsageTrackers) {
+            try {
+                tracker.update(searchShardTask);
+            } catch (Exception e) {
+                exceptions.add(e);
+            }
+        }
+        ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions);
+    }
+
+    @Override
+    public void onCancellationRatioChanged() {
+        taskCancellationRatioLimiter.set(
+            new TokenBucket(state::getCompletionCount, getSettings().getCancellationRatio(), getSettings().getCancellationBurst())
+        );
+    }
+
+    @Override
+    public void onCancellationRateChanged() {
+        taskCancellationRateLimiter.set(
+            new TokenBucket(timeNanosSupplier, getSettings().getCancellationRateNanos(), getSettings().getCancellationBurst())
+        );
+    }
+
+    @Override
+    public void onCancellationBurstChanged() {
+        onCancellationRatioChanged();
+        onCancellationRateChanged();
+    }
+
+    @Override
+    protected void doStart() {
+        scheduledFuture = threadPool.scheduleWithFixedDelay(() -> {
+            try {
+                doRun();
+            } catch (Exception e) {
+                logger.debug("failure in search backpressure", e);
+            }
+        }, getSettings().getInterval(), ThreadPool.Names.GENERIC);
+    }
+
+    @Override
+    protected void doStop() {
+        if (scheduledFuture != null) {
+            scheduledFuture.cancel();
+        }
+    }
+
+    @Override
+    protected void doClose() throws IOException {}
+}
diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureState.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureState.java
new file mode 100644
index 0000000000000..a62231ec29ede
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureState.java
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.backpressure;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Tracks the current state of task completions and cancellations.
+ *
+ * @opensearch.internal
+ */
+public class SearchBackpressureState {
+    /**
+     * The number of successful task completions.
+     */
+    private final AtomicLong completionCount = new AtomicLong();
+
+    /**
+     * The number of task cancellations due to limit breaches.
+     */
+    private final AtomicLong cancellationCount = new AtomicLong();
+
+    /**
+     * The number of times task cancellation limit was reached.
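+     * (Incremented by SearchBackpressureService when both token buckets run out of tokens within a single run.)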
+ */ + private final AtomicLong limitReachedCount = new AtomicLong(); + + public long getCompletionCount() { + return completionCount.get(); + } + + long incrementCompletionCount() { + return completionCount.incrementAndGet(); + } + + public long getCancellationCount() { + return cancellationCount.get(); + } + + long incrementCancellationCount() { + return cancellationCount.incrementAndGet(); + } + + public long getLimitReachedCount() { + return limitReachedCount.get(); + } + + long incrementLimitReachedCount() { + return limitReachedCount.incrementAndGet(); + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/package-info.java b/server/src/main/java/org/opensearch/search/backpressure/package-info.java new file mode 100644 index 0000000000000..36d216993b2fc --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes responsible for search backpressure. + */ +package org.opensearch.search.backpressure; diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/NodeDuressSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/NodeDuressSettings.java new file mode 100644 index 0000000000000..09c1e4fcef46c --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/NodeDuressSettings.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.settings; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; + +/** + * Defines the settings for a node to be considered in duress. + * + * @opensearch.internal + */ +public class NodeDuressSettings { + private static class Defaults { + private static final int NUM_SUCCESSIVE_BREACHES = 3; + private static final double CPU_THRESHOLD = 0.9; + private static final double HEAP_THRESHOLD = 0.7; + } + + /** + * Defines the number of successive limit breaches after the node is marked "in duress". + */ + private volatile int numSuccessiveBreaches; + public static final Setting SETTING_NUM_SUCCESSIVE_BREACHES = Setting.intSetting( + "search_backpressure.node_duress.num_successive_breaches", + Defaults.NUM_SUCCESSIVE_BREACHES, + 1, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the CPU usage threshold (in percentage) for a node to be considered "in duress". + */ + private volatile double cpuThreshold; + public static final Setting SETTING_CPU_THRESHOLD = Setting.doubleSetting( + "search_backpressure.node_duress.cpu_threshold", + Defaults.CPU_THRESHOLD, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the heap usage threshold (in percentage) for a node to be considered "in duress". 
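+     * (Defaults to 0.7; like the CPU threshold (0.9) and the successive-breach count (3), it is a dynamic, node-scoped setting.)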
+ */ + private volatile double heapThreshold; + public static final Setting SETTING_HEAP_THRESHOLD = Setting.doubleSetting( + "search_backpressure.node_duress.heap_threshold", + Defaults.HEAP_THRESHOLD, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public NodeDuressSettings(Settings settings, ClusterSettings clusterSettings) { + numSuccessiveBreaches = SETTING_NUM_SUCCESSIVE_BREACHES.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_NUM_SUCCESSIVE_BREACHES, this::setNumSuccessiveBreaches); + + cpuThreshold = SETTING_CPU_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_CPU_THRESHOLD, this::setCpuThreshold); + + heapThreshold = SETTING_HEAP_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_HEAP_THRESHOLD, this::setHeapThreshold); + } + + public int getNumSuccessiveBreaches() { + return numSuccessiveBreaches; + } + + private void setNumSuccessiveBreaches(int numSuccessiveBreaches) { + this.numSuccessiveBreaches = numSuccessiveBreaches; + } + + public double getCpuThreshold() { + return cpuThreshold; + } + + private void setCpuThreshold(double cpuThreshold) { + this.cpuThreshold = cpuThreshold; + } + + public double getHeapThreshold() { + return heapThreshold; + } + + private void setHeapThreshold(double heapThreshold) { + this.heapThreshold = heapThreshold; + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java new file mode 100644 index 0000000000000..4834808d768f1 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java @@ -0,0 +1,213 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.settings; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +import java.util.concurrent.TimeUnit; + +/** + * Settings related to search backpressure and cancellation of in-flight requests. + * + * @opensearch.internal + */ +public class SearchBackpressureSettings { + private static class Defaults { + private static final long INTERVAL = 1000; + + private static final boolean ENABLED = true; + private static final boolean ENFORCED = false; + + private static final double CANCELLATION_RATIO = 0.1; + private static final double CANCELLATION_RATE = 0.003; + private static final double CANCELLATION_BURST = 10.0; + } + + /** + * Defines the interval (in millis) at which the SearchBackpressureService monitors and cancels tasks. + */ + private final TimeValue interval; + public static final Setting SETTING_INTERVAL = Setting.longSetting( + "search_backpressure.interval", + Defaults.INTERVAL, + 1, + Setting.Property.NodeScope + ); + + /** + * Defines whether search backpressure is enabled or not. + */ + private volatile boolean enabled; + public static final Setting SETTING_ENABLED = Setting.boolSetting( + "search_backpressure.enabled", + Defaults.ENABLED, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines whether in-flight cancellation of tasks is enabled or not. 
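+     * When "search_backpressure.enabled" is true but "search_backpressure.enforced" is false, the service only
+     * monitors and logs tasks that would have been cancelled; tasks are actually cancelled only when enforced is true as well.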
+ */ + private volatile boolean enforced; + public static final Setting SETTING_ENFORCED = Setting.boolSetting( + "search_backpressure.enforced", + Defaults.ENFORCED, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the percentage of tasks to cancel relative to the number of successful task completions. + * In other words, it is the number of tokens added to the bucket on each successful task completion. + */ + private volatile double cancellationRatio; + public static final Setting SETTING_CANCELLATION_RATIO = Setting.doubleSetting( + "search_backpressure.cancellation_ratio", + Defaults.CANCELLATION_RATIO, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the number of tasks to cancel per unit time (in millis). + * In other words, it is the number of tokens added to the bucket each millisecond. + */ + private volatile double cancellationRate; + public static final Setting SETTING_CANCELLATION_RATE = Setting.doubleSetting( + "search_backpressure.cancellation_rate", + Defaults.CANCELLATION_RATE, + 0.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the maximum number of tasks that can be cancelled before being rate-limited. + */ + private volatile double cancellationBurst; + public static final Setting SETTING_CANCELLATION_BURST = Setting.doubleSetting( + "search_backpressure.cancellation_burst", + Defaults.CANCELLATION_BURST, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Callback listeners. + */ + public interface Listener { + void onCancellationRatioChanged(); + + void onCancellationRateChanged(); + + void onCancellationBurstChanged(); + } + + private final SetOnce listener = new SetOnce<>(); + private final NodeDuressSettings nodeDuressSettings; + private final SearchShardTaskSettings searchShardTaskSettings; + + public SearchBackpressureSettings(Settings settings, ClusterSettings clusterSettings) { + this.nodeDuressSettings = new NodeDuressSettings(settings, clusterSettings); + this.searchShardTaskSettings = new SearchShardTaskSettings(settings, clusterSettings); + + interval = new TimeValue(SETTING_INTERVAL.get(settings)); + + enabled = SETTING_ENABLED.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_ENABLED, this::setEnabled); + + enforced = SETTING_ENFORCED.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_ENFORCED, this::setEnforced); + + cancellationRatio = SETTING_CANCELLATION_RATIO.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_CANCELLATION_RATIO, this::setCancellationRatio); + + cancellationRate = SETTING_CANCELLATION_RATE.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_CANCELLATION_RATE, this::setCancellationRate); + + cancellationBurst = SETTING_CANCELLATION_BURST.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_CANCELLATION_BURST, this::setCancellationBurst); + } + + public void setListener(Listener listener) { + this.listener.set(listener); + } + + public NodeDuressSettings getNodeDuressSettings() { + return nodeDuressSettings; + } + + public SearchShardTaskSettings getSearchShardTaskSettings() { + return searchShardTaskSettings; + } + + public TimeValue getInterval() { + return interval; + } + + public boolean isEnabled() { + return enabled; + } + + private void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public boolean isEnforced() { + return enforced; + } + + private void setEnforced(boolean enforced) { + 
this.enforced = enforced; + } + + public double getCancellationRatio() { + return cancellationRatio; + } + + private void setCancellationRatio(double cancellationRatio) { + this.cancellationRatio = cancellationRatio; + if (listener.get() != null) { + listener.get().onCancellationRatioChanged(); + } + } + + public double getCancellationRate() { + return cancellationRate; + } + + public double getCancellationRateNanos() { + return getCancellationRate() / TimeUnit.MILLISECONDS.toNanos(1); // rate per nanoseconds + } + + private void setCancellationRate(double cancellationRate) { + this.cancellationRate = cancellationRate; + if (listener.get() != null) { + listener.get().onCancellationRateChanged(); + } + } + + public double getCancellationBurst() { + return cancellationBurst; + } + + private void setCancellationBurst(double cancellationBurst) { + this.cancellationBurst = cancellationBurst; + if (listener.get() != null) { + listener.get().onCancellationBurstChanged(); + } + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java new file mode 100644 index 0000000000000..1126dad78f554 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.settings; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.monitor.jvm.JvmStats; + +/** + * Defines the settings related to the cancellation of SearchShardTasks. + * + * @opensearch.internal + */ +public class SearchShardTaskSettings { + private static final long HEAP_SIZE_BYTES = JvmStats.jvmStats().getMem().getHeapMax().getBytes(); + + private static class Defaults { + private static final double TOTAL_HEAP_THRESHOLD = 0.05; + private static final double HEAP_THRESHOLD = 0.005; + private static final double HEAP_VARIANCE_THRESHOLD = 2.0; + private static final long CPU_TIME_THRESHOLD = 15; + private static final long ELAPSED_TIME_THRESHOLD = 30000; + } + + /** + * Defines the heap usage threshold (in percentage) for the sum of heap usages across all search shard tasks + * before in-flight cancellation is applied. + */ + private volatile double totalHeapThreshold; + public static final Setting SETTING_TOTAL_HEAP_THRESHOLD = Setting.doubleSetting( + "search_backpressure.search_shard_task.total_heap_threshold", + Defaults.TOTAL_HEAP_THRESHOLD, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the heap usage threshold (in percentage) for an individual task before it is considered for cancellation. + */ + private volatile double heapThreshold; + public static final Setting SETTING_HEAP_THRESHOLD = Setting.doubleSetting( + "search_backpressure.search_shard_task.heap_threshold", + Defaults.HEAP_THRESHOLD, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the heap usage variance for an individual task before it is considered for cancellation. + * A task is considered for cancellation when taskHeapUsage is greater than or equal to heapUsageMovingAverage * variance. 
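+     * For example, with the default variance threshold of 2.0, a task using 40 MB of heap becomes a cancellation
+     * candidate once the moving average of per-task heap usage is 20 MB or less.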
+ */ + private volatile double heapVarianceThreshold; + public static final Setting SETTING_HEAP_VARIANCE_THRESHOLD = Setting.doubleSetting( + "search_backpressure.search_shard_task.heap_variance", + Defaults.HEAP_VARIANCE_THRESHOLD, + 0.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the CPU usage threshold (in millis) for an individual task before it is considered for cancellation. + */ + private volatile long cpuTimeThreshold; + public static final Setting SETTING_CPU_TIME_THRESHOLD = Setting.longSetting( + "search_backpressure.search_shard_task.cpu_time_threshold", + Defaults.CPU_TIME_THRESHOLD, + 0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the elapsed time threshold (in millis) for an individual task before it is considered for cancellation. + */ + private volatile long elapsedTimeThreshold; + public static final Setting SETTING_ELAPSED_TIME_THRESHOLD = Setting.longSetting( + "search_backpressure.search_shard_task.elapsed_time_threshold", + Defaults.ELAPSED_TIME_THRESHOLD, + 0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public SearchShardTaskSettings(Settings settings, ClusterSettings clusterSettings) { + totalHeapThreshold = SETTING_TOTAL_HEAP_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_TOTAL_HEAP_THRESHOLD, this::setTotalHeapThreshold); + + heapThreshold = SETTING_HEAP_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_HEAP_THRESHOLD, this::setHeapThreshold); + + heapVarianceThreshold = SETTING_HEAP_VARIANCE_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_HEAP_VARIANCE_THRESHOLD, this::setHeapVarianceThreshold); + + cpuTimeThreshold = SETTING_CPU_TIME_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_CPU_TIME_THRESHOLD, this::setCpuTimeThreshold); + + elapsedTimeThreshold = SETTING_ELAPSED_TIME_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_ELAPSED_TIME_THRESHOLD, this::setElapsedTimeThreshold); + } + + public double getTotalHeapThreshold() { + return totalHeapThreshold; + } + + public long getTotalHeapThresholdBytes() { + return (long) (HEAP_SIZE_BYTES * getTotalHeapThreshold()); + } + + private void setTotalHeapThreshold(double totalHeapThreshold) { + this.totalHeapThreshold = totalHeapThreshold; + } + + public double getHeapThreshold() { + return heapThreshold; + } + + public long getHeapThresholdBytes() { + return (long) (HEAP_SIZE_BYTES * getHeapThreshold()); + } + + private void setHeapThreshold(double heapThreshold) { + this.heapThreshold = heapThreshold; + } + + public double getHeapVarianceThreshold() { + return heapVarianceThreshold; + } + + private void setHeapVarianceThreshold(double heapVarianceThreshold) { + this.heapVarianceThreshold = heapVarianceThreshold; + } + + public long getCpuTimeThreshold() { + return cpuTimeThreshold; + } + + private void setCpuTimeThreshold(long cpuTimeThreshold) { + this.cpuTimeThreshold = cpuTimeThreshold; + } + + public long getElapsedTimeThreshold() { + return elapsedTimeThreshold; + } + + private void setElapsedTimeThreshold(long elapsedTimeThreshold) { + this.elapsedTimeThreshold = elapsedTimeThreshold; + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/package-info.java b/server/src/main/java/org/opensearch/search/backpressure/settings/package-info.java new file mode 100644 index 0000000000000..a853a139b096b --- /dev/null +++ 
b/server/src/main/java/org/opensearch/search/backpressure/settings/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains settings for search backpressure. + */ +package org.opensearch.search.backpressure.settings; diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTracker.java new file mode 100644 index 0000000000000..8e35c724a8fef --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTracker.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.common.util.Streak; + +import java.util.function.BooleanSupplier; + +/** + * NodeDuressTracker is used to check if the node is in duress. + * + * @opensearch.internal + */ +public class NodeDuressTracker { + /** + * Tracks the number of consecutive breaches. + */ + private final Streak breaches = new Streak(); + + /** + * Predicate that returns true when the node is in duress. + */ + private final BooleanSupplier isNodeInDuress; + + public NodeDuressTracker(BooleanSupplier isNodeInDuress) { + this.isNodeInDuress = isNodeInDuress; + } + + /** + * Evaluates the predicate and returns the number of consecutive breaches. + */ + public int check() { + return breaches.record(isNodeInDuress.getAsBoolean()); + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java new file mode 100644 index 0000000000000..8f1842efa5771 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.tasks.Task; + +import java.util.Optional; +import java.util.concurrent.atomic.AtomicLong; + +/** + * TaskResourceUsageTracker is used to track completions and cancellations of search related tasks. + * + * @opensearch.internal + */ +public abstract class TaskResourceUsageTracker { + /** + * Counts the number of cancellations made due to this tracker. + */ + private final AtomicLong cancellations = new AtomicLong(); + + public long incrementCancellations() { + return cancellations.incrementAndGet(); + } + + public long getCancellations() { + return cancellations.get(); + } + + /** + * Returns a unique name for this tracker. + */ + public abstract String name(); + + /** + * Notifies the tracker to update its state when a task execution completes. + */ + public abstract void update(Task task); + + /** + * Returns the cancellation reason for the given task, if it's eligible for cancellation. 
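+     * Implementations return Optional.empty() when the task is still within limits; the anonymous tracker in
+     * SearchBackpressureServiceTests is a minimal example that returns a reason only once CPU time crosses a threshold.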
+ */ + public abstract Optional cancellationReason(Task task); +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/package-info.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/package-info.java new file mode 100644 index 0000000000000..da0532421391e --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains trackers to check if resource usage limits are breached on a node or task level. + */ +package org.opensearch.search.backpressure.trackers; diff --git a/server/src/main/java/org/opensearch/tasks/CancellableTask.java b/server/src/main/java/org/opensearch/tasks/CancellableTask.java index 439be2b630e84..336f5b1f4c244 100644 --- a/server/src/main/java/org/opensearch/tasks/CancellableTask.java +++ b/server/src/main/java/org/opensearch/tasks/CancellableTask.java @@ -71,7 +71,7 @@ public CancellableTask( /** * This method is called by the task manager when this task is cancelled. */ - final void cancel(String reason) { + public void cancel(String reason) { assert reason != null; if (cancelled.compareAndSet(false, true)) { this.reason = reason; diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java new file mode 100644 index 0000000000000..2c302172cf6bc --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java @@ -0,0 +1,108 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.opensearch.ExceptionsHelper; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +/** + * TaskCancellation is a wrapper for a task and its cancellation reasons. + * + * @opensearch.internal + */ +public class TaskCancellation implements Comparable { + private final CancellableTask task; + private final List reasons; + private final List onCancelCallbacks; + + public TaskCancellation(CancellableTask task, List reasons, List onCancelCallbacks) { + this.task = task; + this.reasons = reasons; + this.onCancelCallbacks = onCancelCallbacks; + } + + public CancellableTask getTask() { + return task; + } + + public List getReasons() { + return reasons; + } + + public String getReasonString() { + return reasons.stream().map(Reason::getMessage).collect(Collectors.joining(", ")); + } + + /** + * Cancels the task and invokes all onCancelCallbacks. + */ + public void cancel() { + if (isEligibleForCancellation() == false) { + return; + } + + task.cancel(getReasonString()); + + List exceptions = new ArrayList<>(); + for (Runnable callback : onCancelCallbacks) { + try { + callback.run(); + } catch (Exception e) { + exceptions.add(e); + } + } + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); + } + + /** + * Returns the sum of all cancellation scores. + * + * A zero score indicates no reason to cancel the task. + * A task with a higher score suggests greater possibility of recovering the node when it is cancelled. 
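+     * For example, reasons scored 10, 20 and 5 add up to a total cancellation score of 35 (see TaskCancellationTests).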
+ */ + public int totalCancellationScore() { + return reasons.stream().mapToInt(Reason::getCancellationScore).sum(); + } + + /** + * A task is eligible for cancellation if it has one or more cancellation reasons, and is not already cancelled. + */ + public boolean isEligibleForCancellation() { + return (task.isCancelled() == false) && (reasons.size() > 0); + } + + @Override + public int compareTo(TaskCancellation other) { + return Integer.compare(totalCancellationScore(), other.totalCancellationScore()); + } + + /** + * Represents the cancellation reason for a task. + */ + public static class Reason { + private final String message; + private final int cancellationScore; + + public Reason(String message, int cancellationScore) { + this.message = message; + this.cancellationScore = cancellationScore; + } + + public String getMessage() { + return message; + } + + public int getCancellationScore() { + return cancellationScore; + } + } +} diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java index c3cad117390e4..b4806b531429e 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.ClusterSettings; @@ -50,6 +51,7 @@ public class TaskResourceTrackingService implements RunnableTaskExecutionListene private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); private final ConcurrentMapLong resourceAwareTasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + private final List taskCompletionListeners = new ArrayList<>(); private final ThreadPool threadPool; private volatile boolean taskResourceTrackingEnabled; @@ -116,6 +118,16 @@ public void stopTracking(Task task) { } finally { resourceAwareTasks.remove(task.getId()); } + + List exceptions = new ArrayList<>(); + for (TaskCompletionListener listener : taskCompletionListeners) { + try { + listener.onTaskCompleted(task); + } catch (Exception e) { + exceptions.add(e); + } + } + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); } /** @@ -245,4 +257,14 @@ private ThreadContext.StoredContext addTaskIdToThreadContext(Task task) { return storedContext; } + /** + * Listener that gets invoked when a task execution completes. + */ + public interface TaskCompletionListener { + void onTaskCompleted(Task task); + } + + public void addTaskCompletionListener(TaskCompletionListener listener) { + this.taskCompletionListeners.add(listener); + } } diff --git a/server/src/test/java/org/opensearch/common/StreakTests.java b/server/src/test/java/org/opensearch/common/StreakTests.java new file mode 100644 index 0000000000000..80080ad3e4027 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/StreakTests.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common; + +import org.opensearch.common.util.Streak; +import org.opensearch.test.OpenSearchTestCase; + +public class StreakTests extends OpenSearchTestCase { + + public void testStreak() { + Streak streak = new Streak(); + + // Streak starts with zero. + assertEquals(0, streak.length()); + + // Streak increases on successive successful events. + streak.record(true); + assertEquals(1, streak.length()); + streak.record(true); + assertEquals(2, streak.length()); + streak.record(true); + assertEquals(3, streak.length()); + + // Streak resets to zero after an unsuccessful event. + streak.record(false); + assertEquals(0, streak.length()); + } +} diff --git a/server/src/test/java/org/opensearch/common/util/MovingAverageTests.java b/server/src/test/java/org/opensearch/common/util/MovingAverageTests.java new file mode 100644 index 0000000000000..415058992e081 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/util/MovingAverageTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.opensearch.test.OpenSearchTestCase; + +public class MovingAverageTests extends OpenSearchTestCase { + + public void testMovingAverage() { + MovingAverage ma = new MovingAverage(5); + + // No observations + assertEquals(0.0, ma.getAverage(), 0.0); + assertEquals(0, ma.getCount()); + + // Not enough observations + ma.record(1); + ma.record(2); + ma.record(3); + assertEquals(2.0, ma.getAverage(), 0.0); + assertEquals(3, ma.getCount()); + assertFalse(ma.isReady()); + + // Enough observations + ma.record(4); + ma.record(5); + ma.record(6); + assertEquals(4, ma.getAverage(), 0.0); + assertEquals(6, ma.getCount()); + assertTrue(ma.isReady()); + } + + public void testMovingAverageWithZeroSize() { + try { + new MovingAverage(0); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("window size must be greater than zero")); + return; + } + + fail("exception should have been thrown"); + } +} diff --git a/server/src/test/java/org/opensearch/common/util/TokenBucketTests.java b/server/src/test/java/org/opensearch/common/util/TokenBucketTests.java new file mode 100644 index 0000000000000..a52e97cdd835c --- /dev/null +++ b/server/src/test/java/org/opensearch/common/util/TokenBucketTests.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.LongSupplier; + +public class TokenBucketTests extends OpenSearchTestCase { + + public void testTokenBucket() { + AtomicLong mockTimeNanos = new AtomicLong(); + LongSupplier mockTimeNanosSupplier = mockTimeNanos::get; + + // Token bucket that refills at 2 tokens/second and allows short bursts up to 3 operations. + TokenBucket tokenBucket = new TokenBucket(mockTimeNanosSupplier, 2.0 / TimeUnit.SECONDS.toNanos(1), 3); + + // Three operations succeed, fourth fails. 
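+        // (With no time elapsed, the bucket holds exactly its burst capacity of 3 tokens.)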
+ assertTrue(tokenBucket.request()); + assertTrue(tokenBucket.request()); + assertTrue(tokenBucket.request()); + assertFalse(tokenBucket.request()); + + // Clock moves ahead by one second. Two operations succeed, third fails. + mockTimeNanos.addAndGet(TimeUnit.SECONDS.toNanos(1)); + assertTrue(tokenBucket.request()); + assertTrue(tokenBucket.request()); + assertFalse(tokenBucket.request()); + + // Clock moves ahead by half a second. One operation succeeds, second fails. + mockTimeNanos.addAndGet(TimeUnit.MILLISECONDS.toNanos(500)); + assertTrue(tokenBucket.request()); + assertFalse(tokenBucket.request()); + + // Clock moves ahead by many seconds, but the token bucket should be capped at the 'burst' capacity. + mockTimeNanos.addAndGet(TimeUnit.SECONDS.toNanos(10)); + assertTrue(tokenBucket.request()); + assertTrue(tokenBucket.request()); + assertTrue(tokenBucket.request()); + assertFalse(tokenBucket.request()); + + // Ability to request fractional tokens. + mockTimeNanos.addAndGet(TimeUnit.MILLISECONDS.toNanos(250)); + assertFalse(tokenBucket.request(1.0)); + assertTrue(tokenBucket.request(0.5)); + } + + public void testTokenBucketWithInvalidRate() { + try { + new TokenBucket(System::nanoTime, -1, 2); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("rate must be greater than zero")); + return; + } + + fail("exception should have been thrown"); + } + + public void testTokenBucketWithInvalidBurst() { + try { + new TokenBucket(System::nanoTime, 1, 0); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("burst must be greater than zero")); + return; + } + + fail("exception should have been thrown"); + } +} diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java new file mode 100644 index 0000000000000..ac5a8229718ba --- /dev/null +++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java @@ -0,0 +1,213 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.backpressure; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.search.backpressure.settings.SearchShardTaskSettings; +import org.opensearch.search.backpressure.trackers.NodeDuressTracker; +import org.opensearch.search.backpressure.trackers.TaskResourceUsageTracker; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongSupplier; + +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats; + +public class SearchBackpressureServiceTests extends OpenSearchTestCase { + + public void testIsNodeInDuress() { + TaskResourceTrackingService mockTaskResourceTrackingService = mock(TaskResourceTrackingService.class); + ThreadPool mockThreadPool = mock(ThreadPool.class); + + AtomicReference cpuUsage = new AtomicReference<>(); + AtomicReference heapUsage = new AtomicReference<>(); + NodeDuressTracker cpuUsageTracker = new NodeDuressTracker(() -> cpuUsage.get() >= 0.5); + NodeDuressTracker heapUsageTracker = new NodeDuressTracker(() -> heapUsage.get() >= 0.5); + + SearchBackpressureSettings settings = new SearchBackpressureSettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + SearchBackpressureService service = new SearchBackpressureService( + settings, + mockTaskResourceTrackingService, + mockThreadPool, + System::nanoTime, + List.of(cpuUsageTracker, heapUsageTracker), + Collections.emptyList() + ); + + // Node not in duress. + cpuUsage.set(0.0); + heapUsage.set(0.0); + assertFalse(service.isNodeInDuress()); + + // Node in duress; but not for many consecutive data points. + cpuUsage.set(1.0); + heapUsage.set(1.0); + assertFalse(service.isNodeInDuress()); + + // Node in duress for consecutive data points. + assertFalse(service.isNodeInDuress()); + assertTrue(service.isNodeInDuress()); + + // Node not in duress anymore. 
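+        // (A single healthy observation resets each tracker's streak, so duress is no longer reported.)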
+ cpuUsage.set(0.0); + heapUsage.set(0.0); + assertFalse(service.isNodeInDuress()); + } + + public void testTrackerStateUpdateOnTaskCompletion() { + TaskResourceTrackingService mockTaskResourceTrackingService = mock(TaskResourceTrackingService.class); + ThreadPool mockThreadPool = mock(ThreadPool.class); + LongSupplier mockTimeNanosSupplier = () -> TimeUnit.SECONDS.toNanos(1234); + TaskResourceUsageTracker mockTaskResourceUsageTracker = mock(TaskResourceUsageTracker.class); + + SearchBackpressureSettings settings = new SearchBackpressureSettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + SearchBackpressureService service = new SearchBackpressureService( + settings, + mockTaskResourceTrackingService, + mockThreadPool, + mockTimeNanosSupplier, + Collections.emptyList(), + List.of(mockTaskResourceUsageTracker) + ); + + // Record task completions to update the tracker state. Tasks other than SearchShardTask are ignored. + service.onTaskCompleted(createMockTaskWithResourceStats(CancellableTask.class, 100, 200)); + for (int i = 0; i < 100; i++) { + service.onTaskCompleted(createMockTaskWithResourceStats(SearchShardTask.class, 100, 200)); + } + assertEquals(100, service.getState().getCompletionCount()); + verify(mockTaskResourceUsageTracker, times(100)).update(any()); + } + + public void testInFlightCancellation() { + TaskResourceTrackingService mockTaskResourceTrackingService = mock(TaskResourceTrackingService.class); + ThreadPool mockThreadPool = mock(ThreadPool.class); + AtomicLong mockTime = new AtomicLong(0); + LongSupplier mockTimeNanosSupplier = mockTime::get; + NodeDuressTracker mockNodeDuressTracker = new NodeDuressTracker(() -> true); + + TaskResourceUsageTracker mockTaskResourceUsageTracker = new TaskResourceUsageTracker() { + @Override + public String name() { + return "mock_tracker"; + } + + @Override + public void update(Task task) {} + + @Override + public Optional cancellationReason(Task task) { + if (task.getTotalResourceStats().getCpuTimeInNanos() < 300) { + return Optional.empty(); + } + + return Optional.of(new TaskCancellation.Reason("limits exceeded", 5)); + } + }; + + // Mocking 'settings' with predictable rate limiting thresholds. + SearchBackpressureSettings settings = spy( + new SearchBackpressureSettings( + Settings.builder() + .put(SearchBackpressureSettings.SETTING_ENFORCED.getKey(), true) + .put(SearchBackpressureSettings.SETTING_CANCELLATION_RATIO.getKey(), 0.1) + .put(SearchBackpressureSettings.SETTING_CANCELLATION_RATE.getKey(), 0.003) + .put(SearchBackpressureSettings.SETTING_CANCELLATION_BURST.getKey(), 10.0) + .build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ) + ); + + SearchBackpressureService service = new SearchBackpressureService( + settings, + mockTaskResourceTrackingService, + mockThreadPool, + mockTimeNanosSupplier, + List.of(mockNodeDuressTracker), + List.of(mockTaskResourceUsageTracker) + ); + + // Run two iterations so that node is marked 'in duress' from the third iteration onwards. + service.doRun(); + service.doRun(); + + // Mocking 'settings' with predictable searchHeapThresholdBytes so that cancellation logic doesn't get skipped. 
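+        // (The mocked threshold equals a single task's heap usage, so the "dominated by search" check always passes.)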
+ long taskHeapUsageBytes = 500; + SearchShardTaskSettings shardTaskSettings = mock(SearchShardTaskSettings.class); + when(shardTaskSettings.getTotalHeapThresholdBytes()).thenReturn(taskHeapUsageBytes); + when(settings.getSearchShardTaskSettings()).thenReturn(shardTaskSettings); + + // Create a mix of low and high resource usage tasks (60 low + 15 high resource usage tasks). + Map activeTasks = new HashMap<>(); + for (long i = 0; i < 75; i++) { + if (i % 5 == 0) { + activeTasks.put(i, createMockTaskWithResourceStats(SearchShardTask.class, 500, taskHeapUsageBytes)); + } else { + activeTasks.put(i, createMockTaskWithResourceStats(SearchShardTask.class, 100, taskHeapUsageBytes)); + } + } + doReturn(activeTasks).when(mockTaskResourceTrackingService).getResourceAwareTasks(); + + // There are 15 tasks eligible for cancellation but only 10 will be cancelled (burst limit). + service.doRun(); + assertEquals(10, service.getState().getCancellationCount()); + assertEquals(1, service.getState().getLimitReachedCount()); + + // If the clock or completed task count haven't made sufficient progress, we'll continue to be rate-limited. + service.doRun(); + assertEquals(10, service.getState().getCancellationCount()); + assertEquals(2, service.getState().getLimitReachedCount()); + + // Simulate task completion to replenish some tokens. + // This will add 2 tokens (task count delta * cancellationRatio) to 'rateLimitPerTaskCompletion'. + for (int i = 0; i < 20; i++) { + service.onTaskCompleted(createMockTaskWithResourceStats(SearchShardTask.class, 100, taskHeapUsageBytes)); + } + service.doRun(); + assertEquals(12, service.getState().getCancellationCount()); + assertEquals(3, service.getState().getLimitReachedCount()); + + // Fast-forward the clock by one second to replenish some tokens. + // This will add 3 tokens (time delta * rate) to 'rateLimitPerTime'. + mockTime.addAndGet(TimeUnit.SECONDS.toNanos(1)); + service.doRun(); + assertEquals(15, service.getState().getCancellationCount()); + assertEquals(3, service.getState().getLimitReachedCount()); // no more tasks to cancel; limit not reached + } +} diff --git a/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackerTests.java b/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackerTests.java new file mode 100644 index 0000000000000..472ba95566523 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackerTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.concurrent.atomic.AtomicReference; + +public class NodeDuressTrackerTests extends OpenSearchTestCase { + + public void testNodeDuressTracker() { + AtomicReference cpuUsage = new AtomicReference<>(0.0); + NodeDuressTracker tracker = new NodeDuressTracker(() -> cpuUsage.get() >= 0.5); + + // Node not in duress. + assertEquals(0, tracker.check()); + + // Node in duress; the streak must keep increasing. + cpuUsage.set(0.7); + assertEquals(1, tracker.check()); + assertEquals(2, tracker.check()); + assertEquals(3, tracker.check()); + + // Node not in duress anymore. 
+ cpuUsage.set(0.3); + assertEquals(0, tracker.check()); + assertEquals(0, tracker.check()); + } +} diff --git a/server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java b/server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java new file mode 100644 index 0000000000000..d6a82702e074d --- /dev/null +++ b/server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.search.backpressure.trackers.TaskResourceUsageTracker; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +public class TaskCancellationTests extends OpenSearchTestCase { + + public void testTaskCancellation() { + SearchShardTask mockTask = new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()); + + TaskResourceUsageTracker mockTracker1 = createMockTaskResourceUsageTracker("mock_tracker_1"); + TaskResourceUsageTracker mockTracker2 = createMockTaskResourceUsageTracker("mock_tracker_2"); + TaskResourceUsageTracker mockTracker3 = createMockTaskResourceUsageTracker("mock_tracker_3"); + + List reasons = new ArrayList<>(); + List callbacks = List.of(mockTracker1::incrementCancellations, mockTracker2::incrementCancellations); + TaskCancellation taskCancellation = new TaskCancellation(mockTask, reasons, callbacks); + + // Task does not have any reason to be cancelled. + assertEquals(0, taskCancellation.totalCancellationScore()); + assertFalse(taskCancellation.isEligibleForCancellation()); + taskCancellation.cancel(); + assertEquals(0, mockTracker1.getCancellations()); + assertEquals(0, mockTracker2.getCancellations()); + assertEquals(0, mockTracker3.getCancellations()); + + // Task has one or more reasons to be cancelled. + reasons.add(new TaskCancellation.Reason("limits exceeded 1", 10)); + reasons.add(new TaskCancellation.Reason("limits exceeded 2", 20)); + reasons.add(new TaskCancellation.Reason("limits exceeded 3", 5)); + assertEquals(35, taskCancellation.totalCancellationScore()); + assertTrue(taskCancellation.isEligibleForCancellation()); + + // Cancel the task and validate the cancellation reason and invocation of callbacks. 
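+        // (Only the two trackers registered as callbacks should see their cancellation counters increment; the third stays at zero.)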
+ taskCancellation.cancel(); + assertTrue(mockTask.getReasonCancelled().contains("limits exceeded 1, limits exceeded 2, limits exceeded 3")); + assertEquals(1, mockTracker1.getCancellations()); + assertEquals(1, mockTracker2.getCancellations()); + assertEquals(0, mockTracker3.getCancellations()); + } + + private static TaskResourceUsageTracker createMockTaskResourceUsageTracker(String name) { + return new TaskResourceUsageTracker() { + @Override + public String name() { + return name; + } + + @Override + public void update(Task task) {} + + @Override + public Optional cancellationReason(Task task) { + return Optional.empty(); + } + }; + } +} diff --git a/test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java b/test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java new file mode 100644 index 0000000000000..ba3653d0b4a84 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure; + +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.TaskResourceUsage; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchBackpressureTestHelpers extends OpenSearchTestCase { + + public static T createMockTaskWithResourceStats(Class type, long cpuUsage, long heapUsage) { + return createMockTaskWithResourceStats(type, cpuUsage, heapUsage, 0); + } + + public static T createMockTaskWithResourceStats( + Class type, + long cpuUsage, + long heapUsage, + long startTimeNanos + ) { + T task = mock(type); + when(task.getTotalResourceStats()).thenReturn(new TaskResourceUsage(cpuUsage, heapUsage)); + when(task.getStartTimeNanos()).thenReturn(startTimeNanos); + + AtomicBoolean isCancelled = new AtomicBoolean(false); + doAnswer(invocation -> { + isCancelled.set(true); + return null; + }).when(task).cancel(anyString()); + doAnswer(invocation -> isCancelled.get()).when(task).isCancelled(); + + return task; + } +} From f1995b951abab34935bf8e5dee91097efbf5503e Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Fri, 14 Oct 2022 09:37:02 -0700 Subject: [PATCH 060/122] Bump Tika from 2.4.0 to 2.5.0 addressing CVE-2022-33879. (#4791) * Bump Tika from 2.4.0 to 2.5.0 addressing CVE-2022-33879. Signed-off-by: Marc Handalian * Add missing SHAs. Signed-off-by: Marc Handalian * Update changelog with PR info. 
Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + plugins/ingest-attachment/build.gradle | 2 +- plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 | 1 - plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 | 1 + .../licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 | 1 - .../licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 | 1 + .../licenses/tika-parsers-standard-package-2.4.0.jar.sha1 | 1 - .../licenses/tika-parsers-standard-package-2.5.0.jar.sha1 | 1 + 8 files changed, 5 insertions(+), 4 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 15dedbde22d41..cfbbe01ae8494 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) - Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) +- Bumps `tika` from 2.4.0 to 2.5.0 ([#4791](https://github.com/opensearch-project/OpenSearch/pull/4791)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 8f952f7619ac1..7bf67769cda10 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -38,7 +38,7 @@ opensearchplugin { } versions << [ - 'tika' : '2.4.0', + 'tika' : '2.5.0', 'pdfbox': '2.0.25', 'poi' : '5.2.2', 'mime4j': '0.8.3' diff --git a/plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 deleted file mode 100644 index 373b7ec63138a..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97b2454943127857a8304319be658d6d7ff4fff1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 new file mode 100644 index 0000000000000..419f01c631375 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 @@ -0,0 +1 @@ +7f9f35e4827726b062ac2b0ad0fd361837a50ac9 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 deleted file mode 100644 index cf724f4ee1de4..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57901d6088b0e34999e25af6b363ccec959b5e61 \ No newline at end 
of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 new file mode 100644 index 0000000000000..a9e47ff8a8a86 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 @@ -0,0 +1 @@ +649574dca8f19d991ac25894c40284446dc5cf50 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1 deleted file mode 100644 index ec03a055a6f6d..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -83522360364a93e819eaec74f393bc56ed1d466a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 new file mode 100644 index 0000000000000..d648183868034 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 @@ -0,0 +1 @@ +2b9268511c34d8a1098f0565438cb8077fcf845d \ No newline at end of file From e44158d4d10d4f8905895ffa50bf9398b8550667 Mon Sep 17 00:00:00 2001 From: Leonidas Spyropoulos Date: Fri, 14 Oct 2022 17:52:59 +0100 Subject: [PATCH 061/122] Apply reproducible builds to plugins (#4746) As per gradle [docs] add support to remove timestamps and package with same order which is required from [reproducible] builds This is a result of the discussion in https://github.com/opensearch-project/opensearch-plugin-template-java/pull/38 to apply for all plugins going forward. [docs]: https://docs.gradle.org/current/userguide/working_with_files.html#sec:reproducible_archives [reproducible]: https://reproducible-builds.org/ Signed-off-by: Leonidas Spyropoulos Signed-off-by: Leonidas Spyropoulos Co-authored-by: Daniel (dB.) Doubrovkine --- CHANGELOG.md | 1 + .../org/opensearch/gradle/plugin/PluginBuildPlugin.groovy | 8 +++++++- .../opensearch/gradle/plugin/PluginBuildPluginTests.java | 5 +++++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cfbbe01ae8494..963ccdcdc9a6b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680)) - Recommissioning of zone. REST layer support. 
([#4624](https://github.com/opensearch-project/OpenSearch/pull/4604)) - Added in-flight cancellation of SearchShardTask based on resource consumption ([#4565](https://github.com/opensearch-project/OpenSearch/pull/4565)) +- Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy index 31677965ab0d3..b7c78991a0da3 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy @@ -29,13 +29,13 @@ package org.opensearch.gradle.plugin import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin +import org.gradle.api.tasks.bundling.AbstractArchiveTask import org.opensearch.gradle.BuildPlugin import org.opensearch.gradle.NoticeTask import org.opensearch.gradle.Version import org.opensearch.gradle.VersionProperties import org.opensearch.gradle.dependencies.CompileOnlyResolvePlugin import org.opensearch.gradle.info.BuildParams -import org.opensearch.gradle.plugin.PluginPropertiesExtension import org.opensearch.gradle.test.RestTestBasePlugin import org.opensearch.gradle.testclusters.RunTask import org.opensearch.gradle.util.Util @@ -134,6 +134,12 @@ class PluginBuildPlugin implements Plugin { } project.configurations.getByName('default') .extendsFrom(project.configurations.getByName('runtimeClasspath')) + project.tasks.withType(AbstractArchiveTask.class).configureEach { task -> + // ignore file timestamps + // be consistent in archive file order + task.preserveFileTimestamps = false + task.reproducibleFileOrder = true + } // allow running ES with this plugin in the foreground of a build project.tasks.register('run', RunTask) { dependsOn(project.tasks.bundlePlugin) diff --git a/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java index 9ed0e3e494992..8772a9fbd65ee 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java @@ -31,6 +31,7 @@ package org.opensearch.gradle.plugin; +import org.gradle.api.tasks.bundling.AbstractArchiveTask; import org.opensearch.gradle.BwcVersions; import org.opensearch.gradle.test.GradleUnitTestCase; import org.gradle.api.Project; @@ -64,6 +65,10 @@ public void testApply() { assertNotNull("plugin extensions has the right type", project.getExtensions().findByType(PluginPropertiesExtension.class)); assertNull("plugin should not create the integTest task", project.getTasks().findByName("integTest")); + project.getTasks().withType(AbstractArchiveTask.class).forEach(t -> { + assertFalse(String.format("task '%s' should not preserve timestamps", t.getName()), t.isPreserveFileTimestamps()); + assertTrue(String.format("task '%s' should have reproducible file order", t.getName()), t.isReproducibleFileOrder()); + }); } @Ignore("https://github.com/elastic/elasticsearch/issues/47123") From 6ac0c7cd2054da9a6492c76011d1dcd2e0f518b8 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Mon, 17 Oct 2022 14:51:56 -0700 Subject: [PATCH 062/122] Fix recovery path for searchable snapshots (#4813) Replicas are bootstrapped using the 
recovery path, as opposed to the restore path used for creating the primary shard. This has been broken in the initial implementation of searchable snapshots. The fix here is to put in the appropriate checks to avoid failing during recovery. I've also updated the integration test to ensure the recovery path is always exercised during testing. Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../opensearch/snapshots/SearchableSnapshotIT.java | 13 +++++++++++-- .../indices/recovery/PeerRecoveryTargetService.java | 9 ++++++--- .../opensearch/indices/recovery/RecoveryTarget.java | 11 +++++++---- 4 files changed, 25 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 963ccdcdc9a6b..25efcc838553a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -137,6 +137,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) - Fixed randomly failing test ([4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) - Update version check after backport ([4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) +- Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 96fcf0053c9ab..0ab025bb575cc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -49,15 +49,24 @@ protected boolean addMockInternalEngine() { } public void testCreateSearchableSnapshot() throws Exception { + final int numReplicasIndex1 = randomIntBetween(1, 4); + final int numReplicasIndex2 = randomIntBetween(0, 2); + internalCluster().ensureAtLeastNumDataNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); final Client client = client(); createRepository("test-repo", "fs"); createIndex( "test-idx-1", - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, Integer.toString(numReplicasIndex1)) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .build() ); createIndex( "test-idx-2", - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, Integer.toString(numReplicasIndex2)) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .build() ); ensureGreen(); indexRandomDocs("test-idx-1", 100); diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index b5702431ed4bf..556e4db3400e1 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -54,6 +54,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; 
import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.mapper.MapperException; @@ -244,8 +245,10 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node"; logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); - boolean remoteTranslogEnabled = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); - final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!remoteTranslogEnabled); + final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); + final boolean hasNoTranslog = IndexModule.Type.REMOTE_SNAPSHOT.match(indexShard.indexSettings()); + final boolean verifyTranslog = (hasRemoteTranslog || hasNoTranslog) == false; + final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!hasRemoteTranslog); assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG : "unexpected recovery stage [" + recoveryTarget.state().getStage() + "] starting seqno [ " + startingSeqNo + "]"; startRequest = getStartRecoveryRequest( @@ -253,7 +256,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi clusterService.localNode(), recoveryTarget, startingSeqNo, - !remoteTranslogEnabled + verifyTranslog ); requestToSend = startRequest; actionName = PeerRecoverySourceService.Actions.START_RECOVERY; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index c1e29e0d866d8..b6122dbeeea09 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -44,6 +44,7 @@ import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.CancellableThreads; +import org.opensearch.index.IndexModule; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.MapperException; import org.opensearch.index.seqno.ReplicationTracker; @@ -355,10 +356,12 @@ public void cleanFiles( try { store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetadata); - // If Segment Replication is enabled, we need to reuse the primary's translog UUID already stored in the index. - // With Segrep, replicas should never create their own commit points. This ensures the index and xlog share the same - // UUID without the extra step to associate the index with a new xlog. - if (indexShard.indexSettings().isSegRepEnabled()) { + // Replicas for segment replication or remote snapshot indices do not create + // their own commit points and therefore do not modify the commit user data + // in their store. In these cases, reuse the primary's translog UUID. 
+ final boolean reuseTranslogUUID = indexShard.indexSettings().isSegRepEnabled() + || IndexModule.Type.REMOTE_SNAPSHOT.match(indexShard.indexSettings()); + if (reuseTranslogUUID) { final String translogUUID = store.getMetadata().getCommitUserData().get(TRANSLOG_UUID_KEY); Translog.createEmptyTranslog( indexShard.shardPath().resolveTranslog(), From 1d654851ff2ea50b0fbb07b7602945f3a0e4cc88 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Mon, 17 Oct 2022 16:20:47 -0700 Subject: [PATCH 063/122] Refactor BalancedAllocator.Balancer to LocalShardsBalancer (#4761) * Refactor BalancedAllocator.Balancer to LocalShardsBalancer Signed-off-by: Kunal Kotwani * Deprecate Balancer to maintain BWC Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- CHANGELOG.md | 1 + .../allocation/AllocationConstraints.java | 7 +- .../allocator/BalancedShardsAllocator.java | 967 +----------------- .../allocator/LocalShardsBalancer.java | 967 ++++++++++++++++++ .../allocation/allocator/ShardsBalancer.java | 75 ++ .../AllocationConstraintsTests.java | 4 +- .../allocation/BalancedSingleShardTests.java | 4 +- 7 files changed, 1083 insertions(+), 942 deletions(-) create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 25efcc838553a..4f6e5425f37db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -86,6 +86,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix weighted routing metadata deserialization error on process restart ([#4691](https://github.com/opensearch-project/OpenSearch/pull/4691)) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) +- Refactored BalancedAllocator.Balancer to LocalShardsBalancer ([#4761](https://github.com/opensearch-project/OpenSearch/pull/4761)) ### Deprecated ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java index 8c2c85ce107a6..3d9847ca35931 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java @@ -6,6 +6,7 @@ package org.opensearch.cluster.routing.allocation; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.allocator.ShardsBalancer; import java.util.ArrayList; import java.util.List; @@ -27,11 +28,11 @@ public AllocationConstraints() { } class ConstraintParams { - private BalancedShardsAllocator.Balancer balancer; + private ShardsBalancer balancer; private BalancedShardsAllocator.ModelNode node; private String index; - ConstraintParams(BalancedShardsAllocator.Balancer balancer, BalancedShardsAllocator.ModelNode node, String index) { + ConstraintParams(ShardsBalancer balancer, BalancedShardsAllocator.ModelNode node, String index) { this.balancer = balancer; this.node = node; this.index = index; @@ -50,7 +51,7 @@ 
class ConstraintParams { * This weight function is used only in case of unassigned shards to avoid overloading a newly added node. * Weight calculation in other scenarios like shard movement and re-balancing remain unaffected by this function. */ - public long weight(BalancedShardsAllocator.Balancer balancer, BalancedShardsAllocator.ModelNode node, String index) { + public long weight(ShardsBalancer balancer, BalancedShardsAllocator.ModelNode node, String index) { int constraintsBreached = 0; ConstraintParams params = new ConstraintParams(balancer, node, index); for (Predicate predicate : constraintPredicates) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 181910e3ac1c4..42c8f7987bf3d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -34,47 +34,28 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IntroSorter; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.AllocationConstraints; -import org.opensearch.cluster.routing.allocation.AllocationDecision; import org.opensearch.cluster.routing.allocation.MoveDecision; -import org.opensearch.cluster.routing.allocation.NodeAllocationResult; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; -import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; -import org.opensearch.cluster.routing.allocation.decider.Decision; -import org.opensearch.cluster.routing.allocation.decider.Decision.Type; -import org.opensearch.cluster.routing.allocation.decider.DiskThresholdDecider; -import org.opensearch.common.collect.Tuple; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.gateway.PriorityComparator; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Set; -import java.util.stream.StreamSupport; - -import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; /** * The {@link BalancedShardsAllocator} re-balances the nodes allocations @@ -160,23 +141,23 @@ public void allocate(RoutingAllocation allocation) { failAllocationOfNewPrimaries(allocation); return; } - final Balancer balancer = new Balancer(logger, allocation, movePrimaryFirst, weightFunction, threshold); - 
balancer.allocateUnassigned(); - balancer.moveShards(); - balancer.balance(); + final ShardsBalancer localShardsBalancer = new LocalShardsBalancer(logger, allocation, movePrimaryFirst, weightFunction, threshold); + localShardsBalancer.allocateUnassigned(); + localShardsBalancer.moveShards(); + localShardsBalancer.balance(); } @Override public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, final RoutingAllocation allocation) { - Balancer balancer = new Balancer(logger, allocation, movePrimaryFirst, weightFunction, threshold); + ShardsBalancer localShardsBalancer = new LocalShardsBalancer(logger, allocation, movePrimaryFirst, weightFunction, threshold); AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; MoveDecision moveDecision = MoveDecision.NOT_TAKEN; if (shard.unassigned()) { - allocateUnassignedDecision = balancer.decideAllocateUnassigned(shard); + allocateUnassignedDecision = localShardsBalancer.decideAllocateUnassigned(shard); } else { - moveDecision = balancer.decideMove(shard); + moveDecision = localShardsBalancer.decideMove(shard); if (moveDecision.isDecisionTaken() && moveDecision.canRemain()) { - MoveDecision rebalanceDecision = balancer.decideRebalance(shard); + MoveDecision rebalanceDecision = localShardsBalancer.decideRebalance(shard); moveDecision = rebalanceDecision.withRemainDecision(moveDecision.getCanRemainDecision()); } } @@ -277,923 +258,18 @@ static class WeightFunction { this.constraints = new AllocationConstraints(); } - public float weightWithAllocationConstraints(Balancer balancer, ModelNode node, String index) { + public float weightWithAllocationConstraints(ShardsBalancer balancer, ModelNode node, String index) { float balancerWeight = weight(balancer, node, index); return balancerWeight + constraints.weight(balancer, node, index); } - float weight(Balancer balancer, ModelNode node, String index) { + float weight(ShardsBalancer balancer, ModelNode node, String index) { final float weightShard = node.numShards() - balancer.avgShardsPerNode(); final float weightIndex = node.numShards(index) - balancer.avgShardsPerNode(index); return theta0 * weightShard + theta1 * weightIndex; } } - /** - * A {@link Balancer} - * - * @opensearch.internal - */ - public static class Balancer { - private final Logger logger; - private final Map nodes; - private final RoutingAllocation allocation; - private final RoutingNodes routingNodes; - private final boolean movePrimaryFirst; - private final WeightFunction weight; - - private final float threshold; - private final Metadata metadata; - private final float avgShardsPerNode; - private final NodeSorter sorter; - private final Set inEligibleTargetNode; - - public Balancer(Logger logger, RoutingAllocation allocation, boolean movePrimaryFirst, WeightFunction weight, float threshold) { - this.logger = logger; - this.allocation = allocation; - this.movePrimaryFirst = movePrimaryFirst; - this.weight = weight; - this.threshold = threshold; - this.routingNodes = allocation.routingNodes(); - this.metadata = allocation.metadata(); - avgShardsPerNode = ((float) metadata.getTotalNumberOfShards()) / routingNodes.size(); - nodes = Collections.unmodifiableMap(buildModelFromAssigned()); - sorter = newNodeSorter(); - inEligibleTargetNode = new HashSet<>(); - } - - /** - * Returns an array view on the nodes in the balancer. Nodes should not be removed from this list. 
- */ - private ModelNode[] nodesArray() { - return nodes.values().toArray(new ModelNode[nodes.size()]); - } - - /** - * Returns the average of shards per node for the given index - */ - public float avgShardsPerNode(String index) { - return ((float) metadata.index(index).getTotalNumberOfShards()) / nodes.size(); - } - - /** - * Returns the global average of shards per node - */ - public float avgShardsPerNode() { - return avgShardsPerNode; - } - - /** - * Returns a new {@link NodeSorter} that sorts the nodes based on their - * current weight with respect to the index passed to the sorter. The - * returned sorter is not sorted. Use {@link NodeSorter#reset(String)} - * to sort based on an index. - */ - private NodeSorter newNodeSorter() { - return new NodeSorter(nodesArray(), weight, this); - } - - /** - * The absolute value difference between two weights. - */ - private static float absDelta(float lower, float higher) { - assert higher >= lower : higher + " lt " + lower + " but was expected to be gte"; - return Math.abs(higher - lower); - } - - /** - * Returns {@code true} iff the weight delta between two nodes is under a defined threshold. - * See {@link #THRESHOLD_SETTING} for defining the threshold. - */ - private static boolean lessThan(float delta, float threshold) { - /* deltas close to the threshold are "rounded" to the threshold manually - to prevent floating point problems if the delta is very close to the - threshold ie. 1.000000002 which can trigger unnecessary balance actions*/ - return delta <= (threshold + 0.001f); - } - - /** - * Balances the nodes on the cluster model according to the weight function. - * The actual balancing is delegated to {@link #balanceByWeights()} - */ - private void balance() { - if (logger.isTraceEnabled()) { - logger.trace("Start balancing cluster"); - } - if (allocation.hasPendingAsyncFetch()) { - /* - * see https://github.com/elastic/elasticsearch/issues/14387 - * if we allow rebalance operations while we are still fetching shard store data - * we might end up with unnecessary rebalance operations which can be super confusion/frustrating - * since once the fetches come back we might just move all the shards back again. - * Therefore we only do a rebalance if we have fetched all information. - */ - logger.debug("skipping rebalance due to in-flight shard/store fetches"); - return; - } - if (allocation.deciders().canRebalance(allocation).type() != Type.YES) { - logger.trace("skipping rebalance as it is disabled"); - return; - } - if (nodes.size() < 2) { /* skip if we only have one node */ - logger.trace("skipping rebalance as single node only"); - return; - } - balanceByWeights(); - } - - /** - * Makes a decision about moving a single shard to a different node to form a more - * optimally balanced cluster. This method is invoked from the cluster allocation - * explain API only. 
- */ - private MoveDecision decideRebalance(final ShardRouting shard) { - if (shard.started() == false) { - // we can only rebalance started shards - return MoveDecision.NOT_TAKEN; - } - - Decision canRebalance = allocation.deciders().canRebalance(shard, allocation); - - sorter.reset(shard.getIndexName()); - ModelNode[] modelNodes = sorter.modelNodes; - final String currentNodeId = shard.currentNodeId(); - // find currently assigned node - ModelNode currentNode = null; - for (ModelNode node : modelNodes) { - if (node.getNodeId().equals(currentNodeId)) { - currentNode = node; - break; - } - } - assert currentNode != null : "currently assigned node could not be found"; - - // balance the shard, if a better node can be found - final String idxName = shard.getIndexName(); - final float currentWeight = weight.weight(this, currentNode, idxName); - final AllocationDeciders deciders = allocation.deciders(); - Type rebalanceDecisionType = Type.NO; - ModelNode assignedNode = null; - List> betterBalanceNodes = new ArrayList<>(); - List> sameBalanceNodes = new ArrayList<>(); - List> worseBalanceNodes = new ArrayList<>(); - for (ModelNode node : modelNodes) { - if (node == currentNode) { - continue; // skip over node we're currently allocated to - } - final Decision canAllocate = deciders.canAllocate(shard, node.getRoutingNode(), allocation); - // the current weight of the node in the cluster, as computed by the weight function; - // this is a comparison of the number of shards on this node to the number of shards - // that should be on each node on average (both taking the cluster as a whole into account - // as well as shards per index) - final float nodeWeight = weight.weight(this, node, idxName); - // if the node we are examining has a worse (higher) weight than the node the shard is - // assigned to, then there is no way moving the shard to the node with the worse weight - // can make the balance of the cluster better, so we check for that here - final boolean betterWeightThanCurrent = nodeWeight <= currentWeight; - boolean rebalanceConditionsMet = false; - if (betterWeightThanCurrent) { - // get the delta between the weights of the node we are checking and the node that holds the shard - float currentDelta = absDelta(nodeWeight, currentWeight); - // checks if the weight delta is above a certain threshold; if it is not above a certain threshold, - // then even though the node we are examining has a better weight and may make the cluster balance - // more even, it doesn't make sense to execute the heavyweight operation of relocating a shard unless - // the gains make it worth it, as defined by the threshold - boolean deltaAboveThreshold = lessThan(currentDelta, threshold) == false; - // calculate the delta of the weights of the two nodes if we were to add the shard to the - // node in question and move it away from the node that currently holds it. 
- // hence we add 2.0f to the weight delta - float proposedDelta = 2.0f + nodeWeight - currentWeight; - boolean betterWeightWithShardAdded = proposedDelta < currentDelta; - - rebalanceConditionsMet = deltaAboveThreshold && betterWeightWithShardAdded; - // if the simulated weight delta with the shard moved away is better than the weight delta - // with the shard remaining on the current node, and we are allowed to allocate to the - // node in question, then allow the rebalance - if (rebalanceConditionsMet && canAllocate.type().higherThan(rebalanceDecisionType)) { - // rebalance to the node, only will get overwritten if the decision here is to - // THROTTLE and we get a decision with YES on another node - rebalanceDecisionType = canAllocate.type(); - assignedNode = node; - } - } - Tuple nodeResult = Tuple.tuple(node, canAllocate); - if (rebalanceConditionsMet) { - betterBalanceNodes.add(nodeResult); - } else if (betterWeightThanCurrent) { - sameBalanceNodes.add(nodeResult); - } else { - worseBalanceNodes.add(nodeResult); - } - } - - int weightRanking = 0; - List nodeDecisions = new ArrayList<>(modelNodes.length - 1); - for (Tuple result : betterBalanceNodes) { - nodeDecisions.add( - new NodeAllocationResult( - result.v1().routingNode.node(), - AllocationDecision.fromDecisionType(result.v2().type()), - result.v2(), - ++weightRanking - ) - ); - } - int currentNodeWeightRanking = ++weightRanking; - for (Tuple result : sameBalanceNodes) { - AllocationDecision nodeDecision = result.v2().type() == Type.NO ? AllocationDecision.NO : AllocationDecision.WORSE_BALANCE; - nodeDecisions.add( - new NodeAllocationResult(result.v1().routingNode.node(), nodeDecision, result.v2(), currentNodeWeightRanking) - ); - } - for (Tuple result : worseBalanceNodes) { - AllocationDecision nodeDecision = result.v2().type() == Type.NO ? AllocationDecision.NO : AllocationDecision.WORSE_BALANCE; - nodeDecisions.add(new NodeAllocationResult(result.v1().routingNode.node(), nodeDecision, result.v2(), ++weightRanking)); - } - - if (canRebalance.type() != Type.YES || allocation.hasPendingAsyncFetch()) { - AllocationDecision allocationDecision = allocation.hasPendingAsyncFetch() - ? AllocationDecision.AWAITING_INFO - : AllocationDecision.fromDecisionType(canRebalance.type()); - return MoveDecision.cannotRebalance(canRebalance, allocationDecision, currentNodeWeightRanking, nodeDecisions); - } else { - return MoveDecision.rebalance( - canRebalance, - AllocationDecision.fromDecisionType(rebalanceDecisionType), - assignedNode != null ? assignedNode.routingNode.node() : null, - currentNodeWeightRanking, - nodeDecisions - ); - } - } - - /** - * Balances the nodes on the cluster model according to the weight - * function. The configured threshold is the minimum delta between the - * weight of the maximum node and the minimum node according to the - * {@link WeightFunction}. This weight is calculated per index to - * distribute shards evenly per index. The balancer tries to relocate - * shards only if the delta exceeds the threshold. 
In the default case - * the threshold is set to {@code 1.0} to enforce gaining relocation - * only, or in other words relocations that move the weight delta closer - * to {@code 0.0} - */ - private void balanceByWeights() { - final AllocationDeciders deciders = allocation.deciders(); - final ModelNode[] modelNodes = sorter.modelNodes; - final float[] weights = sorter.weights; - for (String index : buildWeightOrderedIndices()) { - IndexMetadata indexMetadata = metadata.index(index); - - // find nodes that have a shard of this index or where shards of this index are allowed to be allocated to, - // move these nodes to the front of modelNodes so that we can only balance based on these nodes - int relevantNodes = 0; - for (int i = 0; i < modelNodes.length; i++) { - ModelNode modelNode = modelNodes[i]; - if (modelNode.getIndex(index) != null - || deciders.canAllocate(indexMetadata, modelNode.getRoutingNode(), allocation).type() != Type.NO) { - // swap nodes at position i and relevantNodes - modelNodes[i] = modelNodes[relevantNodes]; - modelNodes[relevantNodes] = modelNode; - relevantNodes++; - } - } - - if (relevantNodes < 2) { - continue; - } - - sorter.reset(index, 0, relevantNodes); - int lowIdx = 0; - int highIdx = relevantNodes - 1; - while (true) { - final ModelNode minNode = modelNodes[lowIdx]; - final ModelNode maxNode = modelNodes[highIdx]; - advance_range: if (maxNode.numShards(index) > 0) { - final float delta = absDelta(weights[lowIdx], weights[highIdx]); - if (lessThan(delta, threshold)) { - if (lowIdx > 0 - && highIdx - 1 > 0 // is there a chance for a higher delta? - && (absDelta(weights[0], weights[highIdx - 1]) > threshold) // check if we need to break at all - ) { - /* This is a special case if allocations from the "heaviest" to the "lighter" nodes is not possible - * due to some allocation decider restrictions like zone awareness. if one zone has for instance - * less nodes than another zone. so one zone is horribly overloaded from a balanced perspective but we - * can't move to the "lighter" shards since otherwise the zone would go over capacity. - * - * This break jumps straight to the condition below were we start moving from the high index towards - * the low index to shrink the window we are considering for balance from the other direction. - * (check shrinking the window from MAX to MIN) - * See #3580 - */ - break advance_range; - } - if (logger.isTraceEnabled()) { - logger.trace( - "Stop balancing index [{}] min_node [{}] weight: [{}]" + " max_node [{}] weight: [{}] delta: [{}]", - index, - maxNode.getNodeId(), - weights[highIdx], - minNode.getNodeId(), - weights[lowIdx], - delta - ); - } - break; - } - if (logger.isTraceEnabled()) { - logger.trace( - "Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]", - maxNode.getNodeId(), - weights[highIdx], - minNode.getNodeId(), - weights[lowIdx], - delta - ); - } - if (delta <= 1.0f) { - /* - * prevent relocations that only swap the weights of the two nodes. a relocation must bring us closer to the - * balance if we only achieve the same delta the relocation is useless - * - * NB this comment above was preserved from an earlier version but doesn't obviously describe the code today. We - * already know that lessThan(delta, threshold) == false and threshold defaults to 1.0, so by default we never - * hit this case anyway. 
- */ - logger.trace( - "Couldn't find shard to relocate from node [{}] to node [{}]", - maxNode.getNodeId(), - minNode.getNodeId() - ); - } else if (tryRelocateShard(minNode, maxNode, index)) { - /* - * TODO we could be a bit smarter here, we don't need to fully sort necessarily - * we could just find the place to insert linearly but the win might be minor - * compared to the added complexity - */ - weights[lowIdx] = sorter.weight(modelNodes[lowIdx]); - weights[highIdx] = sorter.weight(modelNodes[highIdx]); - sorter.sort(0, relevantNodes); - lowIdx = 0; - highIdx = relevantNodes - 1; - continue; - } - } - if (lowIdx < highIdx - 1) { - /* Shrinking the window from MIN to MAX - * we can't move from any shard from the min node lets move on to the next node - * and see if the threshold still holds. We either don't have any shard of this - * index on this node of allocation deciders prevent any relocation.*/ - lowIdx++; - } else if (lowIdx > 0) { - /* Shrinking the window from MAX to MIN - * now we go max to min since obviously we can't move anything to the max node - * lets pick the next highest */ - lowIdx = 0; - highIdx--; - } else { - /* we are done here, we either can't relocate anymore or we are balanced */ - break; - } - } - } - } - - /** - * This builds a initial index ordering where the indices are returned - * in most unbalanced first. We need this in order to prevent over - * allocations on added nodes from one index when the weight parameters - * for global balance overrule the index balance at an intermediate - * state. For example this can happen if we have 3 nodes and 3 indices - * with 3 primary and 1 replica shards. At the first stage all three nodes hold - * 2 shard for each index. Now we add another node and the first index - * is balanced moving three shards from two of the nodes over to the new node since it - * has no shards yet and global balance for the node is way below - * average. To re-balance we need to move shards back eventually likely - * to the nodes we relocated them from. 
- */ - private String[] buildWeightOrderedIndices() { - final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class); - final float[] deltas = new float[indices.length]; - for (int i = 0; i < deltas.length; i++) { - sorter.reset(indices[i]); - deltas[i] = sorter.delta(); - } - new IntroSorter() { - - float pivotWeight; - - @Override - protected void swap(int i, int j) { - final String tmpIdx = indices[i]; - indices[i] = indices[j]; - indices[j] = tmpIdx; - final float tmpDelta = deltas[i]; - deltas[i] = deltas[j]; - deltas[j] = tmpDelta; - } - - @Override - protected int compare(int i, int j) { - return Float.compare(deltas[j], deltas[i]); - } - - @Override - protected void setPivot(int i) { - pivotWeight = deltas[i]; - } - - @Override - protected int comparePivot(int j) { - return Float.compare(deltas[j], pivotWeight); - } - }.sort(0, deltas.length); - - return indices; - } - - /** - * Checks if target node is ineligible and if so, adds to the list - * of ineligible target nodes - */ - private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) { - Decision nodeLevelAllocationDecision = allocation.deciders().canAllocateAnyShardToNode(targetNode, allocation); - if (nodeLevelAllocationDecision.type() != Decision.Type.YES) { - inEligibleTargetNode.add(targetNode); - } - } - - /** - * Move started shards that can not be allocated to a node anymore - * - * For each shard to be moved this function executes a move operation - * to the minimal eligible node with respect to the - * weight function. If a shard is moved the shard will be set to - * {@link ShardRoutingState#RELOCATING} and a shadow instance of this - * shard is created with an incremented version in the state - * {@link ShardRoutingState#INITIALIZING}. - */ - public void moveShards() { - // Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling - // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are - // offloading the shards. - - // Trying to eliminate target nodes so that we donot unnecessarily iterate over source nodes - // when no target is eligible - for (ModelNode currentNode : sorter.modelNodes) { - checkAndAddInEligibleTargetNode(currentNode.getRoutingNode()); - } - boolean primariesThrottled = false; - for (Iterator it = allocation.routingNodes().nodeInterleavedShardIterator(movePrimaryFirst); it.hasNext();) { - // Verify if the cluster concurrent recoveries have been reached. - if (allocation.deciders().canMoveAnyShard(allocation).type() != Decision.Type.YES) { - logger.info( - "Cannot move any shard in the cluster due to cluster concurrent recoveries getting breached" - + ". Skipping shard iteration" - ); - return; - } - // Early terminate node interleaved shard iteration when no eligible target nodes are available - if (sorter.modelNodes.length == inEligibleTargetNode.size()) { - logger.info( - "Cannot move any shard in the cluster as there is no node on which shards can be allocated" - + ". Skipping shard iteration" - ); - return; - } - - ShardRouting shardRouting = it.next(); - - // Ensure that replicas don't relocate if primaries are being throttled and primary first is enabled - if (movePrimaryFirst && primariesThrottled && !shardRouting.primary()) { - logger.info( - "Cannot move any replica shard in the cluster as movePrimaryFirst is enabled and primary shards" - + "are being throttled. 
Skipping shard iteration" - ); - return; - } - - // Verify if the shard is allowed to move if outgoing recovery on the node hosting the primary shard - // is not being throttled. - Decision canMoveAwayDecision = allocation.deciders().canMoveAway(shardRouting, allocation); - if (canMoveAwayDecision.type() != Decision.Type.YES) { - if (logger.isDebugEnabled()) logger.debug("Cannot move away shard [{}] Skipping this shard", shardRouting); - if (shardRouting.primary() && canMoveAwayDecision.type() == Type.THROTTLE) { - primariesThrottled = true; - } - continue; - } - - final MoveDecision moveDecision = decideMove(shardRouting); - if (moveDecision.isDecisionTaken() && moveDecision.forceMove()) { - final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); - final ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId()); - sourceNode.removeShard(shardRouting); - Tuple relocatingShards = routingNodes.relocateShard( - shardRouting, - targetNode.getNodeId(), - allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), - allocation.changes() - ); - targetNode.addShard(relocatingShards.v2()); - if (logger.isTraceEnabled()) { - logger.trace("Moved shard [{}] to node [{}]", shardRouting, targetNode.getRoutingNode()); - } - - // Verifying if this node can be considered ineligible for further iterations - if (targetNode != null) { - checkAndAddInEligibleTargetNode(targetNode.getRoutingNode()); - } - } else if (moveDecision.isDecisionTaken() && moveDecision.canRemain() == false) { - logger.trace("[{}][{}] can't move", shardRouting.index(), shardRouting.id()); - } - } - } - - /** - * Makes a decision on whether to move a started shard to another node. The following rules apply - * to the {@link MoveDecision} return object: - * 1. If the shard is not started, no decision will be taken and {@link MoveDecision#isDecisionTaken()} will return false. - * 2. If the shard is allowed to remain on its current node, no attempt will be made to move the shard and - * {@link MoveDecision#getCanRemainDecision} will have a decision type of YES. All other fields in the object will be null. - * 3. If the shard is not allowed to remain on its current node, then {@link MoveDecision#getAllocationDecision()} will be - * populated with the decision of moving to another node. If {@link MoveDecision#forceMove()} ()} returns {@code true}, then - * {@link MoveDecision#getTargetNode} will return a non-null value, otherwise the assignedNodeId will be null. - * 4. If the method is invoked in explain mode (e.g. from the cluster allocation explain APIs), then - * {@link MoveDecision#getNodeDecisions} will have a non-null value. - */ - public MoveDecision decideMove(final ShardRouting shardRouting) { - if (shardRouting.started() == false) { - // we can only move started shards - return MoveDecision.NOT_TAKEN; - } - - final boolean explain = allocation.debugDecision(); - final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); - assert sourceNode != null && sourceNode.containsShard(shardRouting); - RoutingNode routingNode = sourceNode.getRoutingNode(); - Decision canRemain = allocation.deciders().canRemain(shardRouting, routingNode, allocation); - if (canRemain.type() != Decision.Type.NO) { - return MoveDecision.stay(canRemain); - } - - sorter.reset(shardRouting.getIndexName()); - /* - * the sorter holds the minimum weight node first for the shards index. - * We now walk through the nodes until we find a node to allocate the shard. 
- * This is not guaranteed to be balanced after this operation we still try best effort to - * allocate on the minimal eligible node. - */ - Type bestDecision = Type.NO; - RoutingNode targetNode = null; - final List nodeExplanationMap = explain ? new ArrayList<>() : null; - int weightRanking = 0; - int targetNodeProcessed = 0; - for (ModelNode currentNode : sorter.modelNodes) { - if (currentNode != sourceNode) { - RoutingNode target = currentNode.getRoutingNode(); - if (!explain && inEligibleTargetNode.contains(target)) continue; - // don't use canRebalance as we want hard filtering rules to apply. See #17698 - if (!explain) { - // If we cannot allocate any shard to node marking it in eligible - Decision nodeLevelAllocationDecision = allocation.deciders().canAllocateAnyShardToNode(target, allocation); - if (nodeLevelAllocationDecision.type() != Decision.Type.YES) { - inEligibleTargetNode.add(currentNode.getRoutingNode()); - continue; - } - } - targetNodeProcessed++; - // don't use canRebalance as we want hard filtering rules to apply. See #17698 - Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation); - if (explain) { - nodeExplanationMap.add( - new NodeAllocationResult(currentNode.getRoutingNode().node(), allocationDecision, ++weightRanking) - ); - } - // TODO maybe we can respect throttling here too? - if (allocationDecision.type().higherThan(bestDecision)) { - bestDecision = allocationDecision.type(); - if (bestDecision == Type.YES) { - targetNode = target; - if (explain == false) { - // we are not in explain mode and already have a YES decision on the best weighted node, - // no need to continue iterating - break; - } - } - } - } - } - - return MoveDecision.cannotRemain( - canRemain, - AllocationDecision.fromDecisionType(bestDecision), - targetNode != null ? targetNode.node() : null, - nodeExplanationMap - ); - } - - /** - * Builds the internal model from all shards in the given - * {@link Iterable}. All shards in the {@link Iterable} must be assigned - * to a node. This method will skip shards in the state - * {@link ShardRoutingState#RELOCATING} since each relocating shard has - * a shadow shard in the state {@link ShardRoutingState#INITIALIZING} - * on the target node which we respect during the allocation / balancing - * process. In short, this method recreates the status-quo in the cluster. - */ - private Map buildModelFromAssigned() { - Map nodes = new HashMap<>(); - for (RoutingNode rn : routingNodes) { - ModelNode node = new ModelNode(rn); - nodes.put(rn.nodeId(), node); - for (ShardRouting shard : rn) { - assert rn.nodeId().equals(shard.currentNodeId()); - /* we skip relocating shards here since we expect an initializing shard with the same id coming in */ - if (shard.state() != RELOCATING) { - node.addShard(shard); - if (logger.isTraceEnabled()) { - logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId()); - } - } - } - } - return nodes; - } - - /** - * Allocates all given shards on the minimal eligible node for the shards index - * with respect to the weight function. All given shards must be unassigned. - */ - private void allocateUnassigned() { - RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); - assert !nodes.isEmpty(); - if (logger.isTraceEnabled()) { - logger.trace("Start allocating unassigned shards"); - } - if (unassigned.isEmpty()) { - return; - } - - /* - * TODO: We could be smarter here and group the shards by index and then - * use the sorter to save some iterations. 
- */ - final PriorityComparator secondaryComparator = PriorityComparator.getAllocationComparator(allocation); - final Comparator comparator = (o1, o2) -> { - if (o1.primary() ^ o2.primary()) { - return o1.primary() ? -1 : 1; - } - final int indexCmp; - if ((indexCmp = o1.getIndexName().compareTo(o2.getIndexName())) == 0) { - return o1.getId() - o2.getId(); - } - // this comparator is more expensive than all the others up there - // that's why it's added last even though it could be easier to read - // if we'd apply it earlier. this comparator will only differentiate across - // indices all shards of the same index is treated equally. - final int secondary = secondaryComparator.compare(o1, o2); - return secondary == 0 ? indexCmp : secondary; - }; - /* - * we use 2 arrays and move replicas to the second array once we allocated an identical - * replica in the current iteration to make sure all indices get allocated in the same manner. - * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with - * 2 replica and 1 shard would look like: - * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)] - * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with - * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned. - */ - ShardRouting[] primary = unassigned.drain(); - ShardRouting[] secondary = new ShardRouting[primary.length]; - int secondaryLength = 0; - int primaryLength = primary.length; - ArrayUtil.timSort(primary, comparator); - do { - for (int i = 0; i < primaryLength; i++) { - ShardRouting shard = primary[i]; - final AllocateUnassignedDecision allocationDecision = decideAllocateUnassigned(shard); - final String assignedNodeId = allocationDecision.getTargetNode() != null - ? allocationDecision.getTargetNode().getId() - : null; - final ModelNode minNode = assignedNodeId != null ? 
nodes.get(assignedNodeId) : null; - - if (allocationDecision.getAllocationDecision() == AllocationDecision.YES) { - if (logger.isTraceEnabled()) { - logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); - } - - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); - shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); - minNode.addShard(shard); - if (!shard.primary()) { - // copy over the same replica shards to the secondary array so they will get allocated - // in a subsequent iteration, allowing replicas of other shards to be allocated first - while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { - secondary[secondaryLength++] = primary[++i]; - } - } - } else { - // did *not* receive a YES decision - if (logger.isTraceEnabled()) { - logger.trace( - "No eligible node found to assign shard [{}] allocation_status [{}]", - shard, - allocationDecision.getAllocationStatus() - ); - } - - if (minNode != null) { - // throttle decision scenario - assert allocationDecision.getAllocationStatus() == AllocationStatus.DECIDERS_THROTTLED; - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); - minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize)); - } else { - if (logger.isTraceEnabled()) { - logger.trace("No Node found to assign shard [{}]", shard); - } - } - - unassigned.ignoreShard(shard, allocationDecision.getAllocationStatus(), allocation.changes()); - if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas - while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { - unassigned.ignoreShard(primary[++i], allocationDecision.getAllocationStatus(), allocation.changes()); - } - } - } - } - primaryLength = secondaryLength; - ShardRouting[] tmp = primary; - primary = secondary; - secondary = tmp; - secondaryLength = 0; - } while (primaryLength > 0); - // clear everything we have either added it or moved to ignoreUnassigned - } - - /** - * Make a decision for allocating an unassigned shard. This method returns a two values in a tuple: the - * first value is the {@link Decision} taken to allocate the unassigned shard, the second value is the - * {@link ModelNode} representing the node that the shard should be assigned to. If the decision returned - * is of type {@link Type#NO}, then the assigned node will be null. 
- */ - private AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting shard) { - if (shard.assignedToNode()) { - // we only make decisions for unassigned shards here - return AllocateUnassignedDecision.NOT_TAKEN; - } - - final boolean explain = allocation.debugDecision(); - Decision shardLevelDecision = allocation.deciders().canAllocate(shard, allocation); - if (shardLevelDecision.type() == Type.NO && explain == false) { - // NO decision for allocating the shard, irrespective of any particular node, so exit early - return AllocateUnassignedDecision.no(AllocationStatus.DECIDERS_NO, null); - } - - /* find an node with minimal weight we can allocate on*/ - float minWeight = Float.POSITIVE_INFINITY; - ModelNode minNode = null; - Decision decision = null; - /* Don't iterate over an identity hashset here the - * iteration order is different for each run and makes testing hard */ - Map nodeExplanationMap = explain ? new HashMap<>() : null; - List> nodeWeights = explain ? new ArrayList<>() : null; - for (ModelNode node : nodes.values()) { - if (node.containsShard(shard) && explain == false) { - // decision is NO without needing to check anything further, so short circuit - continue; - } - - // weight of this index currently on the node - float currentWeight = weight.weightWithAllocationConstraints(this, node, shard.getIndexName()); - // moving the shard would not improve the balance, and we are not in explain mode, so short circuit - if (currentWeight > minWeight && explain == false) { - continue; - } - - Decision currentDecision = allocation.deciders().canAllocate(shard, node.getRoutingNode(), allocation); - if (explain) { - nodeExplanationMap.put(node.getNodeId(), new NodeAllocationResult(node.getRoutingNode().node(), currentDecision, 0)); - nodeWeights.add(Tuple.tuple(node.getNodeId(), currentWeight)); - } - if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) { - final boolean updateMinNode; - if (currentWeight == minWeight) { - /* we have an equal weight tie breaking: - * 1. if one decision is YES prefer it - * 2. prefer the node that holds the primary for this index with the next id in the ring ie. - * for the 3 shards 2 replica case we try to build up: - * 1 2 0 - * 2 0 1 - * 0 1 2 - * such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater - * than the id of the shard we need to assign. This works find when new indices are created since - * primaries are added first and we only add one shard set a time in this algorithm. 
- */ - if (currentDecision.type() == decision.type()) { - final int repId = shard.id(); - final int nodeHigh = node.highestPrimary(shard.index().getName()); - final int minNodeHigh = minNode.highestPrimary(shard.getIndexName()); - updateMinNode = ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) - && (nodeHigh < minNodeHigh)) || (nodeHigh > repId && minNodeHigh < repId)); - } else { - updateMinNode = currentDecision.type() == Type.YES; - } - } else { - updateMinNode = currentWeight < minWeight; - } - if (updateMinNode) { - minNode = node; - minWeight = currentWeight; - decision = currentDecision; - } - } - } - if (decision == null) { - // decision was not set and a node was not assigned, so treat it as a NO decision - decision = Decision.NO; - } - List nodeDecisions = null; - if (explain) { - nodeDecisions = new ArrayList<>(); - // fill in the correct weight ranking, once we've been through all nodes - nodeWeights.sort((nodeWeight1, nodeWeight2) -> Float.compare(nodeWeight1.v2(), nodeWeight2.v2())); - int weightRanking = 0; - for (Tuple nodeWeight : nodeWeights) { - NodeAllocationResult current = nodeExplanationMap.get(nodeWeight.v1()); - nodeDecisions.add(new NodeAllocationResult(current.getNode(), current.getCanAllocateDecision(), ++weightRanking)); - } - } - return AllocateUnassignedDecision.fromDecision(decision, minNode != null ? minNode.routingNode.node() : null, nodeDecisions); - } - - private static final Comparator BY_DESCENDING_SHARD_ID = Comparator.comparing(ShardRouting::shardId).reversed(); - - /** - * Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the - * balance model. Iff this method returns a true the relocation has already been executed on the - * simulation model as well as on the cluster. 
- */ - private boolean tryRelocateShard(ModelNode minNode, ModelNode maxNode, String idx) { - final ModelIndex index = maxNode.getIndex(idx); - if (index != null) { - logger.trace("Try relocating shard of [{}] from [{}] to [{}]", idx, maxNode.getNodeId(), minNode.getNodeId()); - final Iterable shardRoutings = StreamSupport.stream(index.spliterator(), false) - .filter(ShardRouting::started) // cannot rebalance unassigned, initializing or relocating shards anyway - .filter(maxNode::containsShard) - .sorted(BY_DESCENDING_SHARD_ID) // check in descending order of shard id so that the decision is deterministic - ::iterator; - - final AllocationDeciders deciders = allocation.deciders(); - for (ShardRouting shard : shardRoutings) { - final Decision rebalanceDecision = deciders.canRebalance(shard, allocation); - if (rebalanceDecision.type() == Type.NO) { - continue; - } - final Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(), allocation); - if (allocationDecision.type() == Type.NO) { - continue; - } - - final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); - - maxNode.removeShard(shard); - long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); - - if (decision.type() == Type.YES) { - /* only allocate on the cluster if we are not throttled */ - logger.debug("Relocate [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); - minNode.addShard(routingNodes.relocateShard(shard, minNode.getNodeId(), shardSize, allocation.changes()).v1()); - return true; - } else { - /* allocate on the model even if throttled */ - logger.debug("Simulate relocation of [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); - assert decision.type() == Type.THROTTLE; - minNode.addShard(shard.relocate(minNode.getNodeId(), shardSize)); - return false; - } - } - } - logger.trace("No shards of [{}] can relocate from [{}] to [{}]", idx, maxNode.getNodeId(), minNode.getNodeId()); - return false; - } - - } - /** * A model node. * @@ -1277,6 +353,25 @@ public boolean containsShard(ShardRouting shard) { } + /** + * A {@link Balancer} used by the {@link BalancedShardsAllocator} to perform allocation operations + * @deprecated As of 2.4.0, replaced by {@link LocalShardsBalancer} + * + * @opensearch.internal + */ + @Deprecated + public static class Balancer extends LocalShardsBalancer { + public Balancer( + Logger logger, + RoutingAllocation allocation, + boolean movePrimaryFirst, + BalancedShardsAllocator.WeightFunction weight, + float threshold + ) { + super(logger, allocation, movePrimaryFirst, weight, threshold); + } + } + /** * A model index. 
* @@ -1346,10 +441,10 @@ static final class NodeSorter extends IntroSorter { final float[] weights; private final WeightFunction function; private String index; - private final Balancer balancer; + private final ShardsBalancer balancer; private float pivotWeight; - NodeSorter(ModelNode[] modelNodes, WeightFunction function, Balancer balancer) { + NodeSorter(ModelNode[] modelNodes, WeightFunction function, ShardsBalancer balancer) { this.function = function; this.balancer = balancer; this.modelNodes = modelNodes; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java new file mode 100644 index 0000000000000..53d7c827392d5 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -0,0 +1,967 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation.allocator; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.IntroSorter; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.opensearch.cluster.routing.allocation.AllocationDecision; +import org.opensearch.cluster.routing.allocation.MoveDecision; +import org.opensearch.cluster.routing.allocation.NodeAllocationResult; +import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.cluster.routing.allocation.decider.DiskThresholdDecider; +import org.opensearch.common.collect.Tuple; +import org.opensearch.gateway.PriorityComparator; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.StreamSupport; + +import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; + +/** + * A {@link LocalShardsBalancer} used by the {@link BalancedShardsAllocator} to perform allocation operations + * for local shards within the cluster. 
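+ *
+ * A rough construction sketch (only a sketch; variable names here are assumptions): the allocator builds one
+ * balancer per allocation round, e.g. {@code new LocalShardsBalancer(logger, allocation, movePrimaryFirst,
+ * weightFunction, threshold)}, and is then expected to drive it through {@code allocateUnassigned()},
+ * {@code moveShards()} and {@code balance()}.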
+ * + * @opensearch.internal + */ +public class LocalShardsBalancer extends ShardsBalancer { + private final Logger logger; + private final Map nodes; + private final RoutingAllocation allocation; + private final RoutingNodes routingNodes; + private final boolean movePrimaryFirst; + private final BalancedShardsAllocator.WeightFunction weight; + + private final float threshold; + private final Metadata metadata; + private final float avgShardsPerNode; + private final BalancedShardsAllocator.NodeSorter sorter; + private final Set inEligibleTargetNode; + + public LocalShardsBalancer( + Logger logger, + RoutingAllocation allocation, + boolean movePrimaryFirst, + BalancedShardsAllocator.WeightFunction weight, + float threshold + ) { + this.logger = logger; + this.allocation = allocation; + this.movePrimaryFirst = movePrimaryFirst; + this.weight = weight; + this.threshold = threshold; + this.routingNodes = allocation.routingNodes(); + this.metadata = allocation.metadata(); + avgShardsPerNode = ((float) metadata.getTotalNumberOfShards()) / routingNodes.size(); + nodes = Collections.unmodifiableMap(buildModelFromAssigned()); + sorter = newNodeSorter(); + inEligibleTargetNode = new HashSet<>(); + } + + /** + * Returns an array view on the nodes in the balancer. Nodes should not be removed from this list. + */ + private BalancedShardsAllocator.ModelNode[] nodesArray() { + return nodes.values().toArray(new BalancedShardsAllocator.ModelNode[nodes.size()]); + } + + /** + * Returns the average of shards per node for the given index + */ + @Override + public float avgShardsPerNode(String index) { + return ((float) metadata.index(index).getTotalNumberOfShards()) / nodes.size(); + } + + /** + * Returns the global average of shards per node + */ + @Override + public float avgShardsPerNode() { + return avgShardsPerNode; + } + + /** + * Returns a new {@link BalancedShardsAllocator.NodeSorter} that sorts the nodes based on their + * current weight with respect to the index passed to the sorter. The + * returned sorter is not sorted. Use {@link BalancedShardsAllocator.NodeSorter#reset(String)} + * to sort based on an index. + */ + private BalancedShardsAllocator.NodeSorter newNodeSorter() { + return new BalancedShardsAllocator.NodeSorter(nodesArray(), weight, this); + } + + /** + * The absolute value difference between two weights. + */ + private static float absDelta(float lower, float higher) { + assert higher >= lower : higher + " lt " + lower + " but was expected to be gte"; + return Math.abs(higher - lower); + } + + /** + * Returns {@code true} iff the weight delta between two nodes is under a defined threshold. + * See {@link BalancedShardsAllocator#THRESHOLD_SETTING} for defining the threshold. + */ + private static boolean lessThan(float delta, float threshold) { + /* deltas close to the threshold are "rounded" to the threshold manually + to prevent floating point problems if the delta is very close to the + threshold ie. 1.000000002 which can trigger unnecessary balance actions*/ + return delta <= (threshold + 0.001f); + } + + /** + * Balances the nodes on the cluster model according to the weight function. 
+ * The actual balancing is delegated to {@link #balanceByWeights()} + */ + @Override + void balance() { + if (logger.isTraceEnabled()) { + logger.trace("Start balancing cluster"); + } + if (allocation.hasPendingAsyncFetch()) { + /* + * see https://github.com/elastic/elasticsearch/issues/14387 + * if we allow rebalance operations while we are still fetching shard store data + * we might end up with unnecessary rebalance operations which can be super confusion/frustrating + * since once the fetches come back we might just move all the shards back again. + * Therefore we only do a rebalance if we have fetched all information. + */ + logger.debug("skipping rebalance due to in-flight shard/store fetches"); + return; + } + if (allocation.deciders().canRebalance(allocation).type() != Decision.Type.YES) { + logger.trace("skipping rebalance as it is disabled"); + return; + } + if (nodes.size() < 2) { /* skip if we only have one node */ + logger.trace("skipping rebalance as single node only"); + return; + } + balanceByWeights(); + } + + /** + * Makes a decision about moving a single shard to a different node to form a more + * optimally balanced cluster. This method is invoked from the cluster allocation + * explain API only. + */ + @Override + MoveDecision decideRebalance(final ShardRouting shard) { + if (shard.started() == false) { + // we can only rebalance started shards + return MoveDecision.NOT_TAKEN; + } + + Decision canRebalance = allocation.deciders().canRebalance(shard, allocation); + + sorter.reset(shard.getIndexName()); + BalancedShardsAllocator.ModelNode[] modelNodes = sorter.modelNodes; + final String currentNodeId = shard.currentNodeId(); + // find currently assigned node + BalancedShardsAllocator.ModelNode currentNode = null; + for (BalancedShardsAllocator.ModelNode node : modelNodes) { + if (node.getNodeId().equals(currentNodeId)) { + currentNode = node; + break; + } + } + assert currentNode != null : "currently assigned node could not be found"; + + // balance the shard, if a better node can be found + final String idxName = shard.getIndexName(); + final float currentWeight = weight.weight(this, currentNode, idxName); + final AllocationDeciders deciders = allocation.deciders(); + Decision.Type rebalanceDecisionType = Decision.Type.NO; + BalancedShardsAllocator.ModelNode assignedNode = null; + List> betterBalanceNodes = new ArrayList<>(); + List> sameBalanceNodes = new ArrayList<>(); + List> worseBalanceNodes = new ArrayList<>(); + for (BalancedShardsAllocator.ModelNode node : modelNodes) { + if (node == currentNode) { + continue; // skip over node we're currently allocated to + } + final Decision canAllocate = deciders.canAllocate(shard, node.getRoutingNode(), allocation); + // the current weight of the node in the cluster, as computed by the weight function; + // this is a comparison of the number of shards on this node to the number of shards + // that should be on each node on average (both taking the cluster as a whole into account + // as well as shards per index) + final float nodeWeight = weight.weight(this, node, idxName); + // if the node we are examining has a worse (higher) weight than the node the shard is + // assigned to, then there is no way moving the shard to the node with the worse weight + // can make the balance of the cluster better, so we check for that here + final boolean betterWeightThanCurrent = nodeWeight <= currentWeight; + boolean rebalanceConditionsMet = false; + if (betterWeightThanCurrent) { + // get the delta between the weights of the node we are 
checking and the node that holds the shard + float currentDelta = absDelta(nodeWeight, currentWeight); + // checks if the weight delta is above a certain threshold; if it is not above a certain threshold, + // then even though the node we are examining has a better weight and may make the cluster balance + // more even, it doesn't make sense to execute the heavyweight operation of relocating a shard unless + // the gains make it worth it, as defined by the threshold + boolean deltaAboveThreshold = lessThan(currentDelta, threshold) == false; + // calculate the delta of the weights of the two nodes if we were to add the shard to the + // node in question and move it away from the node that currently holds it. + // hence we add 2.0f to the weight delta + float proposedDelta = 2.0f + nodeWeight - currentWeight; + boolean betterWeightWithShardAdded = proposedDelta < currentDelta; + + rebalanceConditionsMet = deltaAboveThreshold && betterWeightWithShardAdded; + // if the simulated weight delta with the shard moved away is better than the weight delta + // with the shard remaining on the current node, and we are allowed to allocate to the + // node in question, then allow the rebalance + if (rebalanceConditionsMet && canAllocate.type().higherThan(rebalanceDecisionType)) { + // rebalance to the node, only will get overwritten if the decision here is to + // THROTTLE and we get a decision with YES on another node + rebalanceDecisionType = canAllocate.type(); + assignedNode = node; + } + } + Tuple nodeResult = Tuple.tuple(node, canAllocate); + if (rebalanceConditionsMet) { + betterBalanceNodes.add(nodeResult); + } else if (betterWeightThanCurrent) { + sameBalanceNodes.add(nodeResult); + } else { + worseBalanceNodes.add(nodeResult); + } + } + + int weightRanking = 0; + List nodeDecisions = new ArrayList<>(modelNodes.length - 1); + for (Tuple result : betterBalanceNodes) { + nodeDecisions.add( + new NodeAllocationResult( + result.v1().getRoutingNode().node(), + AllocationDecision.fromDecisionType(result.v2().type()), + result.v2(), + ++weightRanking + ) + ); + } + int currentNodeWeightRanking = ++weightRanking; + for (Tuple result : sameBalanceNodes) { + AllocationDecision nodeDecision = result.v2().type() == Decision.Type.NO + ? AllocationDecision.NO + : AllocationDecision.WORSE_BALANCE; + nodeDecisions.add( + new NodeAllocationResult(result.v1().getRoutingNode().node(), nodeDecision, result.v2(), currentNodeWeightRanking) + ); + } + for (Tuple result : worseBalanceNodes) { + AllocationDecision nodeDecision = result.v2().type() == Decision.Type.NO + ? AllocationDecision.NO + : AllocationDecision.WORSE_BALANCE; + nodeDecisions.add(new NodeAllocationResult(result.v1().getRoutingNode().node(), nodeDecision, result.v2(), ++weightRanking)); + } + + if (canRebalance.type() != Decision.Type.YES || allocation.hasPendingAsyncFetch()) { + AllocationDecision allocationDecision = allocation.hasPendingAsyncFetch() + ? AllocationDecision.AWAITING_INFO + : AllocationDecision.fromDecisionType(canRebalance.type()); + return MoveDecision.cannotRebalance(canRebalance, allocationDecision, currentNodeWeightRanking, nodeDecisions); + } else { + return MoveDecision.rebalance( + canRebalance, + AllocationDecision.fromDecisionType(rebalanceDecisionType), + assignedNode != null ? assignedNode.getRoutingNode().node() : null, + currentNodeWeightRanking, + nodeDecisions + ); + } + } + + /** + * Balances the nodes on the cluster model according to the weight + * function. 
The configured threshold is the minimum delta between the + * weight of the maximum node and the minimum node according to the + * {@link BalancedShardsAllocator.WeightFunction}. This weight is calculated per index to + * distribute shards evenly per index. The balancer tries to relocate + * shards only if the delta exceeds the threshold. In the default case + * the threshold is set to {@code 1.0} to enforce gaining relocation + * only, or in other words relocations that move the weight delta closer + * to {@code 0.0} + */ + private void balanceByWeights() { + final AllocationDeciders deciders = allocation.deciders(); + final BalancedShardsAllocator.ModelNode[] modelNodes = sorter.modelNodes; + final float[] weights = sorter.weights; + for (String index : buildWeightOrderedIndices()) { + IndexMetadata indexMetadata = metadata.index(index); + + // find nodes that have a shard of this index or where shards of this index are allowed to be allocated to, + // move these nodes to the front of modelNodes so that we can only balance based on these nodes + int relevantNodes = 0; + for (int i = 0; i < modelNodes.length; i++) { + BalancedShardsAllocator.ModelNode modelNode = modelNodes[i]; + if (modelNode.getIndex(index) != null + || deciders.canAllocate(indexMetadata, modelNode.getRoutingNode(), allocation).type() != Decision.Type.NO) { + // swap nodes at position i and relevantNodes + modelNodes[i] = modelNodes[relevantNodes]; + modelNodes[relevantNodes] = modelNode; + relevantNodes++; + } + } + + if (relevantNodes < 2) { + continue; + } + + sorter.reset(index, 0, relevantNodes); + int lowIdx = 0; + int highIdx = relevantNodes - 1; + while (true) { + final BalancedShardsAllocator.ModelNode minNode = modelNodes[lowIdx]; + final BalancedShardsAllocator.ModelNode maxNode = modelNodes[highIdx]; + advance_range: if (maxNode.numShards(index) > 0) { + final float delta = absDelta(weights[lowIdx], weights[highIdx]); + if (lessThan(delta, threshold)) { + if (lowIdx > 0 + && highIdx - 1 > 0 // is there a chance for a higher delta? + && (absDelta(weights[0], weights[highIdx - 1]) > threshold) // check if we need to break at all + ) { + /* This is a special case if allocations from the "heaviest" to the "lighter" nodes is not possible + * due to some allocation decider restrictions like zone awareness. if one zone has for instance + * less nodes than another zone. so one zone is horribly overloaded from a balanced perspective but we + * can't move to the "lighter" shards since otherwise the zone would go over capacity. + * + * This break jumps straight to the condition below were we start moving from the high index towards + * the low index to shrink the window we are considering for balance from the other direction. + * (check shrinking the window from MAX to MIN) + * See #3580 + */ + break advance_range; + } + if (logger.isTraceEnabled()) { + logger.trace( + "Stop balancing index [{}] min_node [{}] weight: [{}]" + " max_node [{}] weight: [{}] delta: [{}]", + index, + maxNode.getNodeId(), + weights[highIdx], + minNode.getNodeId(), + weights[lowIdx], + delta + ); + } + break; + } + if (logger.isTraceEnabled()) { + logger.trace( + "Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]", + maxNode.getNodeId(), + weights[highIdx], + minNode.getNodeId(), + weights[lowIdx], + delta + ); + } + if (delta <= 1.0f) { + /* + * prevent relocations that only swap the weights of the two nodes. 
a relocation must bring us closer to the + * balance if we only achieve the same delta the relocation is useless + * + * NB this comment above was preserved from an earlier version but doesn't obviously describe the code today. We + * already know that lessThan(delta, threshold) == false and threshold defaults to 1.0, so by default we never + * hit this case anyway. + */ + logger.trace( + "Couldn't find shard to relocate from node [{}] to node [{}]", + maxNode.getNodeId(), + minNode.getNodeId() + ); + } else if (tryRelocateShard(minNode, maxNode, index)) { + /* + * TODO we could be a bit smarter here, we don't need to fully sort necessarily + * we could just find the place to insert linearly but the win might be minor + * compared to the added complexity + */ + weights[lowIdx] = sorter.weight(modelNodes[lowIdx]); + weights[highIdx] = sorter.weight(modelNodes[highIdx]); + sorter.sort(0, relevantNodes); + lowIdx = 0; + highIdx = relevantNodes - 1; + continue; + } + } + if (lowIdx < highIdx - 1) { + /* Shrinking the window from MIN to MAX + * we can't move from any shard from the min node lets move on to the next node + * and see if the threshold still holds. We either don't have any shard of this + * index on this node of allocation deciders prevent any relocation.*/ + lowIdx++; + } else if (lowIdx > 0) { + /* Shrinking the window from MAX to MIN + * now we go max to min since obviously we can't move anything to the max node + * lets pick the next highest */ + lowIdx = 0; + highIdx--; + } else { + /* we are done here, we either can't relocate anymore or we are balanced */ + break; + } + } + } + } + + /** + * This builds a initial index ordering where the indices are returned + * in most unbalanced first. We need this in order to prevent over + * allocations on added nodes from one index when the weight parameters + * for global balance overrule the index balance at an intermediate + * state. For example this can happen if we have 3 nodes and 3 indices + * with 3 primary and 1 replica shards. At the first stage all three nodes hold + * 2 shard for each index. Now we add another node and the first index + * is balanced moving three shards from two of the nodes over to the new node since it + * has no shards yet and global balance for the node is way below + * average. To re-balance we need to move shards back eventually likely + * to the nodes we relocated them from. 
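+     * As a rough, hypothetical illustration: with per-index deltas of 0.4 for idx1, 2.3 for idx2 and 1.1 for
+     * idx3 (the delta being, roughly, the gap between the most and least loaded node for that index as seen by
+     * the weight function), the order produced here is [idx2, idx3, idx1], so the most unbalanced index is
+     * balanced first.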
+ */ + private String[] buildWeightOrderedIndices() { + final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class); + final float[] deltas = new float[indices.length]; + for (int i = 0; i < deltas.length; i++) { + sorter.reset(indices[i]); + deltas[i] = sorter.delta(); + } + new IntroSorter() { + + float pivotWeight; + + @Override + protected void swap(int i, int j) { + final String tmpIdx = indices[i]; + indices[i] = indices[j]; + indices[j] = tmpIdx; + final float tmpDelta = deltas[i]; + deltas[i] = deltas[j]; + deltas[j] = tmpDelta; + } + + @Override + protected int compare(int i, int j) { + return Float.compare(deltas[j], deltas[i]); + } + + @Override + protected void setPivot(int i) { + pivotWeight = deltas[i]; + } + + @Override + protected int comparePivot(int j) { + return Float.compare(deltas[j], pivotWeight); + } + }.sort(0, deltas.length); + + return indices; + } + + /** + * Checks if target node is ineligible and if so, adds to the list + * of ineligible target nodes + */ + private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) { + Decision nodeLevelAllocationDecision = allocation.deciders().canAllocateAnyShardToNode(targetNode, allocation); + if (nodeLevelAllocationDecision.type() != Decision.Type.YES) { + inEligibleTargetNode.add(targetNode); + } + } + + /** + * Move started shards that can not be allocated to a node anymore + * + * For each shard to be moved this function executes a move operation + * to the minimal eligible node with respect to the + * weight function. If a shard is moved the shard will be set to + * {@link ShardRoutingState#RELOCATING} and a shadow instance of this + * shard is created with an incremented version in the state + * {@link ShardRoutingState#INITIALIZING}. + */ + @Override + void moveShards() { + // Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling + // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are + // offloading the shards. + + // Trying to eliminate target nodes so that we donot unnecessarily iterate over source nodes + // when no target is eligible + for (BalancedShardsAllocator.ModelNode currentNode : sorter.modelNodes) { + checkAndAddInEligibleTargetNode(currentNode.getRoutingNode()); + } + boolean primariesThrottled = false; + for (Iterator it = allocation.routingNodes().nodeInterleavedShardIterator(movePrimaryFirst); it.hasNext();) { + // Verify if the cluster concurrent recoveries have been reached. + if (allocation.deciders().canMoveAnyShard(allocation).type() != Decision.Type.YES) { + logger.info( + "Cannot move any shard in the cluster due to cluster concurrent recoveries getting breached" + + ". Skipping shard iteration" + ); + return; + } + // Early terminate node interleaved shard iteration when no eligible target nodes are available + if (sorter.modelNodes.length == inEligibleTargetNode.size()) { + logger.info( + "Cannot move any shard in the cluster as there is no node on which shards can be allocated" + + ". Skipping shard iteration" + ); + return; + } + + ShardRouting shardRouting = it.next(); + + // Ensure that replicas don't relocate if primaries are being throttled and primary first is enabled + if (movePrimaryFirst && primariesThrottled && !shardRouting.primary()) { + logger.info( + "Cannot move any replica shard in the cluster as movePrimaryFirst is enabled and primary shards" + + "are being throttled. 
Skipping shard iteration" + ); + return; + } + + // Verify if the shard is allowed to move if outgoing recovery on the node hosting the primary shard + // is not being throttled. + Decision canMoveAwayDecision = allocation.deciders().canMoveAway(shardRouting, allocation); + if (canMoveAwayDecision.type() != Decision.Type.YES) { + if (logger.isDebugEnabled()) logger.debug("Cannot move away shard [{}] Skipping this shard", shardRouting); + if (shardRouting.primary() && canMoveAwayDecision.type() == Decision.Type.THROTTLE) { + primariesThrottled = true; + } + continue; + } + + final MoveDecision moveDecision = decideMove(shardRouting); + if (moveDecision.isDecisionTaken() && moveDecision.forceMove()) { + final BalancedShardsAllocator.ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); + final BalancedShardsAllocator.ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId()); + sourceNode.removeShard(shardRouting); + Tuple relocatingShards = routingNodes.relocateShard( + shardRouting, + targetNode.getNodeId(), + allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), + allocation.changes() + ); + targetNode.addShard(relocatingShards.v2()); + if (logger.isTraceEnabled()) { + logger.trace("Moved shard [{}] to node [{}]", shardRouting, targetNode.getRoutingNode()); + } + + // Verifying if this node can be considered ineligible for further iterations + if (targetNode != null) { + checkAndAddInEligibleTargetNode(targetNode.getRoutingNode()); + } + } else if (moveDecision.isDecisionTaken() && moveDecision.canRemain() == false) { + logger.trace("[{}][{}] can't move", shardRouting.index(), shardRouting.id()); + } + } + } + + /** + * Makes a decision on whether to move a started shard to another node. The following rules apply + * to the {@link MoveDecision} return object: + * 1. If the shard is not started, no decision will be taken and {@link MoveDecision#isDecisionTaken()} will return false. + * 2. If the shard is allowed to remain on its current node, no attempt will be made to move the shard and + * {@link MoveDecision#getCanRemainDecision} will have a decision type of YES. All other fields in the object will be null. + * 3. If the shard is not allowed to remain on its current node, then {@link MoveDecision#getAllocationDecision()} will be + * populated with the decision of moving to another node. If {@link MoveDecision#forceMove()} ()} returns {@code true}, then + * {@link MoveDecision#getTargetNode} will return a non-null value, otherwise the assignedNodeId will be null. + * 4. If the method is invoked in explain mode (e.g. from the cluster allocation explain APIs), then + * {@link MoveDecision#getNodeDecisions} will have a non-null value. + */ + @Override + MoveDecision decideMove(final ShardRouting shardRouting) { + if (shardRouting.started() == false) { + // we can only move started shards + return MoveDecision.NOT_TAKEN; + } + + final boolean explain = allocation.debugDecision(); + final BalancedShardsAllocator.ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); + assert sourceNode != null && sourceNode.containsShard(shardRouting); + RoutingNode routingNode = sourceNode.getRoutingNode(); + Decision canRemain = allocation.deciders().canRemain(shardRouting, routingNode, allocation); + if (canRemain.type() != Decision.Type.NO) { + return MoveDecision.stay(canRemain); + } + + sorter.reset(shardRouting.getIndexName()); + /* + * the sorter holds the minimum weight node first for the shards index. 
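+         * (Put differently: after the sorter.reset(...) call above, sorter.modelNodes[0] is, roughly, the least
+         * loaded candidate for this index, with weights increasing from there.)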
+ * We now walk through the nodes until we find a node to allocate the shard. + * This is not guaranteed to be balanced after this operation we still try best effort to + * allocate on the minimal eligible node. + */ + Decision.Type bestDecision = Decision.Type.NO; + RoutingNode targetNode = null; + final List nodeExplanationMap = explain ? new ArrayList<>() : null; + int weightRanking = 0; + int targetNodeProcessed = 0; + for (BalancedShardsAllocator.ModelNode currentNode : sorter.modelNodes) { + if (currentNode != sourceNode) { + RoutingNode target = currentNode.getRoutingNode(); + if (!explain && inEligibleTargetNode.contains(target)) continue; + // don't use canRebalance as we want hard filtering rules to apply. See #17698 + if (!explain) { + // If we cannot allocate any shard to node marking it in eligible + Decision nodeLevelAllocationDecision = allocation.deciders().canAllocateAnyShardToNode(target, allocation); + if (nodeLevelAllocationDecision.type() != Decision.Type.YES) { + inEligibleTargetNode.add(currentNode.getRoutingNode()); + continue; + } + } + targetNodeProcessed++; + // don't use canRebalance as we want hard filtering rules to apply. See #17698 + Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation); + if (explain) { + nodeExplanationMap.add( + new NodeAllocationResult(currentNode.getRoutingNode().node(), allocationDecision, ++weightRanking) + ); + } + // TODO maybe we can respect throttling here too? + if (allocationDecision.type().higherThan(bestDecision)) { + bestDecision = allocationDecision.type(); + if (bestDecision == Decision.Type.YES) { + targetNode = target; + if (explain == false) { + // we are not in explain mode and already have a YES decision on the best weighted node, + // no need to continue iterating + break; + } + } + } + } + } + + return MoveDecision.cannotRemain( + canRemain, + AllocationDecision.fromDecisionType(bestDecision), + targetNode != null ? targetNode.node() : null, + nodeExplanationMap + ); + } + + /** + * Builds the internal model from all shards in the given + * {@link Iterable}. All shards in the {@link Iterable} must be assigned + * to a node. This method will skip shards in the state + * {@link ShardRoutingState#RELOCATING} since each relocating shard has + * a shadow shard in the state {@link ShardRoutingState#INITIALIZING} + * on the target node which we respect during the allocation / balancing + * process. In short, this method recreates the status-quo in the cluster. + */ + private Map buildModelFromAssigned() { + Map nodes = new HashMap<>(); + for (RoutingNode rn : routingNodes) { + BalancedShardsAllocator.ModelNode node = new BalancedShardsAllocator.ModelNode(rn); + nodes.put(rn.nodeId(), node); + for (ShardRouting shard : rn) { + assert rn.nodeId().equals(shard.currentNodeId()); + /* we skip relocating shards here since we expect an initializing shard with the same id coming in */ + if (shard.state() != RELOCATING) { + node.addShard(shard); + if (logger.isTraceEnabled()) { + logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId()); + } + } + } + } + return nodes; + } + + /** + * Allocates all given shards on the minimal eligible node for the shards index + * with respect to the weight function. All given shards must be unassigned. 
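+     * Roughly how this proceeds (the comments in the body carry the details): shards are sorted so that
+     * primaries come before replicas, each shard is assigned to the minimum weight node the deciders allow,
+     * and the remaining identical replicas of a shard are either deferred to a follow-up pass (after a
+     * successful assignment) or ignored along with it (when no node accepts it).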
+ */ + @Override + void allocateUnassigned() { + RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); + assert !nodes.isEmpty(); + if (logger.isTraceEnabled()) { + logger.trace("Start allocating unassigned shards"); + } + if (unassigned.isEmpty()) { + return; + } + + /* + * TODO: We could be smarter here and group the shards by index and then + * use the sorter to save some iterations. + */ + final PriorityComparator secondaryComparator = PriorityComparator.getAllocationComparator(allocation); + final Comparator comparator = (o1, o2) -> { + if (o1.primary() ^ o2.primary()) { + return o1.primary() ? -1 : 1; + } + final int indexCmp; + if ((indexCmp = o1.getIndexName().compareTo(o2.getIndexName())) == 0) { + return o1.getId() - o2.getId(); + } + // this comparator is more expensive than all the others up there + // that's why it's added last even though it could be easier to read + // if we'd apply it earlier. this comparator will only differentiate across + // indices all shards of the same index is treated equally. + final int secondary = secondaryComparator.compare(o1, o2); + return secondary == 0 ? indexCmp : secondary; + }; + /* + * we use 2 arrays and move replicas to the second array once we allocated an identical + * replica in the current iteration to make sure all indices get allocated in the same manner. + * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with + * 2 replica and 1 shard would look like: + * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)] + * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with + * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned. + */ + ShardRouting[] primary = unassigned.drain(); + ShardRouting[] secondary = new ShardRouting[primary.length]; + int secondaryLength = 0; + int primaryLength = primary.length; + ArrayUtil.timSort(primary, comparator); + do { + for (int i = 0; i < primaryLength; i++) { + ShardRouting shard = primary[i]; + final AllocateUnassignedDecision allocationDecision = decideAllocateUnassigned(shard); + final String assignedNodeId = allocationDecision.getTargetNode() != null + ? allocationDecision.getTargetNode().getId() + : null; + final BalancedShardsAllocator.ModelNode minNode = assignedNodeId != null ? 
nodes.get(assignedNodeId) : null; + + if (allocationDecision.getAllocationDecision() == AllocationDecision.YES) { + if (logger.isTraceEnabled()) { + logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); + } + + final long shardSize = DiskThresholdDecider.getExpectedShardSize( + shard, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, + allocation.clusterInfo(), + allocation.snapshotShardSizeInfo(), + allocation.metadata(), + allocation.routingTable() + ); + shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); + minNode.addShard(shard); + if (!shard.primary()) { + // copy over the same replica shards to the secondary array so they will get allocated + // in a subsequent iteration, allowing replicas of other shards to be allocated first + while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { + secondary[secondaryLength++] = primary[++i]; + } + } + } else { + // did *not* receive a YES decision + if (logger.isTraceEnabled()) { + logger.trace( + "No eligible node found to assign shard [{}] allocation_status [{}]", + shard, + allocationDecision.getAllocationStatus() + ); + } + + if (minNode != null) { + // throttle decision scenario + assert allocationDecision.getAllocationStatus() == UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED; + final long shardSize = DiskThresholdDecider.getExpectedShardSize( + shard, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, + allocation.clusterInfo(), + allocation.snapshotShardSizeInfo(), + allocation.metadata(), + allocation.routingTable() + ); + minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize)); + } else { + if (logger.isTraceEnabled()) { + logger.trace("No Node found to assign shard [{}]", shard); + } + } + + unassigned.ignoreShard(shard, allocationDecision.getAllocationStatus(), allocation.changes()); + if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas + while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { + unassigned.ignoreShard(primary[++i], allocationDecision.getAllocationStatus(), allocation.changes()); + } + } + } + } + primaryLength = secondaryLength; + ShardRouting[] tmp = primary; + primary = secondary; + secondary = tmp; + secondaryLength = 0; + } while (primaryLength > 0); + // clear everything we have either added it or moved to ignoreUnassigned + } + + /** + * Make a decision for allocating an unassigned shard. This method returns a two values in a tuple: the + * first value is the {@link Decision} taken to allocate the unassigned shard, the second value is the + * {@link BalancedShardsAllocator.ModelNode} representing the node that the shard should be assigned to. If the decision returned + * is of type {@link Decision.Type#NO}, then the assigned node will be null. 
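+     * Roughly, in terms of the returned {@link AllocateUnassignedDecision}: an all-NO pass yields
+     * {@code AllocationStatus.DECIDERS_NO} with no target node, while a THROTTLE on the minimum weight node
+     * yields {@code AllocationStatus.DECIDERS_THROTTLED} with that node recorded as the tentative target.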
+     */
+    @Override
+    AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting shard) {
+        if (shard.assignedToNode()) {
+            // we only make decisions for unassigned shards here
+            return AllocateUnassignedDecision.NOT_TAKEN;
+        }
+
+        final boolean explain = allocation.debugDecision();
+        Decision shardLevelDecision = allocation.deciders().canAllocate(shard, allocation);
+        if (shardLevelDecision.type() == Decision.Type.NO && explain == false) {
+            // NO decision for allocating the shard, irrespective of any particular node, so exit early
+            return AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.DECIDERS_NO, null);
+        }
+
+        /* find a node with minimal weight we can allocate on */
+        float minWeight = Float.POSITIVE_INFINITY;
+        BalancedShardsAllocator.ModelNode minNode = null;
+        Decision decision = null;
+        /* Don't iterate over an identity hashset here; the
+         * iteration order is different for each run and makes testing hard */
+        Map<String, NodeAllocationResult> nodeExplanationMap = explain ? new HashMap<>() : null;
+        List<Tuple<String, Float>> nodeWeights = explain ? new ArrayList<>() : null;
+        for (BalancedShardsAllocator.ModelNode node : nodes.values()) {
+            if (node.containsShard(shard) && explain == false) {
+                // decision is NO without needing to check anything further, so short circuit
+                continue;
+            }
+
+            // weight of this index currently on the node
+            float currentWeight = weight.weightWithAllocationConstraints(this, node, shard.getIndexName());
+            // moving the shard would not improve the balance, and we are not in explain mode, so short circuit
+            if (currentWeight > minWeight && explain == false) {
+                continue;
+            }
+
+            Decision currentDecision = allocation.deciders().canAllocate(shard, node.getRoutingNode(), allocation);
+            if (explain) {
+                nodeExplanationMap.put(node.getNodeId(), new NodeAllocationResult(node.getRoutingNode().node(), currentDecision, 0));
+                nodeWeights.add(Tuple.tuple(node.getNodeId(), currentWeight));
+            }
+            if (currentDecision.type() == Decision.Type.YES || currentDecision.type() == Decision.Type.THROTTLE) {
+                final boolean updateMinNode;
+                if (currentWeight == minWeight) {
+                    /* we have an equal weight tie breaking:
+                     * 1. if one decision is YES prefer it
+                     * 2. prefer the node that holds the primary for this index with the next id in the ring, i.e.
+                     * for the 3 shards 2 replica case we try to build up:
+                     * 1 2 0
+                     * 2 0 1
+                     * 0 1 2
+                     * such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater
+                     * than the id of the shard we need to assign. This works fine when new indices are created since
+                     * primaries are added first and we only add one shard set at a time in this algorithm.
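+                     * A hypothetical tie-break, for illustration only: when assigning a copy of shard 1 of an index,
+                     * a node whose highest primary id for that index is 2 is preferred over a candidate whose highest
+                     * primary id is 0, because 2 is the smallest id greater than 1 in the ring.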
+                     */
+                    if (currentDecision.type() == decision.type()) {
+                        final int repId = shard.id();
+                        final int nodeHigh = node.highestPrimary(shard.index().getName());
+                        final int minNodeHigh = minNode.highestPrimary(shard.getIndexName());
+                        updateMinNode = ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId))
+                            && (nodeHigh < minNodeHigh)) || (nodeHigh > repId && minNodeHigh < repId));
+                    } else {
+                        updateMinNode = currentDecision.type() == Decision.Type.YES;
+                    }
+                } else {
+                    updateMinNode = currentWeight < minWeight;
+                }
+                if (updateMinNode) {
+                    minNode = node;
+                    minWeight = currentWeight;
+                    decision = currentDecision;
+                }
+            }
+        }
+        if (decision == null) {
+            // decision was not set and a node was not assigned, so treat it as a NO decision
+            decision = Decision.NO;
+        }
+        List<NodeAllocationResult> nodeDecisions = null;
+        if (explain) {
+            nodeDecisions = new ArrayList<>();
+            // fill in the correct weight ranking, once we've been through all nodes
+            nodeWeights.sort((nodeWeight1, nodeWeight2) -> Float.compare(nodeWeight1.v2(), nodeWeight2.v2()));
+            int weightRanking = 0;
+            for (Tuple<String, Float> nodeWeight : nodeWeights) {
+                NodeAllocationResult current = nodeExplanationMap.get(nodeWeight.v1());
+                nodeDecisions.add(new NodeAllocationResult(current.getNode(), current.getCanAllocateDecision(), ++weightRanking));
+            }
+        }
+        return AllocateUnassignedDecision.fromDecision(decision, minNode != null ? minNode.getRoutingNode().node() : null, nodeDecisions);
+    }
+
+    private static final Comparator<ShardRouting> BY_DESCENDING_SHARD_ID = Comparator.comparing(ShardRouting::shardId).reversed();
+
+    /**
+     * Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the
+     * balance model. Iff this method returns true the relocation has already been executed on the
+     * simulation model as well as on the cluster.
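+     * Roughly: a YES from the deciders executes the relocation on the routing nodes and returns {@code true};
+     * a THROTTLE only simulates the move on the model and returns {@code false}; and if no shard of the index
+     * passes the deciders, nothing is moved and {@code false} is returned.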
+ */ + private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, BalancedShardsAllocator.ModelNode maxNode, String idx) { + final BalancedShardsAllocator.ModelIndex index = maxNode.getIndex(idx); + if (index != null) { + logger.trace("Try relocating shard of [{}] from [{}] to [{}]", idx, maxNode.getNodeId(), minNode.getNodeId()); + final Iterable shardRoutings = StreamSupport.stream(index.spliterator(), false) + .filter(ShardRouting::started) // cannot rebalance unassigned, initializing or relocating shards anyway + .filter(maxNode::containsShard) + .sorted(BY_DESCENDING_SHARD_ID) // check in descending order of shard id so that the decision is deterministic + ::iterator; + + final AllocationDeciders deciders = allocation.deciders(); + for (ShardRouting shard : shardRoutings) { + final Decision rebalanceDecision = deciders.canRebalance(shard, allocation); + if (rebalanceDecision.type() == Decision.Type.NO) { + continue; + } + final Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(), allocation); + if (allocationDecision.type() == Decision.Type.NO) { + continue; + } + + final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); + + maxNode.removeShard(shard); + long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); + + if (decision.type() == Decision.Type.YES) { + /* only allocate on the cluster if we are not throttled */ + logger.debug("Relocate [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); + minNode.addShard(routingNodes.relocateShard(shard, minNode.getNodeId(), shardSize, allocation.changes()).v1()); + return true; + } else { + /* allocate on the model even if throttled */ + logger.debug("Simulate relocation of [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); + assert decision.type() == Decision.Type.THROTTLE; + minNode.addShard(shard.relocate(minNode.getNodeId(), shardSize)); + return false; + } + } + } + logger.trace("No shards of [{}] can relocate from [{}] to [{}]", idx, maxNode.getNodeId(), minNode.getNodeId()); + return false; + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java new file mode 100644 index 0000000000000..593e6998141fb --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation.allocator; + +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.opensearch.cluster.routing.allocation.MoveDecision; + +/** + *

+ * A {@link ShardsBalancer} helps the {@link BalancedShardsAllocator} to perform allocation and balancing
+ * operations on the cluster.
+ *
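+ * Concrete balancers such as {@link LocalShardsBalancer} implement the decision hooks below; in tests the
+ * class can also be stubbed, e.g. (sketch) {@code ShardsBalancer balancer = mock(LocalShardsBalancer.class);}.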

    + * + * @opensearch.internal + */ +public abstract class ShardsBalancer { + + /** + * Performs allocation of unassigned shards on nodes within the cluster. + */ + abstract void allocateUnassigned(); + + /** + * Moves shards that cannot be allocated to a node anymore. + */ + abstract void moveShards(); + + /** + * Balances the nodes on the cluster model. + */ + abstract void balance(); + + /** + * Make a decision for allocating an unassigned shard. + * @param shardRouting the shard for which the decision has to be made + * @return the allocation decision + */ + abstract AllocateUnassignedDecision decideAllocateUnassigned(ShardRouting shardRouting); + + /** + * Makes a decision on whether to move a started shard to another node. + * @param shardRouting the shard for which the decision has to be made + * @return a move decision for the shard + */ + abstract MoveDecision decideMove(ShardRouting shardRouting); + + /** + * Makes a decision about moving a single shard to a different node to form a more + * optimally balanced cluster. + * @param shardRouting the shard for which the move decision has to be made + * @return a move decision for the shard + */ + abstract MoveDecision decideRebalance(ShardRouting shardRouting); + + /** + * Returns the average of shards per node for the given index + */ + public float avgShardsPerNode() { + return Float.MAX_VALUE; + } + + /** + * Returns the global average of shards per node + */ + public float avgShardsPerNode(String index) { + return Float.MAX_VALUE; + } +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java index d115ee0c515cc..ae10a92a5104e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java @@ -10,6 +10,8 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.allocator.LocalShardsBalancer; +import org.opensearch.cluster.routing.allocation.allocator.ShardsBalancer; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -45,7 +47,7 @@ public void testSettings() { * for IndexShardPerNode constraint satisfied and breached. 
*/ public void testIndexShardsPerNodeConstraint() { - BalancedShardsAllocator.Balancer balancer = mock(BalancedShardsAllocator.Balancer.class); + ShardsBalancer balancer = mock(LocalShardsBalancer.class); BalancedShardsAllocator.ModelNode node = mock(BalancedShardsAllocator.ModelNode.class); AllocationConstraints constraints = new AllocationConstraints(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java index a7b53a4c4bc8b..d29249cef0818 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java @@ -43,7 +43,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Balancer; +import org.opensearch.cluster.routing.allocation.allocator.ShardsBalancer; import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.Decision; @@ -65,7 +65,7 @@ import static org.hamcrest.Matchers.startsWith; /** - * Tests for balancing a single shard, see {@link Balancer#decideRebalance(ShardRouting)}. + * Tests for balancing a single shard, see {@link ShardsBalancer#decideRebalance(ShardRouting)}. */ public class BalancedSingleShardTests extends OpenSearchAllocationTestCase { From cdc7a2fbe8c06a0ab11b35bd2142ca3ecdc0a0ba Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Tue, 18 Oct 2022 12:33:43 +0530 Subject: [PATCH 064/122] Fix decommission status update to non leader nodes (#4800) * Fix decommission status update to non leader nodes Signed-off-by: Rishab Nahata --- CHANGELOG.md | 2 + .../AwarenessAttributeDecommissionIT.java | 164 ++++++++++++++++++ .../DecommissionAttributeMetadata.java | 21 +-- .../decommission/DecommissionController.java | 8 +- 4 files changed, 179 insertions(+), 16 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f6e5425f37db..bc22e66fe11ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -138,7 +138,9 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) - Fixed randomly failing test ([4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) - Update version check after backport ([4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) +- Fix decommission status update to non leader nodes ([4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) - Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) + ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java new file mode 100644 index 0000000000000..b8318503ee4a5 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -0,0 +1,164 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.coordination; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.junit.After; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.test.NodeRoles.onlyRole; +import static org.opensearch.test.OpenSearchIntegTestCase.client; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class AwarenessAttributeDecommissionIT extends OpenSearchIntegTestCase { + private final Logger logger = LogManager.getLogger(AwarenessAttributeDecommissionIT.class); + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(MockTransportService.TestPlugin.class); + } + + @After + public void cleanup() throws Exception { + assertNoTimeout(client().admin().cluster().prepareHealth().get()); + } + + public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionException, InterruptedException { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + logger.info("--> start 3 cluster manager nodes on zones 'a' & 'b' & 'c'"); + List clusterManagerNodes = 
internalCluster().startNodes( + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "a") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "b") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "c") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build() + ); + + logger.info("--> start 3 data nodes on zones 'a' & 'b' & 'c'"); + List dataNodes = internalCluster().startNodes( + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "a") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "b") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "c") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build() + ); + + ensureStableCluster(6); + + logger.info("--> starting decommissioning nodes in zone {}", 'c'); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); + DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); + DecommissionResponse decommissionResponse = client().execute(DecommissionAction.INSTANCE, decommissionRequest).get(); + assertTrue(decommissionResponse.isAcknowledged()); + + // Will wait for all events to complete + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + + // assert that decommission status is successful + GetDecommissionStateResponse response = client().execute(GetDecommissionStateAction.INSTANCE, new GetDecommissionStateRequest()) + .get(); + assertEquals(response.getDecommissionedAttribute(), decommissionAttribute); + assertEquals(response.getDecommissionStatus(), DecommissionStatus.SUCCESSFUL); + + ClusterState clusterState = client(clusterManagerNodes.get(0)).admin().cluster().prepareState().execute().actionGet().getState(); + assertEquals(4, clusterState.nodes().getSize()); + + // assert status on nodes that are part of cluster currently + Iterator discoveryNodeIterator = clusterState.nodes().getNodes().valuesIt(); + while (discoveryNodeIterator.hasNext()) { + // assert no node has decommissioned attribute + DiscoveryNode node = discoveryNodeIterator.next(); + assertNotEquals(node.getAttributes().get("zone"), "c"); + + // assert all the nodes has status as SUCCESSFUL + ClusterService localNodeClusterService = internalCluster().getInstance(ClusterService.class, node.getName()); + assertEquals( + localNodeClusterService.state().metadata().decommissionAttributeMetadata().status(), + DecommissionStatus.SUCCESSFUL + ); + } + + // assert status on decommissioned node + // Here we will verify that until it got kicked out, it received appropriate status updates + // decommissioned nodes hence will have status as IN_PROGRESS as it will be kicked out later after this + // and won't receive status update to SUCCESSFUL + String randomDecommissionedNode = randomFrom(clusterManagerNodes.get(2), dataNodes.get(2)); + ClusterService decommissionedNodeClusterService = internalCluster().getInstance(ClusterService.class, randomDecommissionedNode); + assertEquals( + decommissionedNodeClusterService.state().metadata().decommissionAttributeMetadata().status(), + DecommissionStatus.IN_PROGRESS + ); + + // Will wait for all 
events to complete + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + + // Recommissioning the zone back to gracefully succeed the test once above tests succeeds + DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(clusterManagerNodes.get(0)).execute( + DeleteDecommissionStateAction.INSTANCE, + new DeleteDecommissionStateRequest() + ).get(); + assertTrue(deleteDecommissionStateResponse.isAcknowledged()); + + // will wait for cluster to stabilise with a timeout of 2 min (findPeerInterval for decommissioned nodes) + // as by then all nodes should have joined the cluster + ensureStableCluster(6, TimeValue.timeValueMinutes(2)); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java index dbb3fea823eb6..d3d508bf36451 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java @@ -79,35 +79,29 @@ public DecommissionStatus status() { /** * Returns instance of the metadata with updated status * @param newStatus status to be updated with - * @return instance with valid status */ // synchronized is strictly speaking not needed (this is called by a single thread), but just to be safe - public synchronized DecommissionAttributeMetadata setUpdatedStatus(DecommissionStatus newStatus) { - // if the current status is the expected status already, we return the same instance - if (newStatus.equals(status)) { - return this; + public synchronized void validateNewStatus(DecommissionStatus newStatus) { + // if the current status is the expected status already or new status is FAILED, we let the check pass + if (newStatus.equals(status) || newStatus.equals(DecommissionStatus.FAILED)) { + return; } // We don't expect that INIT will be new status, as it is registered only when starting the decommission action switch (newStatus) { case IN_PROGRESS: - validateAndSetStatus(DecommissionStatus.INIT, newStatus); + validateStatus(DecommissionStatus.INIT, newStatus); break; case SUCCESSFUL: - validateAndSetStatus(DecommissionStatus.IN_PROGRESS, newStatus); - break; - case FAILED: - // we don't need to validate here and directly update status to FAILED - this.status = newStatus; + validateStatus(DecommissionStatus.IN_PROGRESS, newStatus); break; default: throw new IllegalArgumentException( "illegal decommission status [" + newStatus.status() + "] requested for updating metadata" ); } - return this; } - private void validateAndSetStatus(DecommissionStatus expected, DecommissionStatus next) { + private void validateStatus(DecommissionStatus expected, DecommissionStatus next) { if (status.equals(expected) == false) { assert false : "can't move decommission status to [" + next @@ -120,7 +114,6 @@ private void validateAndSetStatus(DecommissionStatus expected, DecommissionStatu "can't move decommission status to [" + next + "]. 
current status: [" + status + "] (expected [" + expected + "])" ); } - status = next; } @Override diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java index 7719012f2f3d7..b58d99a9d59db 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java @@ -246,8 +246,12 @@ public ClusterState execute(ClusterState currentState) { decommissionAttributeMetadata.status(), decommissionStatus ); - // setUpdatedStatus can throw IllegalStateException if the sequence of update is not valid - decommissionAttributeMetadata.setUpdatedStatus(decommissionStatus); + // validateNewStatus can throw IllegalStateException if the sequence of update is not valid + decommissionAttributeMetadata.validateNewStatus(decommissionStatus); + decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttributeMetadata.decommissionAttribute(), + decommissionStatus + ); return ClusterState.builder(currentState) .metadata(Metadata.builder(currentState.metadata()).decommissionAttributeMetadata(decommissionAttributeMetadata)) .build(); From f3d038f2703f7928dc8b43d9a8cdc0d4ecbd528a Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Tue, 18 Oct 2022 18:23:51 +0530 Subject: [PATCH 065/122] Remove redundant field from GetDecommissionStateResponse (#4751) * Add attribute name to query param and simplify GetDecommissionStateResponse Signed-off-by: Rishab Nahata --- CHANGELOG.md | 1 + .../cluster.get_decommission_awareness.json | 12 ++- .../get/GetDecommissionStateRequest.java | 42 +++++++- .../GetDecommissionStateRequestBuilder.java | 9 ++ .../get/GetDecommissionStateResponse.java | 101 +++++------------- .../TransportGetDecommissionStateAction.java | 5 +- .../opensearch/client/ClusterAdminClient.java | 6 +- .../client/support/AbstractClient.java | 6 +- .../RestGetDecommissionStateAction.java | 6 +- .../get/GetDecommissionStateRequestTests.java | 50 +++++++++ .../GetDecommissionStateResponseTests.java | 13 +-- 11 files changed, 158 insertions(+), 93 deletions(-) create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index bc22e66fe11ad..646151674f9f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -136,6 +136,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) - Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) +- [BUG]: Remove redundant field from GetDecommissionStateResponse ([#4751](https://github.com/opensearch-project/OpenSearch/pull/4751)) - Fixed randomly failing test ([4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) - Update version check after backport ([4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) - Fix decommission status update to non leader nodes ([4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json index 430f96921fbc2..302dea4ec31a7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json @@ -8,10 +8,16 @@ "url": { "paths": [ { - "path": "/_cluster/decommission/awareness/_status", - "methods": [ + "path":"/_cluster/decommission/awareness/{awareness_attribute_name}/_status", + "methods":[ "GET" - ] + ], + "parts":{ + "awareness_attribute_name":{ + "type":"string", + "description":"Awareness attribute name" + } + } } ] } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java index 90150c71bf3f2..1f301aa2b5273 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java @@ -10,11 +10,14 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import java.io.IOException; +import static org.opensearch.action.ValidateActions.addValidationError; + /** * Get Decommissioned attribute request * @@ -22,19 +25,56 @@ */ public class GetDecommissionStateRequest extends ClusterManagerNodeReadRequest { + private String attributeName; + public GetDecommissionStateRequest() {} + /** + * Constructs a new get decommission state request with given attribute name + * + * @param attributeName name of the attribute + */ + public GetDecommissionStateRequest(String attributeName) { + this.attributeName = attributeName; + } + public GetDecommissionStateRequest(StreamInput in) throws IOException { super(in); + attributeName = in.readString(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); + out.writeString(attributeName); } @Override public ActionRequestValidationException validate() { - return null; + ActionRequestValidationException validationException = null; + if (attributeName == null || Strings.isEmpty(attributeName)) { + validationException = addValidationError("attribute name is missing", validationException); + } + return validationException; + } + + /** + * Sets attribute name + * + * @param attributeName attribute name + * @return this request + */ + public GetDecommissionStateRequest attributeName(String attributeName) { + this.attributeName = attributeName; + return this; + } + + /** + * Returns attribute name + * + * @return attributeName name of attribute + */ + public String attributeName() { + return this.attributeName; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java index 2b8616d0511cd..e766e9c674ff7 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java @@ -27,4 +27,13 @@ public class GetDecommissionStateRequestBuilder extends ClusterManagerNodeReadOp public GetDecommissionStateRequestBuilder(OpenSearchClient client, GetDecommissionStateAction action) { super(client, action, new GetDecommissionStateRequest()); } + + /** + * @param attributeName name of attribute + * @return current object + */ + public GetDecommissionStateRequestBuilder setAttributeName(String attributeName) { + request.attributeName(attributeName); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java index 2034cdb16e40f..ec0bd7cf7e7eb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java @@ -10,7 +10,6 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.action.ActionResponse; -import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -31,49 +30,40 @@ */ public class GetDecommissionStateResponse extends ActionResponse implements ToXContentObject { - private DecommissionAttribute decommissionedAttribute; + private String attributeValue; private DecommissionStatus status; GetDecommissionStateResponse() { this(null, null); } - GetDecommissionStateResponse(DecommissionAttribute decommissionedAttribute, DecommissionStatus status) { - this.decommissionedAttribute = decommissionedAttribute; + GetDecommissionStateResponse(String attributeValue, DecommissionStatus status) { + this.attributeValue = attributeValue; this.status = status; } GetDecommissionStateResponse(StreamInput in) throws IOException { // read decommissioned attribute and status only if it is present if (in.readBoolean()) { - this.decommissionedAttribute = new DecommissionAttribute(in); - } - if (in.readBoolean()) { + this.attributeValue = in.readString(); this.status = DecommissionStatus.fromString(in.readString()); } } @Override public void writeTo(StreamOutput out) throws IOException { - // if decommissioned attribute is null, mark absence of decommissioned attribute - if (decommissionedAttribute == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - decommissionedAttribute.writeTo(out); - } - - // if status is null, mark absence of status - if (status == null) { + // if decommissioned attribute value is null or status is null then mark its absence + if (attributeValue == null || status == null) { out.writeBoolean(false); } else { out.writeBoolean(true); + out.writeString(attributeValue); out.writeString(status.status()); } } - public DecommissionAttribute getDecommissionedAttribute() { - return decommissionedAttribute; + public String getAttributeValue() { + return attributeValue; } public DecommissionStatus getDecommissionStatus() { @@ -83,13 +73,8 @@ public DecommissionStatus getDecommissionStatus() { @Override public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startObject("awareness"); - if (decommissionedAttribute != null) { - builder.field(decommissionedAttribute.attributeName(), decommissionedAttribute.attributeValue()); - } - builder.endObject(); - if (status != null) { - builder.field("status", status); + if (attributeValue != null && status != null) { + builder.field(attributeValue, status); } builder.endObject(); return builder; @@ -97,58 +82,25 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static GetDecommissionStateResponse fromXContent(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - String attributeType = "awareness"; XContentParser.Token token; - DecommissionAttribute decommissionAttribute = null; + String attributeValue = null; DecommissionStatus status = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if (attributeType.equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new OpenSearchParseException( - "failed to parse decommission attribute type [{}], expected object", - attributeType - ); - } - token = parser.nextToken(); - if (token != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String fieldName = parser.currentName(); - String value; - token = parser.nextToken(); - if (token == XContentParser.Token.VALUE_STRING) { - value = parser.text(); - } else { - throw new OpenSearchParseException( - "failed to parse attribute [{}], expected string for attribute value", - fieldName - ); - } - decommissionAttribute = new DecommissionAttribute(fieldName, value); - parser.nextToken(); - } else { - throw new OpenSearchParseException("failed to parse attribute type [{}], unexpected type", attributeType); - } - } else { - throw new OpenSearchParseException("failed to parse attribute type [{}]", attributeType); - } - } else if ("status".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new OpenSearchParseException( - "failed to parse status of decommissioning, expected string but found unknown type" - ); - } - status = DecommissionStatus.fromString(parser.text().toLowerCase(Locale.ROOT)); - } else { - throw new OpenSearchParseException( - "unknown field found [{}], failed to parse the decommission attribute", - currentFieldName - ); + attributeValue = parser.currentName(); + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("failed to parse status of decommissioning, expected string but found unknown type"); } + status = DecommissionStatus.fromString(parser.text().toLowerCase(Locale.ROOT)); + } else { + throw new OpenSearchParseException( + "failed to parse decommission state, expected [{}] but found [{}]", + XContentParser.Token.FIELD_NAME, + token + ); } } - return new GetDecommissionStateResponse(decommissionAttribute, status); + return new GetDecommissionStateResponse(attributeValue, status); } @Override @@ -156,11 +108,14 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; GetDecommissionStateResponse that = (GetDecommissionStateResponse) o; - return decommissionedAttribute.equals(that.decommissionedAttribute) 
&& status == that.status; + if (!Objects.equals(attributeValue, that.attributeValue)) { + return false; + } + return Objects.equals(status, that.status); } @Override public int hashCode() { - return Objects.hash(decommissionedAttribute, status); + return Objects.hash(attributeValue, status); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java index 48ed13c6c0aaf..d811ab8cf6948 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java @@ -69,10 +69,11 @@ protected void clusterManagerOperation( ActionListener listener ) throws Exception { DecommissionAttributeMetadata decommissionAttributeMetadata = state.metadata().decommissionAttributeMetadata(); - if (decommissionAttributeMetadata != null) { + if (decommissionAttributeMetadata != null + && request.attributeName().equals(decommissionAttributeMetadata.decommissionAttribute().attributeName())) { listener.onResponse( new GetDecommissionStateResponse( - decommissionAttributeMetadata.decommissionAttribute(), + decommissionAttributeMetadata.decommissionAttribute().attributeValue(), decommissionAttributeMetadata.status() ) ); diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index 77ddb5e17c742..4ab438ec064f1 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -873,17 +873,17 @@ public interface ClusterAdminClient extends OpenSearchClient { /** * Get Decommissioned attribute */ - ActionFuture getDecommission(GetDecommissionStateRequest request); + ActionFuture getDecommissionState(GetDecommissionStateRequest request); /** * Get Decommissioned attribute */ - void getDecommission(GetDecommissionStateRequest request, ActionListener listener); + void getDecommissionState(GetDecommissionStateRequest request, ActionListener listener); /** * Get Decommissioned attribute */ - GetDecommissionStateRequestBuilder prepareGetDecommission(); + GetDecommissionStateRequestBuilder prepareGetDecommissionState(); /** * Deletes the decommission metadata. 
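As an aside for readers of this patch: after this change the awareness attribute name has to be supplied with the request, and the admin-client entry points are renamed from getDecommission/prepareGetDecommission to getDecommissionState/prepareGetDecommissionState. Below is a minimal sketch of the resulting call path; it is illustrative only and not part of the patch itself, assuming nothing beyond an org.opensearch.client.Client handle (for example inside an integration test) and using "zone" as the attribute name.

import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest;
import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse;
import org.opensearch.client.Client;
import org.opensearch.cluster.decommission.DecommissionStatus;

/** Illustrative sketch only; not part of this patch. */
public final class GetDecommissionStateExample {

    private GetDecommissionStateExample() {}

    /** Fetches the decommission status for the given awareness attribute, e.g. "zone". */
    public static DecommissionStatus fetchStatus(Client client, String attributeName) {
        // The request now carries the attribute name; validate() rejects a null or empty
        // name with "attribute name is missing".
        GetDecommissionStateRequest request = new GetDecommissionStateRequest(attributeName);
        GetDecommissionStateResponse response = client.admin().cluster().getDecommissionState(request).actionGet();
        // The response exposes just the decommissioned attribute value (e.g. "c") via
        // getAttributeValue(); the full DecommissionAttribute is no longer returned.
        return response.getDecommissionStatus();
    }
}

The builder variant added in this patch reads the same way: client.admin().cluster().prepareGetDecommissionState().setAttributeName("zone").get().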
diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index b42010d4253d5..828ca5f8083ee 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -1417,17 +1417,17 @@ public DecommissionRequestBuilder prepareDecommission(DecommissionRequest reques } @Override - public ActionFuture getDecommission(GetDecommissionStateRequest request) { + public ActionFuture getDecommissionState(GetDecommissionStateRequest request) { return execute(GetDecommissionStateAction.INSTANCE, request); } @Override - public void getDecommission(GetDecommissionStateRequest request, ActionListener listener) { + public void getDecommissionState(GetDecommissionStateRequest request, ActionListener listener) { execute(GetDecommissionStateAction.INSTANCE, request, listener); } @Override - public GetDecommissionStateRequestBuilder prepareGetDecommission() { + public GetDecommissionStateRequestBuilder prepareGetDecommissionState() { return new GetDecommissionStateRequestBuilder(this, GetDecommissionStateAction.INSTANCE); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java index 8bc89ebf37960..5d72adbd6ae08 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetDecommissionStateAction.java @@ -30,7 +30,7 @@ public class RestGetDecommissionStateAction extends BaseRestHandler { @Override public List routes() { - return singletonList(new Route(GET, "/_cluster/decommission/awareness/_status")); + return singletonList(new Route(GET, "/_cluster/decommission/awareness/{awareness_attribute_name}/_status")); } @Override @@ -41,6 +41,8 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { GetDecommissionStateRequest getDecommissionStateRequest = Requests.getDecommissionStateRequest(); - return channel -> client.admin().cluster().getDecommission(getDecommissionStateRequest, new RestToXContentListener<>(channel)); + String attributeName = request.param("awareness_attribute_name"); + getDecommissionStateRequest.attributeName(attributeName); + return channel -> client.admin().cluster().getDecommissionState(getDecommissionStateRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestTests.java new file mode 100644 index 0000000000000..973485e1917f7 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestTests.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.decommission.awareness.get; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class GetDecommissionStateRequestTests extends OpenSearchTestCase { + public void testSerialization() throws IOException { + String attributeName = "zone"; + final GetDecommissionStateRequest originalRequest = new GetDecommissionStateRequest(attributeName); + final GetDecommissionStateRequest deserialized = copyWriteable( + originalRequest, + writableRegistry(), + GetDecommissionStateRequest::new + ); + assertEquals(deserialized.attributeName(), originalRequest.attributeName()); + } + + public void testValidation() { + { + String attributeName = null; + final GetDecommissionStateRequest request = new GetDecommissionStateRequest(attributeName); + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertTrue(e.getMessage().contains("attribute name is missing")); + } + { + String attributeName = ""; + final GetDecommissionStateRequest request = new GetDecommissionStateRequest(attributeName); + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertTrue(e.getMessage().contains("attribute name is missing")); + } + { + String attributeName = "zone"; + final GetDecommissionStateRequest request = new GetDecommissionStateRequest(attributeName); + ActionRequestValidationException e = request.validate(); + assertNull(e); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java index 97bc54d8d7b30..437faf2a75720 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponseTests.java @@ -8,7 +8,6 @@ package org.opensearch.action.admin.cluster.decommission.awareness.get; -import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.test.AbstractXContentTestCase; @@ -18,11 +17,13 @@ public class GetDecommissionStateResponseTests extends AbstractXContentTestCase { @Override protected GetDecommissionStateResponse createTestInstance() { - DecommissionStatus status = randomFrom(DecommissionStatus.values()); - String attributeName = randomAlphaOfLength(10); - String attributeValue = randomAlphaOfLength(10); - DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); - return new GetDecommissionStateResponse(decommissionAttribute, status); + DecommissionStatus status = null; + String attributeValue = null; + if (randomBoolean()) { + status = randomFrom(DecommissionStatus.values()); + attributeValue = randomAlphaOfLength(10); + } + return new GetDecommissionStateResponse(attributeValue, status); } @Override From b19429b97d06876408474e11287be1a8e6131d58 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Tue, 18 Oct 2022 21:09:01 +0530 Subject: [PATCH 066/122] Fix bug in AwarenessAttributeDecommissionIT (#4822) * Fix bug in AwarenessAttributeDecommissionIT Signed-off-by: Rishab Nahata --- CHANGELOG.md | 1 + 
.../coordination/AwarenessAttributeDecommissionIT.java | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 646151674f9f4..c7f5ad1c95ab4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -141,6 +141,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update version check after backport ([4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) - Fix decommission status update to non leader nodes ([4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) - Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) +- Fix bug in AwarenessAttributeDecommissionIT([4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java index b8318503ee4a5..a2270d63ba6fa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -40,7 +40,6 @@ import java.util.concurrent.ExecutionException; import static org.opensearch.test.NodeRoles.onlyRole; -import static org.opensearch.test.OpenSearchIntegTestCase.client; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -113,9 +112,11 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); // assert that decommission status is successful - GetDecommissionStateResponse response = client().execute(GetDecommissionStateAction.INSTANCE, new GetDecommissionStateRequest()) - .get(); - assertEquals(response.getDecommissionedAttribute(), decommissionAttribute); + GetDecommissionStateResponse response = client().execute( + GetDecommissionStateAction.INSTANCE, + new GetDecommissionStateRequest(decommissionAttribute.attributeName()) + ).get(); + assertEquals(response.getAttributeValue(), decommissionAttribute.attributeValue()); assertEquals(response.getDecommissionStatus(), DecommissionStatus.SUCCESSFUL); ClusterState clusterState = client(clusterManagerNodes.get(0)).admin().cluster().prepareState().execute().actionGet().getState(); From f0b759d19fe0b2650cf3e51ae639d621e5c041c7 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 18 Oct 2022 14:04:26 -0700 Subject: [PATCH 067/122] Exclude jettison version brought in with hadoop-minicluster. 
(#4787) Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + buildSrc/version.properties | 1 + plugins/discovery-azure-classic/build.gradle | 2 +- test/fixtures/hdfs-fixture/build.gradle | 2 ++ 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7f5ad1c95ab4..1304f8bb203d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) - Bumps `tika` from 2.4.0 to 2.5.0 ([#4791](https://github.com/opensearch-project/OpenSearch/pull/4791)) +- Exclude jettison version brought in with hadoop-minicluster. ([#4787](https://github.com/opensearch-project/OpenSearch/pull/4787)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 08784c82a4cc4..db0e5b142f7de 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -17,6 +17,7 @@ supercsv = 2.4.0 log4j = 2.17.1 slf4j = 1.7.36 asm = 9.3 +jettison = 1.5.1 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.12.1 diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 8ca9491f834a6..c88d19f0e2806 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -59,7 +59,7 @@ dependencies { api "com.sun.jersey:jersey-client:${versions.jersey}" api "com.sun.jersey:jersey-core:${versions.jersey}" api "com.sun.jersey:jersey-json:${versions.jersey}" - api 'org.codehaus.jettison:jettison:1.5.1' + api "org.codehaus.jettison:jettison:${versions.jettison}" api 'com.sun.xml.bind:jaxb-impl:2.2.3-1' // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index d01db6376db42..03455adc8033d 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -35,7 +35,9 @@ group = 'hdfs' dependencies { api("org.apache.hadoop:hadoop-minicluster:3.3.4") { exclude module: 'websocket-client' + exclude module: 'jettison' } + api "org.codehaus.jettison:jettison:${versions.jettison}" api "org.apache.commons:commons-compress:1.21" api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" From fce34ce68d7f86f8550036e9a51ff58223beb130 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 18 Oct 2022 15:59:08 -0700 Subject: [PATCH 068/122] Bump protobuf-java to 3.21.7 in repository-gcs and repository-hdfs. (#4790) Signed-off-by: Marc Handalian Add missing SHAs. 
Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- CHANGELOG.md | 4 +++- plugins/repository-gcs/build.gradle | 2 +- plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 | 1 - plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 | 1 + plugins/repository-hdfs/build.gradle | 2 +- .../repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 | 1 - .../repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 | 1 + 7 files changed, 7 insertions(+), 5 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1304f8bb203d1..15f3e1e5cdd2a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,7 +41,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `azure-storage-common` from 12.18.0 to 12.18.1 - Bumps `forbiddenapis` from 3.3 to 3.4 - Bumps `gson` from 2.9.0 to 2.9.1 -- Bumps `protobuf-java` from 3.21.2 to 3.21.7 +- Bumps `protobuf-java` from 3.21.2 to 3.21.7 - Bumps `azure-core` from 1.31.0 to 1.33.0 - Bumps `avro` from 1.11.0 to 1.11.1 - Bumps `woodstox-core` from 6.3.0 to 6.3.1 @@ -60,6 +60,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) - Bumps `tika` from 2.4.0 to 2.5.0 ([#4791](https://github.com/opensearch-project/OpenSearch/pull/4791)) - Exclude jettison version brought in with hadoop-minicluster. ([#4787](https://github.com/opensearch-project/OpenSearch/pull/4787)) +- Bump protobuf-java to 3.21.7 in repository-gcs and repository-hdfs ([#]()) + ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 097e96fcd8fdc..05e879547a4b0 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -66,7 +66,7 @@ dependencies { api 'com.google.api:gax:2.17.0' api 'org.threeten:threetenbp:1.4.4' api 'com.google.protobuf:protobuf-java-util:3.20.0' - api 'com.google.protobuf:protobuf-java:3.19.3' + api 'com.google.protobuf:protobuf-java:3.21.7' api 'com.google.code.gson:gson:2.9.0' api 'com.google.api.grpc:proto-google-common-protos:2.8.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 deleted file mode 100644 index 655ecd1f1c1c9..0000000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b57f1b1b9e281231c3fcfc039ce3021e29ff570 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 new file mode 100644 index 0000000000000..faa673a23ef41 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 @@ -0,0 +1 @@ +96cfc7147192f1de72c3d7d06972155ffb7d180c \ No newline at end of file diff 
--git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 792bdc6bacd4a..07dc8b8547b80 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -69,7 +69,7 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.21.4' + api 'com.google.protobuf:protobuf-java:3.21.7' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 deleted file mode 100644 index f232c9a449547..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9947febd7a6d0695726c78f603a149b7b7c108e0 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 new file mode 100644 index 0000000000000..faa673a23ef41 --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 @@ -0,0 +1 @@ +96cfc7147192f1de72c3d7d06972155ffb7d180c \ No newline at end of file From 49363fb06a4cbd48e6baa90f53b74764fb8ec155 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 18 Oct 2022 19:01:27 -0500 Subject: [PATCH 069/122] Disable merge on refresh in DiskThresholdDeciderIT (#4828) Disables merge on refresh for DiskThresholdDeciderIT.testRestoreSnapshotAllocationDoesNotExceedWatermark. Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../routing/allocation/decider/DiskThresholdDeciderIT.java | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15f3e1e5cdd2a..f592ee04e55b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -166,6 +166,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) - Better plural stemmer than minimal_english ([#4738](https://github.com/opensearch-project/OpenSearch/pull/4738)) +- Disable merge on refresh in DiskThresholdDeciderIT ([#4828](https://github.com/opensearch-project/OpenSearch/pull/4828)) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 10e809e2fb5dc..955f0f0465d88 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -217,6 +217,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6) .put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms") + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false) .build() ); final long 
minShardSize = createReasonableSizedShards(indexName); From eb3301554ec7e1a07866508ffc390b8651dc3394 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Vl=C4=8Dek?= Date: Wed, 19 Oct 2022 02:27:08 +0200 Subject: [PATCH 070/122] Add groupId value propagation tests for ZIP publication task (#4772) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The groupId can be defined on several levels. This commit adds more tests to cover the "edge" cases. - In one case the groupId is inherited from the top most 'allprojects' section (and thus can be missing in the publications section). - The other case is opposite, it tests that if the groupId is defined on several levels then the most internal level outweighs the other levels. Closes: #4771 Signed-off-by: Lukáš Vlček Signed-off-by: Lukáš Vlček --- CHANGELOG.md | 1 + .../gradle/pluginzip/PublishTests.java | 70 +++++++++++++++++++ .../pluginzip/allProjectsGroup.gradle | 28 ++++++++ .../pluginzip/groupPriorityLevel.gradle | 30 ++++++++ 4 files changed, 129 insertions(+) create mode 100644 buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle create mode 100644 buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle diff --git a/CHANGELOG.md b/CHANGELOG.md index f592ee04e55b9..4c4c7c80e5410 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Recommissioning of zone. REST layer support. ([#4624](https://github.com/opensearch-project/OpenSearch/pull/4604)) - Added in-flight cancellation of SearchShardTask based on resource consumption ([#4565](https://github.com/opensearch-project/OpenSearch/pull/4565)) - Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) +- Add groupId value propagation tests for ZIP publication task ([#4772](https://github.com/opensearch-project/OpenSearch/pull/4772)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java index 2ca0e507acb44..148a836f32b41 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -271,6 +271,76 @@ public void useDefaultValues() throws IOException, URISyntaxException, XmlPullPa assertEquals(model.getUrl(), "https://github.com/doe/sample-plugin"); } + /** + * If the `group` is defined in gradle's allprojects section then it does not have to defined in publications. 
+ */ + @Test + public void allProjectsGroup() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("allProjectsGroup.gradle", "build", ZIP_PUBLISH_TASK); + BuildResult result = runner.build(); + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate default values + MavenXpp3Reader reader = new MavenXpp3Reader(); + Model model = reader.read( + new FileReader( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "opensearch", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) + ) + ); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getGroupId(), "org.opensearch"); + } + + /** + * The groupId value can be defined on several levels. This tests that the most internal level outweighs other levels. + */ + @Test + public void groupPriorityLevel() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("groupPriorityLevel.gradle", "build", ZIP_PUBLISH_TASK); + BuildResult result = runner.build(); + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate default values + MavenXpp3Reader reader = new MavenXpp3Reader(); + Model model = reader.read( + new FileReader( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "level", + "3", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) + ) + ); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getGroupId(), "level.3"); + } + /** * In this case the Publication entity is completely missing but still the POM file is generated using the default * values including the groupId and version values obtained from the Gradle project object. 
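An aside on the path assertions these tests repeat: the staged POM always lands under build/local-staging-repo, with the effective groupId split into one directory per segment, so the "level.3" group used above maps to level/3/... while "org.opensearch" maps to org/opensearch/... The helper below is only an illustrative sketch, not part of the patch; the class and method names are invented, and it simply generalizes the String.join(File.separator, ...) construction the tests build by hand.

import java.io.File;
import java.io.FileReader;
import java.io.IOException;

import org.apache.maven.model.Model;
import org.apache.maven.model.io.xpp3.MavenXpp3Reader;
import org.codehaus.plexus.util.xml.pull.XmlPullParserException;

/** Illustrative sketch only; not part of this patch. */
public final class StagedPomReader {

    private StagedPomReader() {}

    /** Resolves and parses the POM staged under build/local-staging-repo for the given coordinates. */
    public static Model readStagedPom(File projectRoot, String groupId, String name, String version)
        throws IOException, XmlPullParserException {
        // "level.3" becomes "level/3" (or the platform equivalent) inside the staging repo.
        String groupPath = String.join(File.separator, groupId.split("\\."));
        File pom = new File(
            projectRoot,
            String.join(File.separator, "build", "local-staging-repo", groupPath, name, version, name + "-" + version + ".pom")
        );
        try (FileReader reader = new FileReader(pom)) {
            // Same MavenXpp3Reader the tests use; callers can then assert on getGroupId(), getVersion(), etc.
            return new MavenXpp3Reader().read(reader);
        }
    }
}

With such a helper, the allProjectsGroup case above would reduce to asserting that readStagedPom(projectDir.getRoot(), "org.opensearch", PROJECT_NAME, "2.0.0.0").getGroupId() equals "org.opensearch", and the groupPriorityLevel case to asserting that the innermost pom-level group ("level.3") wins.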
diff --git a/buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle b/buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle new file mode 100644 index 0000000000000..80638107c86e1 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle @@ -0,0 +1,28 @@ +plugins { + id 'java-gradle-plugin' + id 'opensearch.pluginzip' +} + +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +allprojects { + group = 'org.opensearch' +} + +publishing { + publications { + pluginZip(MavenPublication) { publication -> + pom { + name = "sample-plugin" + description = "pluginDescription" + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle b/buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle new file mode 100644 index 0000000000000..4da02c9f191d8 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle @@ -0,0 +1,30 @@ +plugins { + id 'java-gradle-plugin' + id 'opensearch.pluginzip' +} + +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +allprojects { + group = 'level.1' +} + +publishing { + publications { + pluginZip(MavenPublication) { publication -> + groupId = "level.2" + pom { + name = "sample-plugin" + description = "pluginDescription" + groupId = "level.3" + } + } + } +} From ec3473708326fcb126f6a4440bbda93f4a1a6c14 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Wed, 19 Oct 2022 09:10:24 -0500 Subject: [PATCH 071/122] [Remove] Legacy Version support from Snapshot/Restore Service (#4728) Removes all stale snapshot / restore code for legacy versions prior to OpenSearch 2.0. This includes handling null ShardGenerations, partial concurrency, null index generations, etc. which are no longer supported in snapshot versions after legacy 7.5. 
Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 2 + .../s3/S3BlobStoreRepositoryTests.java | 64 ----- .../repositories/s3/S3Repository.java | 85 ------- .../MultiVersionRepositoryAccessIT.java | 22 +- .../opensearch/snapshots/CloneSnapshotIT.java | 20 +- .../snapshots/ConcurrentSnapshotsIT.java | 38 --- .../CorruptedBlobStoreRepositoryIT.java | 191 -------------- .../delete/DeleteSnapshotRequest.java | 18 +- .../cluster/SnapshotDeletionsInProgress.java | 40 +-- .../cluster/SnapshotsInProgress.java | 59 +---- .../IndexMetaDataGenerations.java | 11 +- .../repositories/RepositoryData.java | 53 ++-- .../repositories/ShardGenerations.java | 23 +- .../blobstore/BlobStoreRepository.java | 238 ++++++------------ .../opensearch/snapshots/SnapshotInfo.java | 13 +- .../snapshots/SnapshotShardsService.java | 6 - .../snapshots/SnapshotsService.java | 232 +++-------------- .../repositories/RepositoryDataTests.java | 8 +- .../blobstore/BlobStoreTestUtil.java | 2 +- .../AbstractSnapshotIntegTestCase.java | 7 +- 20 files changed, 160 insertions(+), 972 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c4c7c80e5410..d294eb68ee7c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -99,6 +99,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) - Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) +- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) + ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index b35d4080a413a..3c3f2887469b3 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -36,8 +36,6 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import fixture.s3.S3HttpHandler; -import org.opensearch.action.ActionRunnable; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; @@ -45,29 +43,21 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.NamedXContentRegistry; -import 
org.opensearch.common.xcontent.XContentFactory; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; -import org.opensearch.repositories.RepositoriesService; -import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; -import org.opensearch.snapshots.SnapshotId; -import org.opensearch.snapshots.SnapshotsService; import org.opensearch.snapshots.mockstore.BlobStoreWrapper; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -75,8 +65,6 @@ import java.util.Map; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.startsWith; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") @@ -84,8 +72,6 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class S3BlobStoreRepositoryTests extends OpenSearchMockAPIBasedRepositoryIntegTestCase { - private static final TimeValue TEST_COOLDOWN_PERIOD = TimeValue.timeValueSeconds(10L); - private String region; private String signerOverride; @@ -158,56 +144,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } - public void testEnforcedCooldownPeriod() throws IOException { - final String repoName = createRepository( - randomName(), - Settings.builder().put(repositorySettings()).put(S3Repository.COOLDOWN_PERIOD.getKey(), TEST_COOLDOWN_PERIOD).build() - ); - - final SnapshotId fakeOldSnapshot = client().admin() - .cluster() - .prepareCreateSnapshot(repoName, "snapshot-old") - .setWaitForCompletion(true) - .setIndices() - .get() - .getSnapshotInfo() - .snapshotId(); - final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName); - final RepositoryData repositoryData = getRepositoryData(repository); - final RepositoryData modifiedRepositoryData = repositoryData.withVersions( - Collections.singletonMap(fakeOldSnapshot, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.minimumCompatibilityVersion()) - ); - final BytesReference serialized = BytesReference.bytes( - modifiedRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), SnapshotsService.OLD_SNAPSHOT_FORMAT) - ); - PlainActionFuture.get(f -> repository.threadPool().generic().execute(ActionRunnable.run(f, () -> { - try (InputStream stream = serialized.streamInput()) { - repository.blobStore() - .blobContainer(repository.basePath()) - .writeBlobAtomic( - BlobStoreRepository.INDEX_FILE_PREFIX + modifiedRepositoryData.getGenId(), - stream, - serialized.length(), - true - ); - } - }))); - - final String newSnapshotName = "snapshot-new"; - final long beforeThrottledSnapshot = repository.threadPool().relativeTimeInNanos(); - client().admin().cluster().prepareCreateSnapshot(repoName, newSnapshotName).setWaitForCompletion(true).setIndices().get(); - assertThat(repository.threadPool().relativeTimeInNanos() - beforeThrottledSnapshot, greaterThan(TEST_COOLDOWN_PERIOD.getNanos())); - - final long beforeThrottledDelete = 
repository.threadPool().relativeTimeInNanos(); - client().admin().cluster().prepareDeleteSnapshot(repoName, newSnapshotName).get(); - assertThat(repository.threadPool().relativeTimeInNanos() - beforeThrottledDelete, greaterThan(TEST_COOLDOWN_PERIOD.getNanos())); - - final long beforeFastDelete = repository.threadPool().relativeTimeInNanos(); - client().admin().cluster().prepareDeleteSnapshot(repoName, fakeOldSnapshot.getName()).get(); - assertThat(repository.threadPool().relativeTimeInNanos() - beforeFastDelete, lessThan(TEST_COOLDOWN_PERIOD.getNanos())); - } - /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. */ diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index c8377949a6842..f80e5743edbc0 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -35,10 +35,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionRunnable; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoryMetadata; @@ -52,7 +50,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.monitor.jvm.JvmInfo; @@ -62,13 +59,10 @@ import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; -import org.opensearch.snapshots.SnapshotsService; import org.opensearch.threadpool.Scheduler; -import org.opensearch.threadpool.ThreadPool; import java.util.Collection; import java.util.Map; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -182,24 +176,6 @@ class S3Repository extends MeteredBlobStoreRepository { static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); - /** - * Artificial delay to introduce after a snapshot finalization or delete has finished so long as the repository is still using the - * backwards compatible snapshot format from before - * {@link org.opensearch.snapshots.SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION} ({@link LegacyESVersion#V_7_6_0}). - * This delay is necessary so that the eventually consistent nature of AWS S3 does not randomly result in repository corruption when - * doing repository operations in rapid succession on a repository in the old metadata format. - * This setting should not be adjusted in production when working with an AWS S3 backed repository. Doing so risks the repository - * becoming silently corrupted. To get rid of this waiting period, either create a new S3 repository or remove all snapshots older than - * {@link LegacyESVersion#V_7_6_0} from the repository which will trigger an upgrade of the repository metadata to the new - * format and disable the cooldown period. 
- */ - static final Setting COOLDOWN_PERIOD = Setting.timeSetting( - "cooldown_period", - new TimeValue(3, TimeUnit.MINUTES), - new TimeValue(0, TimeUnit.MILLISECONDS), - Setting.Property.Dynamic - ); - /** * Specifies the path within bucket to repository data. Defaults to root directory. */ @@ -223,12 +199,6 @@ class S3Repository extends MeteredBlobStoreRepository { private final RepositoryMetadata repositoryMetadata; - /** - * Time period to delay repository operations by after finalizing or deleting a snapshot. - * See {@link #COOLDOWN_PERIOD} for details. - */ - private final TimeValue coolDown; - /** * Constructs an s3 backed repository */ @@ -296,8 +266,6 @@ class S3Repository extends MeteredBlobStoreRepository { ); } - coolDown = COOLDOWN_PERIOD.get(metadata.settings()); - logger.debug( "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", bucket, @@ -334,9 +302,6 @@ public void finalizeSnapshot( Function stateTransformer, ActionListener listener ) { - if (SnapshotsService.useShardGenerations(repositoryMetaVersion) == false) { - listener = delayedListener(listener); - } super.finalizeSnapshot( shardGenerations, repositoryStateId, @@ -355,59 +320,9 @@ public void deleteSnapshots( Version repositoryMetaVersion, ActionListener listener ) { - if (SnapshotsService.useShardGenerations(repositoryMetaVersion) == false) { - listener = delayedListener(listener); - } super.deleteSnapshots(snapshotIds, repositoryStateId, repositoryMetaVersion, listener); } - /** - * Wraps given listener such that it is executed with a delay of {@link #coolDown} on the snapshot thread-pool after being invoked. - * See {@link #COOLDOWN_PERIOD} for details. - */ - private ActionListener delayedListener(ActionListener listener) { - final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - }); - return new ActionListener() { - @Override - public void onResponse(T response) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet( - threadPool.schedule( - ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), - coolDown, - ThreadPool.Names.SNAPSHOT - ) - ); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - - @Override - public void onFailure(Exception e) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet( - threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onFailure(e)), coolDown, ThreadPool.Names.SNAPSHOT) - ); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - }; - } - - private void logCooldownInfo() { - logger.info( - "Sleeping for [{}] after modifying repository [{}] because it contains snapshots older than version [{}]" - + " and therefore is using a backwards compatible metadata format that requires this cooldown period to avoid " - + "repository corruption. 
To get rid of this message and move to the new repository metadata format, either remove " - + "all snapshots older than version [{}] from the repository or create a new repository at an empty location.", - coolDown, - metadata.name(), - SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION, - SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION - ); - } - @Override protected S3BlobStore createBlobStore() { return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass, repositoryMetadata); diff --git a/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java index c0b90626d7bad..49efce0516434 100644 --- a/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -32,7 +32,6 @@ package org.opensearch.upgrades; -import org.opensearch.OpenSearchStatusException; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; @@ -42,20 +41,17 @@ import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; -import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.client.RestHighLevelClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.DeprecationHandler; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.snapshots.SnapshotsService; import org.opensearch.test.rest.OpenSearchRestTestCase; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; -import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -231,20 +227,10 @@ public void testUpgradeMovesRepoToNewMetaVersion() throws IOException { ensureSnapshotRestoreWorks(repoName, "snapshot-2", shards); } } else { - if (SnapshotsService.useIndexGenerations(minimumNodeVersion()) == false) { - assertThat(TEST_STEP, is(TestStep.STEP3_OLD_CLUSTER)); - final List> expectedExceptions = - Arrays.asList(ResponseException.class, OpenSearchStatusException.class); - expectThrowsAnyOf(expectedExceptions, () -> listSnapshots(repoName)); - expectThrowsAnyOf(expectedExceptions, () -> deleteSnapshot(client, repoName, "snapshot-1")); - expectThrowsAnyOf(expectedExceptions, () -> deleteSnapshot(client, repoName, "snapshot-2")); - expectThrowsAnyOf(expectedExceptions, () -> createSnapshot(client, repoName, "snapshot-impossible", index)); - } else { - assertThat(listSnapshots(repoName), hasSize(2)); - if (TEST_STEP == TestStep.STEP4_NEW_CLUSTER) { - ensureSnapshotRestoreWorks(repoName, "snapshot-1", shards); - ensureSnapshotRestoreWorks(repoName, "snapshot-2", shards); - } + assertThat(listSnapshots(repoName), hasSize(2)); + if (TEST_STEP == TestStep.STEP4_NEW_CLUSTER) { + ensureSnapshotRestoreWorks(repoName, "snapshot-1", shards); + ensureSnapshotRestoreWorks(repoName, "snapshot-2", shards); } } } finally { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 7b95bf93f8bf4..f659d827448a0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -78,14 +78,6 @@ public void testShardClone() throws Exception { final Path repoPath = randomRepoPath(); createRepository(repoName, "fs", repoPath); - final boolean useBwCFormat = randomBoolean(); - if (useBwCFormat) { - initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - // Re-create repo to clear repository data cache - assertAcked(clusterAdmin().prepareDeleteRepository(repoName).get()); - createRepository(repoName, "fs", repoPath); - } - final String indexName = "test-index"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String sourceSnapshot = "source-snapshot"; @@ -101,21 +93,11 @@ public void testShardClone() throws Exception { final SnapshotId targetSnapshotId = new SnapshotId("target-snapshot", UUIDs.randomBase64UUID(random())); - final String currentShardGen; - if (useBwCFormat) { - currentShardGen = null; - } else { - currentShardGen = repositoryData.shardGenerations().getShardGen(indexId, shardId); - } + final String currentShardGen = repositoryData.shardGenerations().getShardGen(indexId, shardId); final String newShardGeneration = PlainActionFuture.get( f -> repository.cloneShardSnapshot(sourceSnapshotInfo.snapshotId(), targetSnapshotId, repositoryShardId, currentShardGen, f) ); - if (useBwCFormat) { - final long gen = Long.parseLong(newShardGeneration); - assertEquals(gen, 1L); // Initial snapshot brought it to 0, clone increments it to 1 - } - final BlobStoreIndexShardSnapshot targetShardSnapshot = readShardSnapshot(repository, repositoryShardId, targetSnapshotId); final BlobStoreIndexShardSnapshot sourceShardSnapshot = readShardSnapshot( repository, diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java index b6d482cad8860..f483ed7fe6c5d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java @@ -56,7 +56,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.RepositoryException; -import org.opensearch.repositories.ShardGenerations; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; import org.opensearch.test.OpenSearchIntegTestCase; @@ -1297,43 +1296,6 @@ public void testConcurrentOperationsLimit() throws Exception { } } - public void testConcurrentSnapshotWorksWithOldVersionRepo() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - final String dataNode = internalCluster().startDataOnlyNode(); - final String repoName = "test-repo"; - final Path repoPath = randomRepoPath(); - createRepository( - repoName, - "mock", - Settings.builder().put(BlobStoreRepository.CACHE_REPOSITORY_DATA.getKey(), false).put("location", repoPath) - ); - initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - - createIndexWithContent("index-slow"); - - final ActionFuture createSlowFuture = startFullSnapshotBlockedOnDataNode( - "slow-snapshot", - repoName, - dataNode - ); - - final 
String dataNode2 = internalCluster().startDataOnlyNode(); - ensureStableCluster(3); - final String indexFast = "index-fast"; - createIndexWithContent(indexFast, dataNode2, dataNode); - - final ActionFuture createFastSnapshot = startFullSnapshot(repoName, "fast-snapshot"); - - assertThat(createSlowFuture.isDone(), is(false)); - unblockNode(repoName, dataNode); - - assertSuccessful(createFastSnapshot); - assertSuccessful(createSlowFuture); - - final RepositoryData repositoryData = getRepositoryData(repoName); - assertThat(repositoryData.shardGenerations(), is(ShardGenerations.EMPTY)); - } - public void testQueuedDeleteAfterFinalizationFailure() throws Exception { final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); final String repoName = "test-repo"; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java index b806ee3e55a94..a07b245db2b21 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -32,48 +32,33 @@ package org.opensearch.snapshots; -import org.opensearch.Version; -import org.opensearch.action.ActionRunnable; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; -import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.internal.io.IOUtils; import org.opensearch.repositories.IndexId; -import org.opensearch.repositories.IndexMetaDataGenerations; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.RepositoryException; -import org.opensearch.repositories.ShardGenerations; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.threadpool.ThreadPool; -import java.io.IOException; import java.nio.channels.SeekableByteChannel; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import java.util.stream.Collectors; import java.util.stream.Stream; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFileExists; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -310,101 +295,6 @@ public void testFindDanglingLatestGeneration() throws Exception { ); } - public 
void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { - Path repo = randomRepoPath(); - final String repoName = "test-repo"; - createRepository( - repoName, - "fs", - Settings.builder() - .put("location", repo) - .put("compress", false) - // Don't cache repository data because the test manually modifies the repository data - .put(BlobStoreRepository.CACHE_REPOSITORY_DATA.getKey(), false) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ); - - final String snapshotPrefix = "test-snap-"; - final int snapshots = randomIntBetween(1, 2); - logger.info("--> creating [{}] snapshots", snapshots); - for (int i = 0; i < snapshots; ++i) { - // Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard - // generations (the existence of which would short-circuit checks for the repo containing old version snapshots) - CreateSnapshotResponse createSnapshotResponse = client().admin() - .cluster() - .prepareCreateSnapshot(repoName, snapshotPrefix + i) - .setIndices() - .setWaitForCompletion(true) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), is(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - } - final RepositoryData repositoryData = getRepositoryData(repoName); - - final SnapshotId snapshotToCorrupt = randomFrom(repositoryData.getSnapshotIds()); - logger.info("--> delete root level snapshot metadata blob for snapshot [{}]", snapshotToCorrupt); - Files.delete(repo.resolve(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotToCorrupt.getUUID()))); - - logger.info("--> strip version information from index-N blob"); - final RepositoryData withoutVersions = new RepositoryData( - repositoryData.getGenId(), - repositoryData.getSnapshotIds().stream().collect(Collectors.toMap(SnapshotId::getUUID, Function.identity())), - repositoryData.getSnapshotIds().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData::getSnapshotState)), - Collections.emptyMap(), - Collections.emptyMap(), - ShardGenerations.EMPTY, - IndexMetaDataGenerations.EMPTY - ); - - Files.write( - repo.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + withoutVersions.getGenId()), - BytesReference.toBytes( - BytesReference.bytes(withoutVersions.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT)) - ), - StandardOpenOption.TRUNCATE_EXISTING - ); - - logger.info("--> verify that repo is assumed in old metadata format"); - final SnapshotsService snapshotsService = internalCluster().getCurrentClusterManagerNodeInstance(SnapshotsService.class); - final ThreadPool threadPool = internalCluster().getCurrentClusterManagerNodeInstance(ThreadPool.class); - assertThat( - PlainActionFuture.get( - f -> threadPool.generic() - .execute( - ActionRunnable.supply( - f, - () -> snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null) - ) - ) - ), - is(SnapshotsService.OLD_SNAPSHOT_FORMAT) - ); - - logger.info("--> verify that snapshot with missing root level metadata can be deleted"); - assertAcked(startDeleteSnapshot(repoName, snapshotToCorrupt.getName()).get()); - - logger.info("--> verify that repository is assumed in new metadata format after removing corrupted snapshot"); - assertThat( - PlainActionFuture.get( - f -> threadPool.generic() - .execute( - ActionRunnable.supply( - f, - () -> 
snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null) - ) - ) - ), - is(Version.CURRENT) - ); - final RepositoryData finalRepositoryData = getRepositoryData(repoName); - for (SnapshotId snapshotId : finalRepositoryData.getSnapshotIds()) { - assertThat(finalRepositoryData.getVersion(snapshotId), is(Version.CURRENT)); - } - } - public void testMountCorruptedRepositoryData() throws Exception { disableRepoConsistencyCheck("This test intentionally corrupts the repository contents"); Client client = client(); @@ -453,87 +343,6 @@ public void testMountCorruptedRepositoryData() throws Exception { expectThrows(RepositoryException.class, () -> getRepositoryData(otherRepo)); } - public void testHandleSnapshotErrorWithBwCFormat() throws IOException, ExecutionException, InterruptedException { - final String repoName = "test-repo"; - final Path repoPath = randomRepoPath(); - createRepository(repoName, "fs", repoPath); - final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - - final String indexName = "test-index"; - createIndex(indexName); - - createFullSnapshot(repoName, "snapshot-1"); - - // In the old metadata version the shard level metadata could be moved to the next generation for all sorts of reasons, this should - // not break subsequent repository operations - logger.info("--> move shard level metadata to new generation"); - final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName); - final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0"); - final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0"); - assertFileExists(initialShardMetaPath); - Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "1")); - - startDeleteSnapshot(repoName, oldVersionSnapshot).get(); - - createFullSnapshot(repoName, "snapshot-2"); - } - - public void testRepairBrokenShardGenerations() throws Exception { - final String repoName = "test-repo"; - final Path repoPath = randomRepoPath(); - createRepository(repoName, "fs", repoPath); - final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - - final String indexName = "test-index"; - createIndex(indexName); - - createFullSnapshot(repoName, "snapshot-1"); - - startDeleteSnapshot(repoName, oldVersionSnapshot).get(); - - logger.info("--> move shard level metadata to new generation and make RepositoryData point at an older generation"); - final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName); - final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0"); - final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0"); - assertFileExists(initialShardMetaPath); - Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + randomIntBetween(1, 1000))); - - final RepositoryData repositoryData1 = getRepositoryData(repoName); - final Map snapshotIds = repositoryData1.getSnapshotIds() - .stream() - 
.collect(Collectors.toMap(SnapshotId::getUUID, Function.identity())); - final RepositoryData brokenRepoData = new RepositoryData( - repositoryData1.getGenId(), - snapshotIds, - snapshotIds.values().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData1::getSnapshotState)), - snapshotIds.values().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData1::getVersion)), - repositoryData1.getIndices().values().stream().collect(Collectors.toMap(Function.identity(), repositoryData1::getSnapshots)), - ShardGenerations.builder().putAll(repositoryData1.shardGenerations()).put(indexId, 0, "0").build(), - repositoryData1.indexMetaDataGenerations() - ); - Files.write( - repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData1.getGenId()), - BytesReference.toBytes( - BytesReference.bytes(brokenRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT)) - ), - StandardOpenOption.TRUNCATE_EXISTING - ); - - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - - createFullSnapshot(repoName, "snapshot-2"); - } - /** * Tests that a shard snapshot with a corrupted shard index file can still be used for restore and incremental snapshots. */ diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index 61c4fdb9d5c14..832b37050ffe6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -36,7 +36,6 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.snapshots.SnapshotsService; import java.io.IOException; @@ -84,27 +83,14 @@ public DeleteSnapshotRequest(String repository) { public DeleteSnapshotRequest(StreamInput in) throws IOException { super(in); repository = in.readString(); - if (in.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) { - snapshots = in.readStringArray(); - } else { - snapshots = new String[] { in.readString() }; - } + snapshots = in.readStringArray(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(repository); - if (out.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) { - out.writeStringArray(snapshots); - } else { - if (snapshots.length != 1) { - throw new IllegalArgumentException( - "Can't write snapshot delete with more than one snapshot to version [" + out.getVersion() + "]" - ); - } - out.writeString(snapshots[0]); - } + out.writeStringArray(snapshots); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java index 0f95890e56ec2..e4ce0a8f99e02 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java @@ -34,7 +34,6 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import 
org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -42,9 +41,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.repositories.RepositoryOperation; -import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; -import org.opensearch.snapshots.SnapshotsService; import java.io.IOException; import java.util.ArrayList; @@ -245,23 +242,12 @@ private Entry(List snapshots, String repoName, long startTime, long } public Entry(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) { - this.repoName = in.readString(); - this.snapshots = in.readList(SnapshotId::new); - } else { - final Snapshot snapshot = new Snapshot(in); - this.snapshots = Collections.singletonList(snapshot.getSnapshotId()); - this.repoName = snapshot.getRepository(); - } + this.repoName = in.readString(); + this.snapshots = in.readList(SnapshotId::new); this.startTime = in.readVLong(); this.repositoryStateId = in.readLong(); - if (in.getVersion().onOrAfter(SnapshotsService.FULL_CONCURRENCY_VERSION)) { - this.state = State.readFrom(in); - this.uuid = in.readString(); - } else { - this.state = State.STARTED; - this.uuid = IndexMetadata.INDEX_UUID_NA_VALUE; - } + this.state = State.readFrom(in); + this.uuid = in.readString(); } public Entry started() { @@ -343,22 +329,12 @@ public int hashCode() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) { - out.writeString(repoName); - out.writeCollection(snapshots); - } else { - assert snapshots.size() == 1 : "Only single deletion allowed in mixed version cluster containing [" - + out.getVersion() - + "] but saw " - + snapshots; - new Snapshot(repoName, snapshots.get(0)).writeTo(out); - } + out.writeString(repoName); + out.writeCollection(snapshots); out.writeVLong(startTime); out.writeLong(repositoryStateId); - if (out.getVersion().onOrAfter(SnapshotsService.FULL_CONCURRENCY_VERSION)) { - state.writeTo(out); - out.writeString(uuid); - } + state.writeTo(out); + out.writeString(uuid); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 4cbf0cfe70adb..f77b3e3b2a904 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -35,7 +35,6 @@ import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.common.Nullable; @@ -54,7 +53,6 @@ import org.opensearch.snapshots.InFlightShardSnapshotStates; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; -import org.opensearch.snapshots.SnapshotsService; import java.io.IOException; import java.util.Collections; @@ -66,8 +64,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.opensearch.snapshots.SnapshotInfo.DATA_STREAMS_IN_SNAPSHOT; - /** * Meta data about snapshots that are currently executing * @@ -77,8 +73,6 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement public static final SnapshotsInProgress EMPTY = new 
SnapshotsInProgress(Collections.emptyList()); - private static final Version VERSION_IN_SNAPSHOT_VERSION = LegacyESVersion.V_7_7_0; - public static final String TYPE = "snapshots"; public static final String ABORTED_FAILURE_TEXT = "Snapshot was aborted by deletion"; @@ -296,28 +290,10 @@ private Entry(StreamInput in) throws IOException { repositoryStateId = in.readLong(); failure = in.readOptionalString(); userMetadata = in.readMap(); - if (in.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { - version = Version.readVersion(in); - } else if (in.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { - // If an older cluster-manager informs us that shard generations are supported - // we use the minimum shard generation compatible version. - // If shard generations are not supported yet we use a placeholder for a version that does not use shard generations. - version = in.readBoolean() ? SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION : SnapshotsService.OLD_SNAPSHOT_FORMAT; - } else { - version = SnapshotsService.OLD_SNAPSHOT_FORMAT; - } - if (in.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - dataStreams = in.readStringList(); - } else { - dataStreams = Collections.emptyList(); - } - if (in.getVersion().onOrAfter(SnapshotsService.CLONE_SNAPSHOT_VERSION)) { - source = in.readOptionalWriteable(SnapshotId::new); - clones = in.readImmutableMap(RepositoryShardId::new, ShardSnapshotStatus::readFrom); - } else { - source = null; - clones = ImmutableOpenMap.of(); - } + version = Version.readVersion(in); + dataStreams = in.readStringList(); + source = in.readOptionalWriteable(SnapshotId::new); + clones = in.readImmutableMap(RepositoryShardId::new, ShardSnapshotStatus::readFrom); } private static boolean assertShardsConsistent( @@ -732,18 +708,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(repositoryStateId); out.writeOptionalString(failure); out.writeMap(userMetadata); - if (out.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { - Version.writeVersion(version, out); - } else if (out.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { - out.writeBoolean(SnapshotsService.useShardGenerations(version)); - } - if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - out.writeStringCollection(dataStreams); - } - if (out.getVersion().onOrAfter(SnapshotsService.CLONE_SNAPSHOT_VERSION)) { - out.writeOptionalWriteable(source); - out.writeMap(clones); - } + Version.writeVersion(version, out); + out.writeStringCollection(dataStreams); + out.writeOptionalWriteable(source); + out.writeMap(clones); } @Override @@ -840,12 +808,7 @@ private boolean assertConsistent() { public static ShardSnapshotStatus readFrom(StreamInput in) throws IOException { String nodeId = in.readOptionalString(); final ShardState state = ShardState.fromValue(in.readByte()); - final String generation; - if (SnapshotsService.useShardGenerations(in.getVersion())) { - generation = in.readOptionalString(); - } else { - generation = null; - } + final String generation = in.readOptionalString(); final String reason = in.readOptionalString(); if (state == ShardState.QUEUED) { return UNASSIGNED_QUEUED; @@ -884,9 +847,7 @@ public boolean isActive() { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(nodeId); out.writeByte(state.value); - if (SnapshotsService.useShardGenerations(out.getVersion())) { - out.writeOptionalString(generation); - } + out.writeOptionalString(generation); out.writeOptionalString(reason); } diff --git 
a/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java b/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java index afabf4ebfdb58..e8f96bb313dd1 100644 --- a/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java +++ b/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java @@ -92,10 +92,7 @@ public String getIndexMetaBlobId(String metaIdentifier) { } /** - * Get the blob id by {@link SnapshotId} and {@link IndexId} and fall back to the value of {@link SnapshotId#getUUID()} if none is - * known to enable backwards compatibility with versions older than - * {@link org.opensearch.snapshots.SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION} which used the snapshot uuid as index metadata - * blob uuid. + * Get the blob id by {@link SnapshotId} and {@link IndexId}. * * @param snapshotId Snapshot Id * @param indexId Index Id @@ -103,11 +100,7 @@ public String getIndexMetaBlobId(String metaIdentifier) { */ public String indexMetaBlobId(SnapshotId snapshotId, IndexId indexId) { final String identifier = lookup.getOrDefault(snapshotId, Collections.emptyMap()).get(indexId); - if (identifier == null) { - return snapshotId.getUUID(); - } else { - return identifiers.get(identifier); - } + return identifiers.get(identifier); } /** diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryData.java b/server/src/main/java/org/opensearch/repositories/RepositoryData.java index e8132801e4238..524a978bcf1d1 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryData.java @@ -42,7 +42,6 @@ import org.opensearch.common.xcontent.XContentParserUtils; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotState; -import org.opensearch.snapshots.SnapshotsService; import java.io.IOException; import java.util.ArrayList; @@ -238,7 +237,6 @@ public SnapshotState getSnapshotState(final SnapshotId snapshotId) { /** * Returns the {@link Version} for the given snapshot or {@code null} if unknown. 
*/ - @Nullable public Version getVersion(SnapshotId snapshotId) { return snapshotVersions.get(snapshotId.getUUID()); } @@ -551,8 +549,6 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final builder.startObject(); // write the snapshots list builder.startArray(SNAPSHOTS); - final boolean shouldWriteIndexGens = SnapshotsService.useIndexGenerations(repoMetaVersion); - final boolean shouldWriteShardGens = SnapshotsService.useShardGenerations(repoMetaVersion); for (final SnapshotId snapshot : getSnapshotIds()) { builder.startObject(); builder.field(NAME, snapshot.getName()); @@ -562,14 +558,12 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final if (state != null) { builder.field(STATE, state.value()); } - if (shouldWriteIndexGens) { - builder.startObject(INDEX_METADATA_LOOKUP); - for (Map.Entry entry : indexMetaDataGenerations.lookup.getOrDefault(snapshot, Collections.emptyMap()) - .entrySet()) { - builder.field(entry.getKey().getId(), entry.getValue()); - } - builder.endObject(); + builder.startObject(INDEX_METADATA_LOOKUP); + for (Map.Entry entry : indexMetaDataGenerations.lookup.getOrDefault(snapshot, Collections.emptyMap()) + .entrySet()) { + builder.field(entry.getKey().getId(), entry.getValue()); } + builder.endObject(); final Version version = snapshotVersions.get(snapshotUUID); if (version != null) { builder.field(VERSION, version.toString()); @@ -589,23 +583,15 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final builder.value(snapshotId.getUUID()); } builder.endArray(); - if (shouldWriteShardGens) { - builder.startArray(SHARD_GENERATIONS); - for (String gen : shardGenerations.getGens(indexId)) { - builder.value(gen); - } - builder.endArray(); + builder.startArray(SHARD_GENERATIONS); + for (String gen : shardGenerations.getGens(indexId)) { + builder.value(gen); } + builder.endArray(); builder.endObject(); } builder.endObject(); - if (shouldWriteIndexGens) { - builder.field(MIN_VERSION, SnapshotsService.INDEX_GEN_IN_REPO_DATA_VERSION.toString()); - builder.field(INDEX_METADATA_IDENTIFIERS, indexMetaDataGenerations.identifiers); - } else if (shouldWriteShardGens) { - // Add min version field to make it impossible for older OpenSearch versions to deserialize this object - builder.field(MIN_VERSION, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.toString()); - } + builder.field(INDEX_METADATA_IDENTIFIERS, indexMetaDataGenerations.identifiers); builder.endObject(); return builder; } @@ -616,12 +602,8 @@ public IndexMetaDataGenerations indexMetaDataGenerations() { /** * Reads an instance of {@link RepositoryData} from x-content, loading the snapshots and indices metadata. - * - * @param fixBrokenShardGens set to {@code true} to filter out broken shard generations read from the {@code parser} via - * {@link ShardGenerations#fixShardGeneration}. Used to disable fixing broken generations when reading - * from cached bytes that we trust to not contain broken generations. 
*/ - public static RepositoryData snapshotsFromXContent(XContentParser parser, long genId, boolean fixBrokenShardGens) throws IOException { + public static RepositoryData snapshotsFromXContent(XContentParser parser, long genId) throws IOException { XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); final Map snapshots = new HashMap<>(); @@ -639,16 +621,16 @@ public static RepositoryData snapshotsFromXContent(XContentParser parser, long g parseSnapshots(parser, snapshots, snapshotStates, snapshotVersions, indexMetaLookup); break; case INDICES: - parseIndices(parser, fixBrokenShardGens, snapshots, indexSnapshots, indexLookup, shardGenerations); + parseIndices(parser, snapshots, indexSnapshots, indexLookup, shardGenerations); break; case INDEX_METADATA_IDENTIFIERS: XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); indexMetaIdentifiers = parser.mapStrings(); break; case MIN_VERSION: - XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_STRING, parser.nextToken(), parser); - final Version version = Version.fromString(parser.text()); - assert SnapshotsService.useShardGenerations(version); + // ignore min_version + // todo: remove in next version + parser.nextToken(); break; default: XContentParserUtils.throwUnknownField(field, parser.getTokenLocation()); @@ -763,7 +745,6 @@ private static void parseSnapshots( * {@code shardGenerations}. * * @param parser x-content parser - * @param fixBrokenShardGens whether or not to fix broken shard generation (see {@link #snapshotsFromXContent} for details) * @param snapshots map of snapshot uuid to {@link SnapshotId} that was populated by {@link #parseSnapshots} * @param indexSnapshots map of {@link IndexId} to list of {@link SnapshotId} that contain the given index * @param indexLookup map of index uuid (as returned by {@link IndexId#getId}) to {@link IndexId} @@ -771,7 +752,6 @@ private static void parseSnapshots( */ private static void parseIndices( XContentParser parser, - boolean fixBrokenShardGens, Map snapshots, Map> indexSnapshots, Map indexLookup, @@ -835,9 +815,6 @@ private static void parseIndices( indexLookup.put(indexId.getId(), indexId); for (int i = 0; i < gens.size(); i++) { String parsedGen = gens.get(i); - if (fixBrokenShardGens) { - parsedGen = ShardGenerations.fixShardGeneration(parsedGen); - } if (parsedGen != null) { shardGenerations.put(indexId, i, parsedGen); } diff --git a/server/src/main/java/org/opensearch/repositories/ShardGenerations.java b/server/src/main/java/org/opensearch/repositories/ShardGenerations.java index 27eac4dfdd8c1..d918eb7e1e476 100644 --- a/server/src/main/java/org/opensearch/repositories/ShardGenerations.java +++ b/server/src/main/java/org/opensearch/repositories/ShardGenerations.java @@ -33,7 +33,6 @@ package org.opensearch.repositories; import org.opensearch.common.Nullable; -import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import java.util.Arrays; import java.util.Collection; @@ -44,7 +43,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.regex.Pattern; import java.util.stream.Collectors; /** @@ -74,24 +72,6 @@ private ShardGenerations(Map> shardGenerations) { this.shardGenerations = shardGenerations; } - private static final Pattern IS_NUMBER = Pattern.compile("^\\d+$"); - - /** - * Filters out unreliable numeric shard generations read from {@link RepositoryData} or {@link IndexShardSnapshotStatus}, returning - * {@code null} in 
their place. - * @see Issue #57988 - * - * @param shardGeneration shard generation to fix - * @return given shard generation or {@code null} if it was filtered out or {@code null} was passed - */ - @Nullable - public static String fixShardGeneration(@Nullable String shardGeneration) { - if (shardGeneration == null) { - return null; - } - return IS_NUMBER.matcher(shardGeneration).matches() ? null : shardGeneration; - } - /** * Returns the total number of shards tracked by this instance. */ @@ -145,8 +125,7 @@ public Map> obsoleteShardGenerations(ShardGenerati *
      * <ul>
      *     <li>{@link #DELETED_SHARD_GEN} a deleted shard that isn't referenced by any snapshot in the repository any longer</li>
      *     <li>{@link #NEW_SHARD_GEN} a new shard that we know doesn't hold any valid data yet in the repository</li>
-     *     <li>{@code null} unknown state. The shard either does not exist at all or it was created by a node older than
-     *     {@link org.opensearch.snapshots.SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION}. If a caller expects a shard to exist in the
+     *     <li>{@code null} unknown state. The shard does not exist at all. If a caller expects a shard to exist in the
      *     repository but sees a {@code null} return, it should try to recover the generation by falling back to listing the contents
      *     of the respective shard directory.</li>
      * </ul>
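The {@code null} case above relies on callers recovering a missing generation by listing the shard directory and picking the newest index-N blob. The sketch below is a minimal, hypothetical illustration of that recovery step: the class and method names (ShardGenerationFallback, latestNumericGeneration) are invented for the example, and only the "index-" prefix mirrors the SNAPSHOT_INDEX_PREFIX naming visible in the BlobStoreRepository hunks that follow; this is not the repository's actual implementation.

// Minimal sketch only: shows how a caller could fall back to listing the shard
// directory when no shard generation is tracked (the null case described above).
// Names are hypothetical and chosen for the example.
import java.util.Optional;
import java.util.Set;

final class ShardGenerationFallback {
    private static final String SNAPSHOT_INDEX_PREFIX = "index-";

    private ShardGenerationFallback() {}

    // Returns the highest numeric index-N generation among the given blob names,
    // or an empty Optional if the shard directory holds no index-N blob at all.
    static Optional<Long> latestNumericGeneration(Set<String> blobNames) {
        return blobNames.stream()
            .filter(name -> name.startsWith(SNAPSHOT_INDEX_PREFIX))
            .map(name -> name.substring(SNAPSHOT_INDEX_PREFIX.length()))
            .filter(suffix -> !suffix.isEmpty() && suffix.chars().allMatch(Character::isDigit))
            .map(Long::parseLong)
            .max(Long::compare);
    }

    public static void main(String[] args) {
        // A shard directory containing two generations and an unrelated blob.
        Set<String> blobs = Set.of("index-0", "index-3", "snap-abc.dat");
        // Prints 3; the caller would then read index-3 to rebuild the shard's snapshot list.
        System.out.println(latestNumericGeneration(blobs).orElse(-1L));
    }
}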
    diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index bf06191bdc8d3..1000fe4078b48 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -818,66 +818,46 @@ private void doDeleteShardSnapshots( Version repoMetaVersion, ActionListener listener ) { - - if (SnapshotsService.useShardGenerations(repoMetaVersion)) { - // First write the new shard state metadata (with the removed snapshot) and compute deletion targets - final StepListener> writeShardMetaDataAndComputeDeletesStep = new StepListener<>(); - writeUpdatedShardMetaDataAndComputeDeletes(snapshotIds, repositoryData, true, writeShardMetaDataAndComputeDeletesStep); - // Once we have put the new shard-level metadata into place, we can update the repository metadata as follows: - // 1. Remove the snapshots from the list of existing snapshots - // 2. Update the index shard generations of all updated shard folders - // - // Note: If we fail updating any of the individual shard paths, none of them are changed since the newly created - // index-${gen_uuid} will not be referenced by the existing RepositoryData and new RepositoryData is only - // written if all shard paths have been successfully updated. - final StepListener writeUpdatedRepoDataStep = new StepListener<>(); - writeShardMetaDataAndComputeDeletesStep.whenComplete(deleteResults -> { - final ShardGenerations.Builder builder = ShardGenerations.builder(); - for (ShardSnapshotMetaDeleteResult newGen : deleteResults) { - builder.put(newGen.indexId, newGen.shardId, newGen.newGeneration); - } - final RepositoryData updatedRepoData = repositoryData.removeSnapshots(snapshotIds, builder.build()); - writeIndexGen( - updatedRepoData, - repositoryStateId, - repoMetaVersion, - Function.identity(), - ActionListener.wrap(writeUpdatedRepoDataStep::onResponse, listener::onFailure) - ); - }, listener::onFailure); - // Once we have updated the repository, run the clean-ups - writeUpdatedRepoDataStep.whenComplete(updatedRepoData -> { - // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion - final ActionListener afterCleanupsListener = new GroupedActionListener<>( - ActionListener.wrap(() -> listener.onResponse(updatedRepoData)), - 2 - ); - cleanupUnlinkedRootAndIndicesBlobs(snapshotIds, foundIndices, rootBlobs, updatedRepoData, afterCleanupsListener); - asyncCleanupUnlinkedShardLevelBlobs( - repositoryData, - snapshotIds, - writeShardMetaDataAndComputeDeletesStep.result(), - afterCleanupsListener - ); - }, listener::onFailure); - } else { - // Write the new repository data first (with the removed snapshot), using no shard generations - final RepositoryData updatedRepoData = repositoryData.removeSnapshots(snapshotIds, ShardGenerations.EMPTY); - writeIndexGen(updatedRepoData, repositoryStateId, repoMetaVersion, Function.identity(), ActionListener.wrap(newRepoData -> { - // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion - final ActionListener afterCleanupsListener = new GroupedActionListener<>( - ActionListener.wrap(() -> listener.onResponse(newRepoData)), - 2 - ); - cleanupUnlinkedRootAndIndicesBlobs(snapshotIds, foundIndices, rootBlobs, newRepoData, afterCleanupsListener); - final StepListener> writeMetaAndComputeDeletesStep = new StepListener<>(); - 
writeUpdatedShardMetaDataAndComputeDeletes(snapshotIds, repositoryData, false, writeMetaAndComputeDeletesStep); - writeMetaAndComputeDeletesStep.whenComplete( - deleteResults -> asyncCleanupUnlinkedShardLevelBlobs(repositoryData, snapshotIds, deleteResults, afterCleanupsListener), - afterCleanupsListener::onFailure - ); - }, listener::onFailure)); - } + // First write the new shard state metadata (with the removed snapshot) and compute deletion targets + final StepListener> writeShardMetaDataAndComputeDeletesStep = new StepListener<>(); + writeUpdatedShardMetaDataAndComputeDeletes(snapshotIds, repositoryData, true, writeShardMetaDataAndComputeDeletesStep); + // Once we have put the new shard-level metadata into place, we can update the repository metadata as follows: + // 1. Remove the snapshots from the list of existing snapshots + // 2. Update the index shard generations of all updated shard folders + // + // Note: If we fail updating any of the individual shard paths, none of them are changed since the newly created + // index-${gen_uuid} will not be referenced by the existing RepositoryData and new RepositoryData is only + // written if all shard paths have been successfully updated. + final StepListener writeUpdatedRepoDataStep = new StepListener<>(); + writeShardMetaDataAndComputeDeletesStep.whenComplete(deleteResults -> { + final ShardGenerations.Builder builder = ShardGenerations.builder(); + for (ShardSnapshotMetaDeleteResult newGen : deleteResults) { + builder.put(newGen.indexId, newGen.shardId, newGen.newGeneration); + } + final RepositoryData updatedRepoData = repositoryData.removeSnapshots(snapshotIds, builder.build()); + writeIndexGen( + updatedRepoData, + repositoryStateId, + repoMetaVersion, + Function.identity(), + ActionListener.wrap(writeUpdatedRepoDataStep::onResponse, listener::onFailure) + ); + }, listener::onFailure); + // Once we have updated the repository, run the clean-ups + writeUpdatedRepoDataStep.whenComplete(updatedRepoData -> { + // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion + final ActionListener afterCleanupsListener = new GroupedActionListener<>( + ActionListener.wrap(() -> listener.onResponse(updatedRepoData)), + 2 + ); + cleanupUnlinkedRootAndIndicesBlobs(snapshotIds, foundIndices, rootBlobs, updatedRepoData, afterCleanupsListener); + asyncCleanupUnlinkedShardLevelBlobs( + repositoryData, + snapshotIds, + writeShardMetaDataAndComputeDeletesStep.result(), + afterCleanupsListener + ); + }, listener::onFailure); } private void cleanupUnlinkedRootAndIndicesBlobs( @@ -1365,31 +1345,19 @@ public void finalizeSnapshot( final Collection indices = shardGenerations.indices(); final SnapshotId snapshotId = snapshotInfo.snapshotId(); // Once we are done writing the updated index-N blob we remove the now unreferenced index-${uuid} blobs in each shard - // directory if all nodes are at least at version SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION - // If there are older version nodes in the cluster, we don't need to run this cleanup as it will have already happened - // when writing the index-${N} to each shard directory. 
- final boolean writeShardGens = SnapshotsService.useShardGenerations(repositoryMetaVersion); + // directory final Consumer onUpdateFailure = e -> listener.onFailure( new SnapshotException(metadata.name(), snapshotId, "failed to update snapshot in repository", e) ); final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - final boolean writeIndexGens = SnapshotsService.useIndexGenerations(repositoryMetaVersion); - final StepListener repoDataListener = new StepListener<>(); getRepositoryData(repoDataListener); repoDataListener.whenComplete(existingRepositoryData -> { - final Map indexMetas; - final Map indexMetaIdentifiers; - if (writeIndexGens) { - indexMetaIdentifiers = ConcurrentCollections.newConcurrentMap(); - indexMetas = ConcurrentCollections.newConcurrentMap(); - } else { - indexMetas = null; - indexMetaIdentifiers = null; - } + final Map indexMetas = ConcurrentCollections.newConcurrentMap(); + final Map indexMetaIdentifiers = ConcurrentCollections.newConcurrentMap(); final ActionListener allMetaListener = new GroupedActionListener<>(ActionListener.wrap(v -> { final RepositoryData updatedRepositoryData = existingRepositoryData.addSnapshot( @@ -1406,9 +1374,7 @@ public void finalizeSnapshot( repositoryMetaVersion, stateTransformer, ActionListener.wrap(newRepoData -> { - if (writeShardGens) { - cleanupOldShardGens(existingRepositoryData, updatedRepositoryData); - } + cleanupOldShardGens(existingRepositoryData, updatedRepositoryData); listener.onResponse(newRepoData); }, onUpdateFailure) ); @@ -1432,24 +1398,15 @@ public void finalizeSnapshot( for (IndexId index : indices) { executor.execute(ActionRunnable.run(allMetaListener, () -> { final IndexMetadata indexMetaData = clusterMetadata.index(index.getName()); - if (writeIndexGens) { - final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData); - String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers); - if (metaUUID == null) { - // We don't yet have this version of the metadata so we write it - metaUUID = UUIDs.base64UUID(); - INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress); - indexMetaIdentifiers.put(identifiers, metaUUID); - } - indexMetas.put(index, identifiers); - } else { - INDEX_METADATA_FORMAT.write( - clusterMetadata.index(index.getName()), - indexContainer(index), - snapshotId.getUUID(), - compress - ); + final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData); + String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers); + if (metaUUID == null) { + // We don't yet have this version of the metadata so we write it + metaUUID = UUIDs.base64UUID(); + INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress); + indexMetaIdentifiers.put(identifiers, metaUUID); } + indexMetas.put(index, identifiers); })); } executor.execute( @@ -1774,8 +1731,7 @@ private RepositoryData repositoryDataFromCachedEntry(Tuple try (InputStream input = CompressorFactory.COMPRESSOR.threadLocalInputStream(cacheEntry.v2().streamInput())) { return RepositoryData.snapshotsFromXContent( XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, input), - cacheEntry.v1(), - false + cacheEntry.v1() ); } } @@ -1870,7 +1826,7 @@ private RepositoryData getRepositoryData(long indexGen) { XContentParser parser = XContentType.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, 
LoggingDeprecationHandler.INSTANCE, blob) ) { - return RepositoryData.snapshotsFromXContent(parser, indexGen, true); + return RepositoryData.snapshotsFromXContent(parser, indexGen); } } catch (IOException ioe) { if (bestEffortConsistency) { @@ -2454,7 +2410,6 @@ public void snapshotShard( ); final String indexGeneration; - final boolean writeShardGens = SnapshotsService.useShardGenerations(repositoryMetaVersion); // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones List newSnapshotsList = new ArrayList<>(); newSnapshotsList.add(new SnapshotFiles(snapshotId.getName(), indexCommitPointFiles, shardStateIdentifier)); @@ -2463,71 +2418,24 @@ public void snapshotShard( } final BlobStoreIndexShardSnapshots updatedBlobStoreIndexShardSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); final Runnable afterWriteSnapBlob; - if (writeShardGens) { - // When using shard generations we can safely write the index-${uuid} blob before writing out any of the actual data - // for this shard since the uuid named blob will simply not be referenced in case of error and thus we will never - // reference a generation that has not had all its files fully upload. - indexGeneration = UUIDs.randomBase64UUID(); - try { - INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedBlobStoreIndexShardSnapshots, shardContainer, indexGeneration, compress); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException( - shardId, - "Failed to write shard level snapshot metadata for [" - + snapshotId - + "] to [" - + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(indexGeneration) - + "]", - e - ); - } - afterWriteSnapBlob = () -> {}; - } else { - // When not using shard generations we can only write the index-${N} blob after all other work for this shard has - // completed. - // Also, in case of numeric shard generations the data node has to take care of deleting old shard generations. - final long newGen = Long.parseLong(fileListGeneration) + 1; - indexGeneration = Long.toString(newGen); - // Delete all previous index-N blobs - final List blobsToDelete = blobs.stream() - .filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)) - .collect(Collectors.toList()); - assert blobsToDelete.stream() - .mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))) - .max() - .orElse(-1L) < Long.parseLong(indexGeneration) : "Tried to delete an index-N blob newer than the current generation [" - + indexGeneration - + "] when deleting index-N blobs " - + blobsToDelete; - afterWriteSnapBlob = () -> { - try { - writeShardIndexBlobAtomic(shardContainer, newGen, updatedBlobStoreIndexShardSnapshots); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException( - shardId, - "Failed to finalize snapshot creation [" - + snapshotId - + "] with shard index [" - + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(indexGeneration) - + "]", - e - ); - } - try { - deleteFromContainer(shardContainer, blobsToDelete); - } catch (IOException e) { - logger.warn( - () -> new ParameterizedMessage( - "[{}][{}] failed to delete old index-N blobs during finalization", - snapshotId, - shardId - ), - e - ); - } - }; + // When using shard generations we can safely write the index-${uuid} blob before writing out any of the actual data + // for this shard since the uuid named blob will simply not be referenced in case of error and thus we will never + // reference a generation that has not had all its files fully upload. 
+ indexGeneration = UUIDs.randomBase64UUID(); + try { + INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedBlobStoreIndexShardSnapshots, shardContainer, indexGeneration, compress); + } catch (IOException e) { + throw new IndexShardSnapshotFailedException( + shardId, + "Failed to write shard level snapshot metadata for [" + + snapshotId + + "] to [" + + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(indexGeneration) + + "]", + e + ); } - + afterWriteSnapBlob = () -> {}; final StepListener> allFilesUploadedListener = new StepListener<>(); allFilesUploadedListener.whenComplete(v -> { final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); @@ -2970,9 +2878,7 @@ public BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContaine * the given list of blobs in the shard container. * * @param blobs list of blobs in repository - * @param generation shard generation or {@code null} in case there was no shard generation tracked in the {@link RepositoryData} for - * this shard because its snapshot was created in a version older than - * {@link SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION}. + * @param generation shard generation * @return tuple of BlobStoreIndexShardSnapshots and the last snapshot index generation */ private Tuple buildBlobStoreIndexShardSnapshots( diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index 38d9df0e960e0..94ab875091caf 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -31,7 +31,6 @@ package org.opensearch.snapshots; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ShardOperationFailedException; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; @@ -69,8 +68,6 @@ */ public final class SnapshotInfo implements Comparable, ToXContent, Writeable { - public static final Version DATA_STREAMS_IN_SNAPSHOT = LegacyESVersion.V_7_9_0; - public static final String CONTEXT_MODE_PARAM = "context_mode"; public static final String CONTEXT_MODE_SNAPSHOT = "SNAPSHOT"; private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_optional_time"); @@ -401,11 +398,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { version = in.readBoolean() ? 
Version.readVersion(in) : null; includeGlobalState = in.readOptionalBoolean(); userMetadata = in.readMap(); - if (in.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - dataStreams = in.readStringList(); - } else { - dataStreams = Collections.emptyList(); - } + dataStreams = in.readStringList(); } /** @@ -836,9 +829,7 @@ public void writeTo(final StreamOutput out) throws IOException { } out.writeOptionalBoolean(includeGlobalState); out.writeMap(userMetadata); - if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - out.writeStringCollection(dataStreams); - } + out.writeStringCollection(dataStreams); } private static SnapshotState snapshotState(final String reason, final List shardFailures) { diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 22a85558e3b1d..86dbc31245b11 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -67,7 +67,6 @@ import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; -import org.opensearch.repositories.ShardGenerations; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequestDeduplicator; @@ -276,11 +275,6 @@ private void startNewShards(SnapshotsInProgress.Entry entry, Map() { @Override public void onResponse(String newGeneration) { diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index e53c2889f88e6..0123c839e4ad9 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -38,7 +38,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; @@ -141,18 +140,6 @@ */ public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { - public static final Version FULL_CONCURRENCY_VERSION = LegacyESVersion.V_7_9_0; - - public static final Version CLONE_SNAPSHOT_VERSION = LegacyESVersion.V_7_10_0; - - public static final Version SHARD_GEN_IN_REPO_DATA_VERSION = LegacyESVersion.V_7_6_0; - - public static final Version INDEX_GEN_IN_REPO_DATA_VERSION = LegacyESVersion.V_7_9_0; - - public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.fromId(7050099); - - public static final Version MULTI_DELETE_VERSION = LegacyESVersion.V_7_8_0; - private static final Logger logger = LogManager.getLogger(SnapshotsService.class); public static final String UPDATE_SNAPSHOT_STATUS_ACTION_NAME = "internal:cluster/snapshot/update_snapshot_status"; @@ -168,9 +155,6 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus private final Map>>> snapshotCompletionListeners = new ConcurrentHashMap<>(); - // Set of snapshots that are currently being initialized by this node - private final Set initializingSnapshots = Collections.synchronizedSet(new HashSet<>()); - /** * Listeners for snapshot deletion keyed by delete uuid as returned from {@link 
SnapshotDeletionsInProgress.Entry#uuid()} */ @@ -285,18 +269,10 @@ public ClusterState execute(ClusterState currentState) { final List runningSnapshots = snapshots.entries(); ensureSnapshotNameNotRunning(runningSnapshots, repositoryName, snapshotName); validate(repositoryName, snapshotName, currentState); - final boolean concurrentOperationsAllowed = currentState.nodes().getMinNodeVersion().onOrAfter(FULL_CONCURRENCY_VERSION); final SnapshotDeletionsInProgress deletionsInProgress = currentState.custom( SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY ); - if (deletionsInProgress.hasDeletionsInProgress() && concurrentOperationsAllowed == false) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a snapshot deletion is in-progress in [" + deletionsInProgress + "]" - ); - } final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom( RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY @@ -308,13 +284,6 @@ public ClusterState execute(ClusterState currentState) { "cannot snapshot while a repository cleanup is in-progress in [" + repositoryCleanupInProgress + "]" ); } - // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a - // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from - // the - // cluster state anyway in #applyClusterState. - if (concurrentOperationsAllowed == false && runningSnapshots.stream().anyMatch(entry -> entry.state() != State.INIT)) { - throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); - } ensureNoCleanupInProgress(currentState, repositoryName, snapshotName); ensureBelowConcurrencyLimit(repositoryName, snapshotName, snapshots, deletionsInProgress); // Store newSnapshot here to be processed in clusterStateProcessed @@ -339,7 +308,6 @@ public ClusterState execute(ClusterState currentState) { currentState.metadata(), currentState.routingTable(), indexIds, - useShardGenerations(version), repositoryData, repositoryName ); @@ -1817,16 +1785,6 @@ public void deleteSnapshots(final DeleteSnapshotRequest request, final ActionLis @Override public ClusterState execute(ClusterState currentState) throws Exception { - final Version minNodeVersion = currentState.nodes().getMinNodeVersion(); - if (snapshotNames.length > 1 && minNodeVersion.before(MULTI_DELETE_VERSION)) { - throw new IllegalArgumentException( - "Deleting multiple snapshots in a single request is only supported in version [ " - + MULTI_DELETE_VERSION - + "] but cluster contained node of version [" - + currentState.nodes().getMinNodeVersion() - + "]" - ); - } final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); final List snapshotEntries = findInProgressSnapshots(snapshots, snapshotNames, repoName); final List snapshotIds = matchingSnapshotIds( @@ -1835,76 +1793,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { snapshotNames, repoName ); - if (snapshotEntries.isEmpty() || minNodeVersion.onOrAfter(SnapshotsService.FULL_CONCURRENCY_VERSION)) { - deleteFromRepoTask = createDeleteStateUpdate(snapshotIds, repoName, repositoryData, Priority.NORMAL, listener); - return deleteFromRepoTask.execute(currentState); - } - assert snapshotEntries.size() == 1 : "Expected just a single running snapshot but saw " + snapshotEntries; - final 
SnapshotsInProgress.Entry snapshotEntry = snapshotEntries.get(0); - runningSnapshot = snapshotEntry.snapshot(); - final ImmutableOpenMap shards; - - final State state = snapshotEntry.state(); - final String failure; - - outstandingDeletes = new ArrayList<>(snapshotIds); - if (state != State.INIT) { - // INIT state snapshots won't ever be physically written to the repository but all other states will end up in the repo - outstandingDeletes.add(runningSnapshot.getSnapshotId()); - } - if (state == State.INIT) { - // snapshot is still initializing, mark it as aborted - shards = snapshotEntry.shards(); - assert shards.isEmpty(); - failure = "Snapshot was aborted during initialization"; - abortedDuringInit = true; - } else if (state == State.STARTED) { - // snapshot is started - mark every non completed shard as aborted - final SnapshotsInProgress.Entry abortedEntry = snapshotEntry.abort(); - shards = abortedEntry.shards(); - failure = abortedEntry.failure(); - } else { - boolean hasUncompletedShards = false; - // Cleanup in case a node gone missing and snapshot wasn't updated for some reason - for (ObjectCursor shardStatus : snapshotEntry.shards().values()) { - // Check if we still have shard running on existing nodes - if (shardStatus.value.state().completed() == false - && shardStatus.value.nodeId() != null - && currentState.nodes().get(shardStatus.value.nodeId()) != null) { - hasUncompletedShards = true; - break; - } - } - if (hasUncompletedShards) { - // snapshot is being finalized - wait for shards to complete finalization process - logger.debug("trying to delete completed snapshot - should wait for shards to finalize on all nodes"); - return currentState; - } else { - // no shards to wait for but a node is gone - this is the only case - // where we force to finish the snapshot - logger.debug("trying to delete completed snapshot with no finalizing shards - can delete immediately"); - shards = snapshotEntry.shards(); - } - failure = snapshotEntry.failure(); - } - return ClusterState.builder(currentState) - .putCustom( - SnapshotsInProgress.TYPE, - SnapshotsInProgress.of( - snapshots.entries() - .stream() - // remove init state snapshot we found from a previous cluster-manager if there was one - .filter(existing -> abortedDuringInit == false || existing.equals(snapshotEntry) == false) - .map(existing -> { - if (existing.equals(snapshotEntry)) { - return snapshotEntry.fail(shards, State.ABORTED, failure); - } - return existing; - }) - .collect(Collectors.toList()) - ) - ) - .build(); + deleteFromRepoTask = createDeleteStateUpdate(snapshotIds, repoName, repositoryData, Priority.NORMAL, listener); + return deleteFromRepoTask.execute(currentState); } @Override @@ -2053,14 +1943,6 @@ public ClusterState execute(ClusterState currentState) { SnapshotDeletionsInProgress.EMPTY ); final Version minNodeVersion = currentState.nodes().getMinNodeVersion(); - if (minNodeVersion.before(FULL_CONCURRENCY_VERSION)) { - if (deletionsInProgress.hasDeletionsInProgress()) { - throw new ConcurrentSnapshotExecutionException( - new Snapshot(repoName, snapshotIds.get(0)), - "cannot delete - another snapshot is currently being deleted in [" + deletionsInProgress + "]" - ); - } - } final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom( RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY @@ -2100,44 +1982,32 @@ public ClusterState execute(ClusterState currentState) { } // Snapshot ids that will have to be physically deleted from the repository final Set 
snapshotIdsRequiringCleanup = new HashSet<>(snapshotIds); - final SnapshotsInProgress updatedSnapshots; - if (minNodeVersion.onOrAfter(FULL_CONCURRENCY_VERSION)) { - updatedSnapshots = SnapshotsInProgress.of(snapshots.entries().stream().map(existing -> { - if (existing.state() == State.STARTED - && snapshotIdsRequiringCleanup.contains(existing.snapshot().getSnapshotId())) { - // snapshot is started - mark every non completed shard as aborted - final SnapshotsInProgress.Entry abortedEntry = existing.abort(); - if (abortedEntry == null) { - // No work has been done for this snapshot yet so we remove it from the cluster state directly - final Snapshot existingNotYetStartedSnapshot = existing.snapshot(); - // Adding the snapshot to #endingSnapshots since we still have to resolve its listeners to not trip - // any leaked listener assertions - if (endingSnapshots.add(existingNotYetStartedSnapshot)) { - completedNoCleanup.add(existingNotYetStartedSnapshot); - } - snapshotIdsRequiringCleanup.remove(existingNotYetStartedSnapshot.getSnapshotId()); - } else if (abortedEntry.state().completed()) { - completedWithCleanup.add(abortedEntry); + final SnapshotsInProgress updatedSnapshots = SnapshotsInProgress.of(snapshots.entries().stream().map(existing -> { + if (existing.state() == State.STARTED && snapshotIdsRequiringCleanup.contains(existing.snapshot().getSnapshotId())) { + // snapshot is started - mark every non completed shard as aborted + final SnapshotsInProgress.Entry abortedEntry = existing.abort(); + if (abortedEntry == null) { + // No work has been done for this snapshot yet so we remove it from the cluster state directly + final Snapshot existingNotYetStartedSnapshot = existing.snapshot(); + // Adding the snapshot to #endingSnapshots since we still have to resolve its listeners to not trip + // any leaked listener assertions + if (endingSnapshots.add(existingNotYetStartedSnapshot)) { + completedNoCleanup.add(existingNotYetStartedSnapshot); } - return abortedEntry; + snapshotIdsRequiringCleanup.remove(existingNotYetStartedSnapshot.getSnapshotId()); + } else if (abortedEntry.state().completed()) { + completedWithCleanup.add(abortedEntry); } - return existing; - }).filter(Objects::nonNull).collect(Collectors.toList())); - if (snapshotIdsRequiringCleanup.isEmpty()) { - // We only saw snapshots that could be removed from the cluster state right away, no need to update the deletions - return updateWithSnapshots(currentState, updatedSnapshots, null); + return abortedEntry; } - } else { - if (snapshots.entries().isEmpty() == false) { - // However other snapshots are running - cannot continue - throw new ConcurrentSnapshotExecutionException( - repoName, - snapshotIds.toString(), - "another snapshot is currently running cannot delete" - ); - } - updatedSnapshots = snapshots; + return existing; + }).filter(Objects::nonNull).collect(Collectors.toList())); + + if (snapshotIdsRequiringCleanup.isEmpty()) { + // We only saw snapshots that could be removed from the cluster state right away, no need to update the deletions + return updateWithSnapshots(currentState, updatedSnapshots, null); } + // add the snapshot deletion to the cluster state final SnapshotDeletionsInProgress.Entry replacedEntry = deletionsInProgress.getEntries() .stream() @@ -2266,41 +2136,11 @@ public Version minCompatibleVersion(Version minNodeVersion, RepositoryData repos .filter(excluded == null ? 
sn -> true : sn -> excluded.contains(sn) == false) .collect(Collectors.toList())) { final Version known = repositoryData.getVersion(snapshotId); - // If we don't have the version cached in the repository data yet we load it from the snapshot info blobs - if (known == null) { - assert repositoryData.shardGenerations().totalShards() == 0 : "Saw shard generations [" - + repositoryData.shardGenerations() - + "] but did not have versions tracked for snapshot [" - + snapshotId - + "]"; - return OLD_SNAPSHOT_FORMAT; - } else { - minCompatVersion = minCompatVersion.before(known) ? minCompatVersion : known; - } + minCompatVersion = minCompatVersion.before(known) ? minCompatVersion : known; } return minCompatVersion; } - /** - * Checks whether the metadata version supports writing {@link ShardGenerations} to the repository. - * - * @param repositoryMetaVersion version to check - * @return true if version supports {@link ShardGenerations} - */ - public static boolean useShardGenerations(Version repositoryMetaVersion) { - return repositoryMetaVersion.onOrAfter(SHARD_GEN_IN_REPO_DATA_VERSION); - } - - /** - * Checks whether the metadata version supports writing {@link ShardGenerations} to the repository. - * - * @param repositoryMetaVersion version to check - * @return true if version supports {@link ShardGenerations} - */ - public static boolean useIndexGenerations(Version repositoryMetaVersion) { - return repositoryMetaVersion.onOrAfter(INDEX_GEN_IN_REPO_DATA_VERSION); - } - /** Deletes snapshot from repository * * @param deleteEntry delete entry in cluster state @@ -2578,7 +2418,6 @@ private SnapshotsInProgress updatedSnapshotsInProgress(ClusterState currentState currentState.metadata(), currentState.routingTable(), entry.indices(), - entry.version().onOrAfter(SHARD_GEN_IN_REPO_DATA_VERSION), repositoryData, repoName ); @@ -2677,7 +2516,6 @@ private static void completeListenersIgnoringException(@Nullable List shards( @@ -2686,7 +2524,6 @@ private static ImmutableOpenMap indices, - boolean useShardGenerations, RepositoryData repositoryData, String repoName ) { @@ -2712,18 +2549,15 @@ private static ImmutableOpenMap RepositoryData.snapshotsFromXContent(xParser, corruptedRepositoryData.getGenId(), randomBoolean()) + () -> RepositoryData.snapshotsFromXContent(xParser, corruptedRepositoryData.getGenId()) ); assertThat( e.getMessage(), @@ -327,7 +327,7 @@ public void testIndexThatReferenceANullSnapshot() throws IOException { try (XContentParser xParser = createParser(builder)) { OpenSearchParseException e = expectThrows( OpenSearchParseException.class, - () -> RepositoryData.snapshotsFromXContent(xParser, randomNonNegativeLong(), randomBoolean()) + () -> RepositoryData.snapshotsFromXContent(xParser, randomNonNegativeLong()) ); assertThat( e.getMessage(), diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java index f9bc0d1066d8e..0702866af35e3 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java @@ -136,7 +136,7 @@ public static void assertConsistency(BlobStoreRepository repository, Executor ex XContentParser parser = XContentType.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, blob) ) { - repositoryData = RepositoryData.snapshotsFromXContent(parser, latestGen, false); + 
repositoryData = RepositoryData.snapshotsFromXContent(parser, latestGen); } assertIndexUUIDs(repository, repositoryData); assertSnapshotUUIDs(repository, repositoryData); diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index bbac1ef591418..e5eac9af13de1 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -419,8 +419,7 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, Version DeprecationHandler.THROW_UNSUPPORTED_OPERATION, Strings.toString(jsonBuilder).replace(Version.CURRENT.toString(), version.toString()) ), - repositoryData.getGenId(), - randomBoolean() + repositoryData.getGenId() ); Files.write( repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData.getGenId()), @@ -512,7 +511,7 @@ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map Date: Wed, 19 Oct 2022 18:26:35 +0000 Subject: [PATCH 072/122] Support of GeoJson Point for GeoPoint field (#4597) * Support of GeoJson Point for GeoPoint field See https://github.com/opensearch-project/geospatial/issues/152 Signed-off-by: Heemin Kim * Refactored code based on comments Signed-off-by: Heemin Kim * Add unit tests for GeoJson support Signed-off-by: Heemin Kim * Resolve comments * Remove negation expression * Ignore case for GeoJson Point type to be consistent with geo_shape parsing Signed-off-by: Heemin Kim * Added yaml test for geopoint Signed-off-by: Heemin Kim Signed-off-by: Heemin Kim --- CHANGELOG.md | 1 + .../rest-api-spec/test/index/80_geo_point.yml | 145 +++++++++ .../org/opensearch/common/geo/GeoPoint.java | 6 +- .../org/opensearch/common/geo/GeoUtils.java | 280 ++++++++++++------ .../AbstractPointGeometryFieldMapper.java | 73 +++-- .../mapper/GeoPointFieldMapperTests.java | 53 ++++ .../search/geo/GeoPointParsingTests.java | 17 +- .../index/search/geo/GeoUtilsTests.java | 80 ++++- 8 files changed, 523 insertions(+), 132 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index d294eb68ee7c9..b0fa8716327ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added in-flight cancellation of SearchShardTask based on resource consumption ([#4565](https://github.com/opensearch-project/OpenSearch/pull/4565)) - Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) - Add groupId value propagation tests for ZIP publication task ([#4772](https://github.com/opensearch-project/OpenSearch/pull/4772)) +- Add support for GeoJson Point type in GeoPoint field ([#4597](https://github.com/opensearch-project/OpenSearch/pull/4597)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml new file mode 100644 index 0000000000000..d15529af0041a --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml @@ -0,0 +1,145 @@ +setup: + - do: + indices.create: + index: test_1 + body: + settings: + number_of_replicas: 0 + mappings: + 
properties: + location: + type: geo_point + +--- +"Single point test": + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + _id: 1 + - location: + lon: 52.374081 + lat: 4.912350 + - index: + _index: test_1 + _id: 2 + - location: "4.901618,52.369219" + - index: + _index: test_1 + _id: 3 + - location: [ 52.371667, 4.914722 ] + - index: + _index: test_1 + _id: 4 + - location: "POINT (52.371667 4.914722)" + - index: + _index: test_1 + _id: 5 + - location: "t0v5zsq1gpzf" + - index: + _index: test_1 + _id: 6 + - location: + type: Point + coordinates: [ 52.371667, 4.914722 ] + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 51, 5 ], [ 53, 3 ] ] + + - match: { hits.total: 6 } + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 151, 15 ], [ 153, 13 ] ] + + - match: { hits.total: 0 } + +--- +"Multi points test": + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + _id: 1 + - location: + - {lon: 52.374081, lat: 4.912350} + - {lon: 152.374081, lat: 14.912350} + - index: + _index: test_1 + _id: 2 + - location: + - "4.901618,52.369219" + - "14.901618,152.369219" + - index: + _index: test_1 + _id: 3 + - location: + - [ 52.371667, 4.914722 ] + - [ 152.371667, 14.914722 ] + - index: + _index: test_1 + _id: 4 + - location: + - "POINT (52.371667 4.914722)" + - "POINT (152.371667 14.914722)" + - index: + _index: test_1 + _id: 5 + - location: + - "t0v5zsq1gpzf" + - "x6skg0zbhnum" + - index: + _index: test_1 + _id: 6 + - location: + - {type: Point, coordinates: [ 52.371667, 4.914722 ]} + - {type: Point, coordinates: [ 152.371667, 14.914722 ]} + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 51, 5 ], [ 53, 3 ] ] + + - match: { hits.total: 6 } + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 151, 15 ], [ 153, 13 ] ] + + - match: { hits.total: 6 } diff --git a/server/src/main/java/org/opensearch/common/geo/GeoPoint.java b/server/src/main/java/org/opensearch/common/geo/GeoPoint.java index a2b06dccded8c..c59a9046e0318 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoPoint.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoPoint.java @@ -119,7 +119,11 @@ public GeoPoint resetFromString(String value, final boolean ignoreZValue, Effect public GeoPoint resetFromCoordinates(String value, final boolean ignoreZValue) { String[] vals = value.split(","); if (vals.length > 3) { - throw new OpenSearchParseException("failed to parse [{}], expected 2 or 3 coordinates " + "but found: [{}]", vals.length); + throw new OpenSearchParseException( + "failed to parse [{}], expected 2 or 3 coordinates " + "but found: [{}]", + value, + vals.length + ); } final double lat; final double lon; diff --git a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java index 5534967d559d6..96be418c85179 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java @@ -40,10 +40,10 @@ import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.NamedXContentRegistry; import 
org.opensearch.common.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentParser.Token; import org.opensearch.common.xcontent.XContentSubParser; import org.opensearch.common.xcontent.support.MapXContentParser; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.geometry.ShapeType; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.GeoPointValues; import org.opensearch.index.fielddata.MultiGeoPointValues; @@ -53,6 +53,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.HashMap; /** * Useful geo utilities @@ -60,7 +61,8 @@ * @opensearch.internal */ public class GeoUtils { - + private static final String ERR_MSG_INVALID_TOKEN = "token [{}] not allowed"; + private static final String ERR_MSG_INVALID_FIELDS = "field must be either [lon|lat], [type|coordinates], or [geohash]"; /** Maximum valid latitude in degrees. */ public static final double MAX_LAT = 90.0; /** Minimum valid latitude in degrees. */ @@ -74,6 +76,8 @@ public class GeoUtils { public static final String LONGITUDE = "lon"; public static final String GEOHASH = "geohash"; + public static final String GEOJSON_TYPE = "type"; + public static final String GEOJSON_COORDS = "coordinates"; /** Earth ellipsoid major axis defined by WGS 84 in meters */ public static final double EARTH_SEMI_MAJOR_AXIS = 6378137.0; // meters (WGS 84) @@ -444,112 +448,202 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina * Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms: * *
- *     <li>Object: <pre>{"lat": <latitude>, "lon": <longitude>}</pre></li>
- *     <li>String: <pre>"<latitude>,<longitude>"</pre></li>
- *     <li>Geohash: <pre>"<geohash>"</pre></li>
- *     <li>Array: <pre>[<longitude>,<latitude>]</pre></li>
+ *     <li>Object: <pre>{@code {"lat": <latitude>, "lon": <longitude>}}</pre></li>
+ *     <li>String: <pre>{@code "<latitude>,<longitude>"}</pre></li>
+ *     <li>GeoHash: <pre>{@code "<geohash>"}</pre></li>
+ *     <li>WKT: <pre>{@code "POINT (<longitude> <latitude>)"}</pre></li>
+ *     <li>Array: <pre>{@code [<longitude>, <latitude>]}</pre></li>
+ *     <li>GeoJson: <pre>{@code {"type": "Point", "coordinates": [<longitude>, <latitude>]}}</pre></li>
 * </ul>
    * + * * @param parser {@link XContentParser} to parse the value from * @param point A {@link GeoPoint} that will be reset by the values parsed + * @param ignoreZValue tells to ignore z value or throw exception when there is a z value + * @param effectivePoint tells which point to use for GeoHash form * @return new {@link GeoPoint} parsed from the parse */ - public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, final boolean ignoreZValue, EffectivePoint effectivePoint) - throws IOException, OpenSearchParseException { - double lat = Double.NaN; - double lon = Double.NaN; - String geohash = null; - NumberFormatException numberFormatException = null; - - if (parser.currentToken() == Token.START_OBJECT) { - try (XContentSubParser subParser = new XContentSubParser(parser)) { - while (subParser.nextToken() != Token.END_OBJECT) { - if (subParser.currentToken() == Token.FIELD_NAME) { - String field = subParser.currentName(); - if (LATITUDE.equals(field)) { - subParser.nextToken(); - switch (subParser.currentToken()) { - case VALUE_NUMBER: - case VALUE_STRING: - try { - lat = subParser.doubleValue(true); - } catch (NumberFormatException e) { - numberFormatException = e; - } - break; - default: - throw new OpenSearchParseException("latitude must be a number"); - } - } else if (LONGITUDE.equals(field)) { - subParser.nextToken(); - switch (subParser.currentToken()) { - case VALUE_NUMBER: - case VALUE_STRING: - try { - lon = subParser.doubleValue(true); - } catch (NumberFormatException e) { - numberFormatException = e; - } - break; - default: - throw new OpenSearchParseException("longitude must be a number"); - } - } else if (GEOHASH.equals(field)) { - if (subParser.nextToken() == Token.VALUE_STRING) { - geohash = subParser.text(); - } else { - throw new OpenSearchParseException("geohash must be a string"); - } - } else { - throw new OpenSearchParseException("field must be either [{}], [{}] or [{}]", LATITUDE, LONGITUDE, GEOHASH); - } - } else { - throw new OpenSearchParseException("token [{}] not allowed", subParser.currentToken()); + public static GeoPoint parseGeoPoint( + final XContentParser parser, + final GeoPoint point, + final boolean ignoreZValue, + final EffectivePoint effectivePoint + ) throws IOException, OpenSearchParseException { + switch (parser.currentToken()) { + case START_OBJECT: + parseGeoPointObject(parser, point, ignoreZValue, effectivePoint); + break; + case START_ARRAY: + parseGeoPointArray(parser, point, ignoreZValue); + break; + case VALUE_STRING: + String val = parser.text(); + point.resetFromString(val, ignoreZValue, effectivePoint); + break; + default: + throw new OpenSearchParseException("geo_point expected"); + } + return point; + } + + private static GeoPoint parseGeoPointObject( + final XContentParser parser, + final GeoPoint point, + final boolean ignoreZValue, + final GeoUtils.EffectivePoint effectivePoint + ) throws IOException { + try (XContentSubParser subParser = new XContentSubParser(parser)) { + if (subParser.nextToken() != XContentParser.Token.FIELD_NAME) { + throw new OpenSearchParseException(ERR_MSG_INVALID_TOKEN, subParser.currentToken()); + } + + String field = subParser.currentName(); + if (LONGITUDE.equals(field) || LATITUDE.equals(field)) { + parseGeoPointObjectBasicFields(subParser, point); + } else if (GEOHASH.equals(field)) { + parseGeoHashFields(subParser, point, effectivePoint); + } else if (GEOJSON_TYPE.equals(field) || GEOJSON_COORDS.equals(field)) { + parseGeoJsonFields(subParser, point, ignoreZValue); + } else { + throw 
new OpenSearchParseException(ERR_MSG_INVALID_FIELDS); + } + + if (subParser.nextToken() != XContentParser.Token.END_OBJECT) { + throw new OpenSearchParseException(ERR_MSG_INVALID_FIELDS); + } + + return point; + } + } + + private static GeoPoint parseGeoPointObjectBasicFields(final XContentParser parser, final GeoPoint point) throws IOException { + HashMap data = new HashMap<>(); + for (int i = 0; i < 2; i++) { + if (i != 0) { + parser.nextToken(); + } + + if (parser.currentToken() != XContentParser.Token.FIELD_NAME) { + break; + } + + String field = parser.currentName(); + if (LONGITUDE.equals(field) == false && LATITUDE.equals(field) == false) { + throw new OpenSearchParseException(ERR_MSG_INVALID_FIELDS); + } + switch (parser.nextToken()) { + case VALUE_NUMBER: + case VALUE_STRING: + try { + data.put(field, parser.doubleValue(true)); + } catch (NumberFormatException e) { + throw new OpenSearchParseException("[{}] and [{}] must be valid double values", e, LONGITUDE, LATITUDE); } + break; + default: + throw new OpenSearchParseException("{} must be a number", field); + } + } + + if (data.get(LONGITUDE) == null) { + throw new OpenSearchParseException("field [{}] missing", LONGITUDE); + } + if (data.get(LATITUDE) == null) { + throw new OpenSearchParseException("field [{}] missing", LATITUDE); + } + + return point.reset(data.get(LATITUDE), data.get(LONGITUDE)); + } + + private static GeoPoint parseGeoHashFields( + final XContentParser parser, + final GeoPoint point, + final GeoUtils.EffectivePoint effectivePoint + ) throws IOException { + if (parser.currentToken() != XContentParser.Token.FIELD_NAME) { + throw new OpenSearchParseException(ERR_MSG_INVALID_TOKEN, parser.currentToken()); + } + + if (GEOHASH.equals(parser.currentName()) == false) { + throw new OpenSearchParseException(ERR_MSG_INVALID_FIELDS); + } + + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("{} must be a string", GEOHASH); + } + + return point.parseGeoHash(parser.text(), effectivePoint); + } + + private static GeoPoint parseGeoJsonFields(final XContentParser parser, final GeoPoint point, final boolean ignoreZValue) + throws IOException { + boolean hasTypePoint = false; + boolean hasCoordinates = false; + for (int i = 0; i < 2; i++) { + if (i != 0) { + parser.nextToken(); + } + + if (parser.currentToken() != XContentParser.Token.FIELD_NAME) { + if (hasTypePoint == false) { + throw new OpenSearchParseException("field [{}] missing", GEOJSON_TYPE); + } + if (hasCoordinates == false) { + throw new OpenSearchParseException("field [{}] missing", GEOJSON_COORDS); } } - if (geohash != null) { - if (!Double.isNaN(lat) || !Double.isNaN(lon)) { - throw new OpenSearchParseException("field must be either lat/lon or geohash"); - } else { - return point.parseGeoHash(geohash, effectivePoint); + + if (GEOJSON_TYPE.equals(parser.currentName())) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("{} must be a string", GEOJSON_TYPE); } - } else if (numberFormatException != null) { - throw new OpenSearchParseException("[{}] and [{}] must be valid double values", numberFormatException, LATITUDE, LONGITUDE); - } else if (Double.isNaN(lat)) { - throw new OpenSearchParseException("field [{}] missing", LATITUDE); - } else if (Double.isNaN(lon)) { - throw new OpenSearchParseException("field [{}] missing", LONGITUDE); + + // To be consistent with geo_shape parsing, ignore case here as well. 
+ if (ShapeType.POINT.name().equalsIgnoreCase(parser.text()) == false) { + throw new OpenSearchParseException("{} must be Point", GEOJSON_TYPE); + } + hasTypePoint = true; + } else if (GEOJSON_COORDS.equals(parser.currentName())) { + if (parser.nextToken() != XContentParser.Token.START_ARRAY) { + throw new OpenSearchParseException("{} must be an array", GEOJSON_COORDS); + } + parseGeoPointArray(parser, point, ignoreZValue); + hasCoordinates = true; } else { - return point.reset(lat, lon); + throw new OpenSearchParseException(ERR_MSG_INVALID_FIELDS); } + } - } else if (parser.currentToken() == Token.START_ARRAY) { - try (XContentSubParser subParser = new XContentSubParser(parser)) { - int element = 0; - while (subParser.nextToken() != Token.END_ARRAY) { - if (subParser.currentToken() == Token.VALUE_NUMBER) { - element++; - if (element == 1) { - lon = subParser.doubleValue(); - } else if (element == 2) { - lat = subParser.doubleValue(); - } else if (element == 3) { - GeoPoint.assertZValue(ignoreZValue, subParser.doubleValue()); - } else { - throw new OpenSearchParseException("[geo_point] field type does not accept > 3 dimensions"); - } - } else { - throw new OpenSearchParseException("numeric value expected"); - } + return point; + } + + private static GeoPoint parseGeoPointArray(final XContentParser parser, final GeoPoint point, final boolean ignoreZValue) + throws IOException { + try (XContentSubParser subParser = new XContentSubParser(parser)) { + double x = Double.NaN; + double y = Double.NaN; + + int element = 0; + while (subParser.nextToken() != XContentParser.Token.END_ARRAY) { + if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) { + throw new OpenSearchParseException("numeric value expected"); + } + element++; + if (element == 1) { + x = parser.doubleValue(); + } else if (element == 2) { + y = parser.doubleValue(); + } else if (element == 3) { + GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); + } else { + throw new OpenSearchParseException("[geo_point] field type does not accept more than 3 values"); } } - return point.reset(lat, lon); - } else if (parser.currentToken() == Token.VALUE_STRING) { - String val = parser.text(); - return point.resetFromString(val, ignoreZValue, effectivePoint); - } else { - throw new OpenSearchParseException("geo_point expected"); + + if (element < 2) { + throw new OpenSearchParseException("[geo_point] field type should have at least two dimensions"); + } + return point.reset(y, x); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java index b546eeca1ec0a..305351ca63b9a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -36,11 +36,14 @@ import org.opensearch.common.CheckedBiFunction; import org.opensearch.common.Explicit; import org.opensearch.common.ParseField; -import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.geo.GeometryFormat; import org.opensearch.common.geo.GeometryParser; +import org.opensearch.common.xcontent.DeprecationHandler; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentFactory; import 
org.opensearch.common.xcontent.XContentParser; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.Point; @@ -57,7 +60,7 @@ import static org.opensearch.index.mapper.TypeParsers.parseField; /** - * Base class for for spatial fields that only support indexing points + * Base class for spatial fields that only support indexing points * * @opensearch.internal */ @@ -281,32 +284,27 @@ private P process(P in) { @Override public List
<P>
    parse(XContentParser parser) throws IOException, ParseException { - if (parser.currentToken() == XContentParser.Token.START_ARRAY) { - XContentParser.Token token = parser.nextToken(); - P point = pointSupplier.get(); - ArrayList
<P>
    points = new ArrayList<>(); - if (token == XContentParser.Token.VALUE_NUMBER) { - double x = parser.doubleValue(); - parser.nextToken(); - double y = parser.doubleValue(); - token = parser.nextToken(); - if (token == XContentParser.Token.VALUE_NUMBER) { - GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); - } else if (token != XContentParser.Token.END_ARRAY) { - throw new OpenSearchParseException("field type does not accept > 3 dimensions"); + parser.nextToken(); + if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { + XContentBuilder xContentBuilder = reConstructArrayXContent(parser); + try ( + XContentParser subParser = createParser( + parser.getXContentRegistry(), + parser.getDeprecationHandler(), + xContentBuilder + ); + ) { + return Collections.singletonList(process(objectParser.apply(subParser, pointSupplier.get()))); } - - point.resetCoords(x, y); - points.add(process(point)); } else { - while (token != XContentParser.Token.END_ARRAY) { - points.add(process(objectParser.apply(parser, point))); - point = pointSupplier.get(); - token = parser.nextToken(); + ArrayList
<P>
    points = new ArrayList<>(); + while (parser.currentToken() != XContentParser.Token.END_ARRAY) { + points.add(process(objectParser.apply(parser, pointSupplier.get()))); + parser.nextToken(); } + return points; } - return points; } else if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { if (nullValue == null) { return null; @@ -318,6 +316,35 @@ public List
<P>
    parse(XContentParser parser) throws IOException, ParseException { } } + private XContentParser createParser( + NamedXContentRegistry namedXContentRegistry, + DeprecationHandler deprecationHandler, + XContentBuilder xContentBuilder + ) throws IOException { + XContentParser subParser = xContentBuilder.contentType() + .xContent() + .createParser(namedXContentRegistry, deprecationHandler, BytesReference.bytes(xContentBuilder).streamInput()); + subParser.nextToken(); + return subParser; + } + + private XContentBuilder reConstructArrayXContent(XContentParser parser) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder().startArray(); + int count = 0; + while (parser.currentToken() != XContentParser.Token.END_ARRAY) { + if (++count > 3) { + break; + } + if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) { + throw new OpenSearchParseException("numeric value expected"); + } + builder.value(parser.doubleValue()); + parser.nextToken(); + } + builder.endArray(); + return builder; + } + @Override public Object format(List
<P>
    points, String format) { List result = new ArrayList<>(); diff --git a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java index 6a6e6b190bff3..1b4c95d9ceb8f 100644 --- a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java @@ -352,6 +352,59 @@ public void testInvalidGeopointValuesIgnored() throws Exception { ); } + public void testGeoJsonSingleValue() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + ParsedDocument doc = mapper.parse( + source(b -> b.startObject("field").field("type", "Point").array("coordinates", new double[] { 1.1, 1.2 }).endObject()) + ); + assertThat(doc.rootDoc().getField("field"), notNullValue()); + } + + public void testGeoJsonArray() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + ParsedDocument doc = mapper.parse( + source( + b -> b.startArray("field") + .startObject() + .field("type", "Point") + .array("coordinates", new double[] { 1.1, 1.2 }) + .endObject() + .startObject() + .field("type", "Point") + .array("coordinates", new double[] { 1.3, 1.4 }) + .endObject() + .endArray() + ) + ); + assertThat(doc.rootDoc().getField("field"), notNullValue()); + assertThat(doc.rootDoc().getFields("field"), arrayWithSize(4)); + } + + public void testGeoJsonIgnoreZValue() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_point").field("ignore_z_value", true))); + ParsedDocument doc = mapper.parse( + source(b -> b.startObject("field").field("type", "Point").array("coordinates", new double[] { 1.1, 1.2, 1.3 }).endObject()) + ); + assertThat(doc.rootDoc().getField("field"), notNullValue()); + } + + public void testGeoJsonZValueException() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_point").field("ignore_z_value", false))); + Exception e = expectThrows( + MapperParsingException.class, + () -> mapper.parse( + source(b -> b.startObject("field").field("type", "Point").array("coordinates", new double[] { 1.1, 1.2, 1.3 }).endObject()) + ) + ); + assertThat(e.getCause().getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + } + + public void testGeoJsonIgnoreInvalidForm() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_point").field("ignore_malformed", "true"))); + ParsedDocument doc = mapper.parse(source(b -> b.startObject("field").array("coordinates", new double[] { 1.1, 1.2 }).endObject())); + assertThat(doc.rootDoc().getField("field"), nullValue()); + } + @Override protected GeoPointFieldMapper.Builder newBuilder() { return new GeoPointFieldMapper.Builder("geo"); diff --git a/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java b/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java index 6b937ad881f00..ea8d660b78157 100644 --- a/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java +++ b/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java @@ -50,6 +50,7 @@ import static org.hamcrest.Matchers.is; public class GeoPointParsingTests extends OpenSearchTestCase { + private static final String ERR_MSG_INVALID_FIELDS = "field must be either [lon|lat], 
[type|coordinates], or [geohash]"; private static final double TOLERANCE = 1E-5; public void testGeoPointReset() throws IOException { @@ -142,12 +143,12 @@ public void testInvalidPointEmbeddedObject() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } try (XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser2.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } } @@ -160,12 +161,12 @@ public void testInvalidPointLatHashMix() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } try (XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser2.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } } @@ -179,12 +180,12 @@ public void testInvalidPointLonHashMix() throws IOException { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } try (XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser2.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } } @@ -197,13 +198,13 @@ public void testInvalidField() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } try (XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser2.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } } diff --git a/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java index 
bb4b057d01414..f5e15b0342028 100644 --- a/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java @@ -35,6 +35,8 @@ import org.apache.lucene.spatial.prefix.tree.Cell; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; import org.opensearch.OpenSearchParseException; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; @@ -43,12 +45,10 @@ import org.opensearch.common.xcontent.XContentParser.Token; import org.opensearch.geometry.utils.Geohash; import org.opensearch.test.OpenSearchTestCase; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.distance.DistanceUtils; +import org.opensearch.test.geo.RandomGeoGenerator; import java.io.IOException; -import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsString; @@ -57,8 +57,10 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; public class GeoUtilsTests extends OpenSearchTestCase { + private static final String ERR_MSG_INVALID_FIELDS = "field must be either [lon|lat], [type|coordinates], or [geohash]"; private static final char[] BASE_32 = { '0', '1', @@ -601,7 +603,7 @@ public void testParseGeoPointLonWrongType() throws IOException { try (XContentParser parser = createParser(json)) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("longitude must be a number")); + assertThat(e.getMessage(), is("lon must be a number")); assertThat(parser.currentToken(), is(Token.END_OBJECT)); assertNull(parser.nextToken()); } @@ -613,7 +615,7 @@ public void testParseGeoPointLatWrongType() throws IOException { try (XContentParser parser = createParser(json)) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("latitude must be a number")); + assertThat(e.getMessage(), is("lat must be a number")); assertThat(parser.currentToken(), is(Token.END_OBJECT)); assertNull(parser.nextToken()); } @@ -626,7 +628,7 @@ public void testParseGeoPointExtraField() throws IOException { try (XContentParser parser = createParser(json)) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } } @@ -638,7 +640,7 @@ public void testParseGeoPointLonLatGeoHash() throws IOException { try (XContentParser parser = createParser(json)) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), containsString("field must be either lat/lon or geohash")); + assertThat(e.getMessage(), containsString(ERR_MSG_INVALID_FIELDS)); } } @@ -698,6 +700,70 @@ public void testParseGeoPointInvalidType() throws IOException { } } + public void 
testParserGeoPointGeoJson() throws IOException { + GeoPoint geoPoint = RandomGeoGenerator.randomPoint(random()); + double[] coordinates = { geoPoint.getLon(), geoPoint.getLat() }; + XContentBuilder json1 = jsonBuilder().startObject().field("type", "Point").array("coordinates", coordinates).endObject(); + try (XContentParser parser = createParser(json1)) { + parser.nextToken(); + GeoPoint paredPoint = GeoUtils.parseGeoPoint(parser); + assertEquals(geoPoint, paredPoint); + } + + XContentBuilder json2 = jsonBuilder().startObject().field("type", "PoInT").array("coordinates", coordinates).endObject(); + try (XContentParser parser = createParser(json2)) { + parser.nextToken(); + GeoPoint paredPoint = GeoUtils.parseGeoPoint(parser); + assertEquals(geoPoint, paredPoint); + } + } + + public void testParserGeoPointGeoJsonMissingField() throws IOException { + GeoPoint geoPoint = RandomGeoGenerator.randomPoint(random()); + double[] coordinates = { geoPoint.getLon(), geoPoint.getLat() }; + XContentBuilder missingType = jsonBuilder().startObject().array("coordinates", coordinates).endObject(); + expectParseException(missingType, "field [type] missing"); + + XContentBuilder missingCoordinates = jsonBuilder().startObject().field("type", "Point").endObject(); + expectParseException(missingCoordinates, "field [coordinates] missing"); + } + + public void testParserGeoPointGeoJsonUnknownField() throws IOException { + GeoPoint geoPoint = RandomGeoGenerator.randomPoint(random()); + double[] coordinates = { geoPoint.getLon(), geoPoint.getLat() }; + XContentBuilder unknownField = jsonBuilder().startObject() + .field("type", "Point") + .array("coordinates", coordinates) + .field("unknown", "value") + .endObject(); + expectParseException(unknownField, "field must be either [lon|lat], [type|coordinates], or [geohash]"); + } + + public void testParserGeoPointGeoJsonInvalidValue() throws IOException { + GeoPoint geoPoint = RandomGeoGenerator.randomPoint(random()); + double[] coordinates = { geoPoint.getLon(), geoPoint.getLat() }; + XContentBuilder invalidGeoJsonType = jsonBuilder().startObject() + .field("type", "invalid") + .array("coordinates", coordinates) + .endObject(); + expectParseException(invalidGeoJsonType, "type must be Point"); + + String[] coordinatesInString = { String.valueOf(geoPoint.getLon()), String.valueOf(geoPoint.getLat()) }; + XContentBuilder invalideCoordinatesType = jsonBuilder().startObject() + .field("type", "Point") + .array("coordinates", coordinatesInString) + .endObject(); + expectParseException(invalideCoordinatesType, "numeric value expected"); + } + + private void expectParseException(XContentBuilder content, String errMsg) throws IOException { + try (XContentParser parser = createParser(content)) { + parser.nextToken(); + OpenSearchParseException ex = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertEquals(errMsg, ex.getMessage()); + } + } + public void testPrefixTreeCellSizes() { assertThat(GeoUtils.EARTH_SEMI_MAJOR_AXIS, equalTo(DistanceUtils.EARTH_EQUATORIAL_RADIUS_KM * 1000)); assertThat(GeoUtils.quadTreeCellWidth(0), lessThanOrEqualTo(GeoUtils.EARTH_EQUATOR)); From 04eb8171b8e24222282d430e0b6514822778a77c Mon Sep 17 00:00:00 2001 From: Nishchay Malhotra <114057571+nishchay21@users.noreply.github.com> Date: Thu, 20 Oct 2022 01:04:10 +0530 Subject: [PATCH 073/122] Adding feature to exclude indexes starting with dot from shard validator (#4695) * Adding feature to exclude indexes starting with dot from shard validator Signed-off-by: 
Nishchay Malhotra --- CHANGELOG.md | 1 + .../cluster/shards/ClusterShardLimitIT.java | 306 +++++++++++++++++- .../common/settings/ClusterSettings.java | 1 + .../indices/ShardLimitValidator.java | 71 +++- .../MetadataCreateIndexServiceTests.java | 10 +- .../MetadataIndexTemplateServiceTests.java | 4 +- .../indices/ShardLimitValidatorTests.java | 229 ++++++++++++- 7 files changed, 601 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0fa8716327ea..aae68b7f715d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -163,6 +163,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Addition of GeoShape ValueSource level code interfaces for accessing the DocValues. - Addition of Missing Value feature in the GeoShape Aggregations. - Install and configure Log4j JUL Adapter for Lucene 9.4 ([#4754](https://github.com/opensearch-project/OpenSearch/pull/4754)) +- Added feature to ignore indexes starting with dot during shard limit validation.([#4695](https://github.com/opensearch-project/OpenSearch/pull/4695)) ### Changed ### Deprecated ### Removed diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index a92849a077376..a88d42c07f8d6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -43,15 +43,28 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Priority; +import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.core.internal.io.IOUtils; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotState; +import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.MockHttpTransport; +import org.opensearch.test.NodeConfigurationSource; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.nio.MockNioTransportPlugin; +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.function.Function; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -63,12 +76,18 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class ClusterShardLimitIT extends OpenSearchIntegTestCase { private static final String shardsPerNodeKey = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(); + private static final String ignoreDotIndexKey = ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(); public void testSettingClusterMaxShards() { int shardsPerNode = between(1, 500_000); setShardsPerNode(shardsPerNode); } + public void testSettingIgnoreDotIndexes() { + boolean ignoreDotIndexes = randomBoolean(); + setIgnoreDotIndex(ignoreDotIndexes); + } + public void testMinimumPerNode() { int negativeShardsPerNode = between(-50_000, 0); try { @@ 
-100,7 +119,6 @@ public void testIndexCreationOverLimit() { ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); setShardsPerNode(counts.getShardsPerNode()); - // Create an index that will bring us up to the limit createIndex( "test", @@ -127,6 +145,164 @@ public void testIndexCreationOverLimit() { assertFalse(clusterState.getMetadata().hasIndex("should-fail")); } + /** + * The test checks if the index starting with the dot can be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to true. If the cluster.ignore_Dot_indexes is set to true index creation of + * indexes starting with dot would succeed. + */ + public void testIndexCreationOverLimitForDotIndexesSucceeds() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + setIgnoreDotIndex(true); + + /* + Create an index that will bring us up to the limit. It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, dataNodes * dataNodes) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. + int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + createIndex( + ".test-index", + Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + + clusterState = client().admin().cluster().prepareState().get().getState(); + assertTrue(clusterState.getMetadata().hasIndex(".test-index")); + } + + /** + * The test checks if the index starting with the dot should not be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to false. If the cluster.ignore_Dot_indexes is set to false index creation of + * indexes starting with dot would fail as well. + */ + public void testIndexCreationOverLimitForDotIndexesFail() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + int maxAllowedShards = dataNodes * dataNodes; + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + + /* + Create an index that will bring us up to the limit. It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, maxAllowedShards) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. 
+ int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + int extraShardCount = 1; + try { + createIndex( + ".test-index", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, extraShardCount) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + } catch (IllegalArgumentException e) { + verifyException(maxAllowedShards, currentActiveShards, extraShardCount, e); + } + clusterState = client().admin().cluster().prepareState().get().getState(); + assertFalse(clusterState.getMetadata().hasIndex(".test-index")); + } + + /** + * The test checks if the index starting with the .ds- can be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to true. If the cluster.ignore_Dot_indexes is set to true index creation of + * indexes starting with dot would only succeed and dataStream indexes would still have validation applied. + */ + public void testIndexCreationOverLimitForDataStreamIndexes() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + int maxAllowedShards = dataNodes * dataNodes; + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + setIgnoreDotIndex(true); + + /* + Create an index that will bring us up to the limit. It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, maxAllowedShards) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. 
+ int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + int extraShardCount = 1; + try { + createIndex( + ".ds-test-index", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, extraShardCount) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + } catch (IllegalArgumentException e) { + verifyException(maxAllowedShards, currentActiveShards, extraShardCount, e); + } + clusterState = client().admin().cluster().prepareState().get().getState(); + assertFalse(clusterState.getMetadata().hasIndex(".ds-test-index")); + } + public void testIndexCreationOverLimitFromTemplate() { int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); @@ -414,6 +590,100 @@ public void testOpenIndexOverLimit() { assertFalse(clusterState.getMetadata().hasIndex("snapshot-index")); } + public void testIgnoreDotSettingOnMultipleNodes() throws IOException, InterruptedException { + int maxAllowedShardsPerNode = 10, indexPrimaryShards = 11, indexReplicaShards = 1; + + InternalTestCluster cluster = new InternalTestCluster( + randomLong(), + createTempDir(), + true, + true, + 0, + 0, + "cluster", + new NodeConfigurationSource() { + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(ClusterShardLimitIT.this.nodeSettings(nodeOrdinal)) + .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) + .build(); + } + + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + }, + 0, + "cluster-", + Arrays.asList( + TestSeedPlugin.class, + MockHttpTransport.TestPlugin.class, + MockTransportService.TestPlugin.class, + MockNioTransportPlugin.class, + InternalSettingsPlugin.class, + MockRepository.Plugin.class + ), + Function.identity() + ); + cluster.beforeTest(random()); + + // Starting 3 ClusterManagerOnlyNode nodes + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", true).build()); + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + + // Starting 2 data nodes + cluster.startDataOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + cluster.startDataOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + + // Setting max shards per node to be 10 + cluster.client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, maxAllowedShardsPerNode)) + .get(); + + // Creating an index starting with dot having shards greater thn the desired node limit + cluster.client() + .admin() + .indices() + .prepareCreate(".test-index") + .setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, indexPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, indexReplicaShards) + ) + .get(); + + // As active ClusterManagerNode setting takes precedence killing the active one. 
+ // This would be the first one where cluster.ignore_dot_indexes is true because the above calls are blocking. + cluster.stopCurrentClusterManagerNode(); + + // Waiting for all shards to get assigned + cluster.client().admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + + // Creating an index starting with dot having shards greater thn the desired node limit + try { + cluster.client() + .admin() + .indices() + .prepareCreate(".test-index1") + .setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, indexPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, indexReplicaShards) + ) + .get(); + } catch (IllegalArgumentException e) { + ClusterHealthResponse clusterHealth = cluster.client().admin().cluster().prepareHealth().get(); + int currentActiveShards = clusterHealth.getActiveShards(); + int dataNodeCount = clusterHealth.getNumberOfDataNodes(); + int extraShardCount = indexPrimaryShards * (1 + indexReplicaShards); + verifyException(maxAllowedShardsPerNode * dataNodeCount, currentActiveShards, extraShardCount, e); + } + + IOUtils.close(cluster); + } + private int ensureMultipleDataNodes(int dataNodes) { if (dataNodes == 1) { internalCluster().startNode(dataNode()); @@ -457,6 +727,29 @@ private void setShardsPerNode(int shardsPerNode) { } } + private void setIgnoreDotIndex(boolean ignoreDotIndex) { + try { + ClusterUpdateSettingsResponse response; + if (frequently()) { + response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(ignoreDotIndexKey, ignoreDotIndex).build()) + .get(); + assertEquals(ignoreDotIndex, response.getPersistentSettings().getAsBoolean(ignoreDotIndexKey, true)); + } else { + response = client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(ignoreDotIndexKey, ignoreDotIndex).build()) + .get(); + assertEquals(ignoreDotIndex, response.getTransientSettings().getAsBoolean(ignoreDotIndexKey, true)); + } + } catch (IllegalArgumentException ex) { + fail(ex.getMessage()); + } + } + private void verifyException(int dataNodes, ShardCounts counts, IllegalArgumentException e) { int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); @@ -471,4 +764,15 @@ private void verifyException(int dataNodes, ShardCounts counts, IllegalArgumentE assertEquals(expectedError, e.getMessage()); } + private void verifyException(int maxShards, int currentShards, int extraShards, IllegalArgumentException e) { + String expectedError = "Validation Failed: 1: this action would add [" + + extraShards + + "] total shards, but this cluster currently has [" + + currentShards + + "]/[" + + maxShards + + "] maximum shards open;"; + assertEquals(expectedError, e.getMessage()); + } + } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 320cb5457b21c..12a3a883af347 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -255,6 +255,7 @@ public void apply(Settings value, Settings current, Settings previous) { Metadata.SETTING_READ_ONLY_SETTING, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING, ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE, + ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES, 
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, diff --git a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java index a5ec6cbecaf55..e803e387448bc 100644 --- a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java +++ b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java @@ -33,6 +33,7 @@ package org.opensearch.indices; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.ValidationException; @@ -64,12 +65,23 @@ public class ShardLimitValidator { Setting.Property.Dynamic, Setting.Property.NodeScope ); + + public static final Setting SETTING_CLUSTER_IGNORE_DOT_INDEXES = Setting.boolSetting( + "cluster.ignore_dot_indexes", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + protected final AtomicInteger shardLimitPerNode = new AtomicInteger(); private final SystemIndices systemIndices; + private volatile boolean ignoreDotIndexes; public ShardLimitValidator(final Settings settings, ClusterService clusterService, SystemIndices systemIndices) { this.shardLimitPerNode.set(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(settings)); + this.ignoreDotIndexes = SETTING_CLUSTER_IGNORE_DOT_INDEXES.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(SETTING_CLUSTER_MAX_SHARDS_PER_NODE, this::setShardLimitPerNode); + clusterService.getClusterSettings().addSettingsUpdateConsumer(SETTING_CLUSTER_IGNORE_DOT_INDEXES, this::setIgnoreDotIndexes); this.systemIndices = systemIndices; } @@ -85,8 +97,15 @@ public int getShardLimitPerNode() { return shardLimitPerNode.get(); } + private void setIgnoreDotIndexes(boolean newValue) { + this.ignoreDotIndexes = newValue; + } + /** * Checks whether an index can be created without going over the cluster shard limit. + * Validate shard limit only for non system indices as it is not hard limit anyways. + * Further also validates if the cluster.ignore_dot_indexes is set to true. + * If so then it does not validate any index which starts with '.' except data-stream index. * * @param indexName the name of the index being created * @param settings the settings of the index to be created @@ -94,8 +113,12 @@ public int getShardLimitPerNode() { * @throws ValidationException if creating this index would put the cluster over the cluster shard limit */ public void validateShardLimit(final String indexName, final Settings settings, final ClusterState state) { - // Validate shard limit only for non system indices as it is not hard limit anyways - if (systemIndices.validateSystemIndex(indexName)) { + /* + Validate shard limit only for non system indices as it is not hard limit anyways. + Further also validates if the cluster.ignore_dot_indexes is set to true. + If so then it does not validate any index which starts with '.'. + */ + if (shouldIndexBeIgnored(indexName)) { return; } @@ -113,7 +136,9 @@ public void validateShardLimit(final String indexName, final Settings settings, /** * Validates whether a list of indices can be opened without going over the cluster shard limit. Only counts indices which are - * currently closed and will be opened, ignores indices which are already open. 
+ * currently closed and will be opened, ignores indices which are already open. Adding to this it validates the + * shard limit only for non system indices and if the cluster.ignore_dot_indexes property is set to true + * then the indexes starting with '.' are ignored except the data-stream indexes. * * @param currentState The current cluster state. * @param indicesToOpen The indices which are to be opened. @@ -121,8 +146,13 @@ public void validateShardLimit(final String indexName, final Settings settings, */ public void validateShardLimit(ClusterState currentState, Index[] indicesToOpen) { int shardsToOpen = Arrays.stream(indicesToOpen) - // Validate shard limit only for non system indices as it is not hard limit anyways - .filter(index -> !systemIndices.validateSystemIndex(index.getName())) + /* + Validate shard limit only for non system indices as it is not hard limit anyways. + Further also validates if the cluster.ignore_dot_indexes is set to true. + If so then it does not validate any index which starts with '.' + however data-stream indexes are still validated. + */ + .filter(index -> !shouldIndexBeIgnored(index.getName())) .filter(index -> currentState.metadata().index(index).getState().equals(IndexMetadata.State.CLOSE)) .mapToInt(index -> getTotalShardCount(currentState, index)) .sum(); @@ -140,6 +170,37 @@ private static int getTotalShardCount(ClusterState state, Index index) { return indexMetadata.getNumberOfShards() * (1 + indexMetadata.getNumberOfReplicas()); } + /** + * Returns true if the index should be ignored during validation. + * Index is ignored if it is a system index or if cluster.ignore_dot_indexes is set to true + * then indexes which are starting with dot and are not data stream index are ignored. + * + * @param indexName The index which needs to be validated. + */ + private boolean shouldIndexBeIgnored(String indexName) { + if (this.ignoreDotIndexes) { + return validateDotIndex(indexName) && !isDataStreamIndex(indexName); + } else return systemIndices.validateSystemIndex(indexName); + } + + /** + * Returns true if the index name starts with '.' else false. + * + * @param indexName The index which needs to be validated. + */ + private boolean validateDotIndex(String indexName) { + return indexName.charAt(0) == '.'; + } + + /** + * Returns true if the index is dataStreamIndex false otherwise. + * + * @param indexName The index which needs to be validated. + */ + private boolean isDataStreamIndex(String indexName) { + return indexName.startsWith(DataStream.BACKING_INDEX_PREFIX); + } + /** * Checks to see if an operation can be performed without taking the cluster over the cluster-wide shard limit. * Returns an error message if appropriate, or an empty {@link Optional} otherwise. 
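The decision encoded by the new shouldIndexBeIgnored method above can be condensed into a small, self-contained sketch. This is illustrative only, not the production class: the class name DotIndexIgnoreRule and the isSystemIndex predicate are invented stand-ins for SystemIndices#validateSystemIndex, and the ".ds-" literal mirrors DataStream.BACKING_INDEX_PREFIX (the prefix the new tests use for data-stream backing indexes).

import java.util.function.Predicate;

final class DotIndexIgnoreRule {
    // Stand-in for DataStream.BACKING_INDEX_PREFIX
    private static final String BACKING_INDEX_PREFIX = ".ds-";

    private final boolean ignoreDotIndexes;          // value of cluster.ignore_dot_indexes
    private final Predicate<String> isSystemIndex;   // stand-in for SystemIndices#validateSystemIndex

    DotIndexIgnoreRule(boolean ignoreDotIndexes, Predicate<String> isSystemIndex) {
        this.ignoreDotIndexes = ignoreDotIndexes;
        this.isSystemIndex = isSystemIndex;
    }

    // Returns true when the index should be skipped by shard-limit validation.
    boolean shouldIgnore(String indexName) {
        if (ignoreDotIndexes) {
            // Dot-prefixed indexes skip validation, but data-stream backing indexes are still counted.
            return indexName.startsWith(".") && !indexName.startsWith(BACKING_INDEX_PREFIX);
        }
        // With the setting disabled, only system indexes are exempt (pre-existing behaviour).
        return isSystemIndex.test(indexName);
    }

    public static void main(String[] args) {
        DotIndexIgnoreRule rule = new DotIndexIgnoreRule(true, name -> false);
        System.out.println(rule.shouldIgnore(".test-index"));    // true  -> exempt from the shard limit
        System.out.println(rule.shouldIgnore(".ds-test-index")); // false -> still validated
        System.out.println(rule.shouldIgnore("regular-index"));  // false -> still validated
    }
}

As the example output suggests, enabling cluster.ignore_dot_indexes exempts only plain dot-prefixed indexes; data-stream backing indexes and ordinary indexes remain subject to the per-node shard limit, which is the behaviour exercised by the integration and unit tests in this patch.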
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index ac03ab49bbbbd..5348fd0a778f7 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -592,7 +592,7 @@ public void testValidateIndexName() throws Exception { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), null, null, threadPool, @@ -674,7 +674,7 @@ public void testValidateDotIndex() { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), null, null, threadPool, @@ -1090,7 +1090,7 @@ public void testvalidateIndexSettings() { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), new Environment(Settings.builder().put("path.home", "dummy").build(), null), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, threadPool, @@ -1234,7 +1234,7 @@ public void testIndexLifecycleNameSetting() { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), new Environment(Settings.builder().put("path.home", "dummy").build(), null), new IndexScopedSettings(ilnSetting, Collections.emptySet()), threadPool, @@ -1311,7 +1311,7 @@ private static Map convertMappings(ImmutableOpenMap< } private ShardLimitValidator randomShardLimitService() { - return createTestShardLimitService(randomIntBetween(10, 10000)); + return createTestShardLimitService(randomIntBetween(10, 10000), false); } private void withTemporaryClusterService(BiConsumer consumer) { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 887d8469bd01c..aa1c500030bd6 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -2100,7 +2100,7 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000)), + createTestShardLimitService(randomIntBetween(1, 1000), false), new Environment(builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(), null), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, null, @@ -2163,7 +2163,7 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { indicesService, null, null, - createTestShardLimitService(randomIntBetween(1, 1000)), + createTestShardLimitService(randomIntBetween(1, 1000), false), new Environment(builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(), null), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, null, diff --git a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java index 60c467e3864e9..f19689565dd92 100644 --- 
a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java +++ b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java @@ -59,6 +59,7 @@ import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addClosedIndex; import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addOpenedIndex; import static org.opensearch.cluster.shards.ShardCounts.forDataNodeCount; +import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES; import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -113,7 +114,7 @@ public void testUnderShardLimit() { * even though it exceeds the cluster max shard limit */ public void testSystemIndexCreationSucceeds() { - final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1); + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, false); final Settings settings = Settings.builder() .put(SETTING_VERSION_CREATED, Version.CURRENT) .put(SETTING_NUMBER_OF_SHARDS, 1) @@ -128,7 +129,7 @@ public void testSystemIndexCreationSucceeds() { * fails when it exceeds the cluster max shard limit */ public void testNonSystemIndexCreationFails() { - final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1); + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, false); final Settings settings = Settings.builder() .put(SETTING_VERSION_CREATED, Version.CURRENT) .put(SETTING_NUMBER_OF_SHARDS, 1) @@ -151,6 +152,92 @@ public void testNonSystemIndexCreationFails() { ); } + /** + * This test validates that index starting with dot creation Succeeds + * when the setting cluster.ignore_dot_indexes is set to true. + */ + public void testDotIndexCreationSucceeds() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, true); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + shardLimitValidator.validateShardLimit(".test-index", settings, state); + } + + /** + * This test validates that index starting with dot creation fails + * when the setting cluster.ignore_dot_indexes is set to false. + */ + public void testDotIndexCreationFails() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, false); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + final ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(".test-index", settings, state) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + 2 + + "] total shards, but this cluster currently has [" + + 1 + + "]/[" + + 1 + + "] maximum shards open;", + exception.getMessage() + ); + } + + /** + * This test validates that dataStream index creation fails + * when the cluster.ignore_dot_indexes is set to true, and we reach the max shard per node limit. 
+ */ + public void testDataStreamIndexCreationFails() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, true); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + final ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(".ds-test-index", settings, state) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + 2 + + "] total shards, but this cluster currently has [" + + 1 + + "]/[" + + 1 + + "] maximum shards open;", + exception.getMessage() + ); + } + + /** + * This test validates that dataStream index creation succeeds + * when the cluster.ignore_dot_indexes is set to true, and we don't reach the max shard per node limit. + */ + public void testDataStreamIndexCreationSucceeds() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(4, true); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + shardLimitValidator.validateShardLimit(".ds-test-index", settings, state); + } + /** * This test validates that non-system index opening * fails when it exceeds the cluster max shard limit @@ -174,7 +261,7 @@ public void testNonSystemIndexOpeningFails() { int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); int maxShards = counts.getShardsPerNode() * nodesInCluster; - ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode()); + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), false); ValidationException exception = expectThrows( ValidationException.class, () -> shardLimitValidator.validateShardLimit(state, indices) @@ -214,10 +301,124 @@ public void testSystemIndexOpeningSucceeds() { .toArray(new Index[2]); // Shard limit validation succeeds without any issues as system index is being opened - ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode()); + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), false); + shardLimitValidator.validateShardLimit(state, indices); + } + + /** + * This test validates that index having '.' in the first character + * opening of such indexes succeeds even when it exceeds the cluster max shard limit if the + * cluster.ignore_dot_indexes setting is set to true. 
+ */ + public void testDotIndexOpeningSucceeds() { + int nodesInCluster = randomIntBetween(2, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + ClusterState state = createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + counts.getFirstIndexShards(), + counts.getFirstIndexReplicas(), + ".test-index", // Adding closed index starting with dot to cluster state + counts.getFailingIndexShards(), + counts.getFailingIndexReplicas() + ); + + Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class)) + .map(IndexMetadata::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + // Shard limit validation succeeds without any issues + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), true); shardLimitValidator.validateShardLimit(state, indices); } + /** + * This test validates that index having '.' in the first character + * opening fails when it exceeds the cluster max shard limit if the + * cluster.ignore_dot_indexes is set to false. + */ + public void testDotIndexOpeningFails() { + int nodesInCluster = randomIntBetween(2, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + ClusterState state = createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + counts.getFirstIndexShards(), + counts.getFirstIndexReplicas(), + ".test-index", // Adding closed index starting with dot to cluster state + counts.getFailingIndexShards(), + counts.getFailingIndexReplicas() + ); + + Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class)) + .map(IndexMetadata::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); + int maxShards = counts.getShardsPerNode() * nodesInCluster; + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), false); + ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(state, indices) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + totalShards + + "] total shards, but this cluster currently has [" + + currentShards + + "]/[" + + maxShards + + "] maximum shards open;", + exception.getMessage() + ); + } + + /** + * This test validates that index starting with '.ds-' + * opening fails when it exceeds the cluster max shard limit if the + * cluster.ignore_dot_indexes is set to true. 
+ */ + public void testDataStreamIndexOpeningFails() { + int nodesInCluster = randomIntBetween(2, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + ClusterState state = createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + counts.getFirstIndexShards(), + counts.getFirstIndexReplicas(), + ".ds-test-index", // Adding closed data stream index to cluster state + counts.getFailingIndexShards(), + counts.getFailingIndexReplicas() + ); + + Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class)) + .map(IndexMetadata::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); + int maxShards = counts.getShardsPerNode() * nodesInCluster; + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), true); + ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(state, indices) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + totalShards + + "] total shards, but this cluster currently has [" + + currentShards + + "]/[" + + maxShards + + "] maximum shards open;", + exception.getMessage() + ); + } + public static ClusterState createClusterForShardLimitTest(int nodesInCluster, int shardsInIndex, int replicas) { ImmutableOpenMap.Builder dataNodes = ImmutableOpenMap.builder(); for (int i = 0; i < nodesInCluster; i++) { @@ -292,12 +493,16 @@ public static ClusterState createClusterForShardLimitTest( * Creates a {@link ShardLimitValidator} for testing with the given setting and a mocked cluster service. * * @param maxShardsPerNode the value to use for the max shards per node setting + * @param ignoreDotIndexes validates if index starting with dot should be ignored or not * @return a test instance */ - public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNode) { + public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNode, boolean ignoreDotIndexes) { // Use a mocked clusterService - for unit tests we won't be updating the setting anyway. ClusterService clusterService = mock(ClusterService.class); - Settings limitOnlySettings = Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode).build(); + Settings limitOnlySettings = Settings.builder() + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode) + .put(SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(), ignoreDotIndexes) + .build(); when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings(limitOnlySettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); @@ -309,11 +514,19 @@ public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNo * Creates a {@link ShardLimitValidator} for testing with the given setting and a given cluster service. 
* * @param maxShardsPerNode the value to use for the max shards per node setting + * @param ignoreDotIndexes validates if index starting with dot should be ignored or not * @param clusterService the cluster service to use * @return a test instance */ - public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNode, ClusterService clusterService) { - Settings limitOnlySettings = Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode).build(); + public static ShardLimitValidator createTestShardLimitService( + int maxShardsPerNode, + boolean ignoreDotIndexes, + ClusterService clusterService + ) { + Settings limitOnlySettings = Settings.builder() + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode) + .put(SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(), ignoreDotIndexes) + .build(); return new ShardLimitValidator(limitOnlySettings, clusterService, new SystemIndices(emptyMap())); } From 3af46aeceed6b710301bac3318b2f4554d6f11e4 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 19 Oct 2022 19:30:04 -0400 Subject: [PATCH 074/122] Addressing 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ (#4827) * Addressing 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ Signed-off-by: Andriy Redko * Addressing code review comments Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../opensearch/client/RestClientBuilder.java | 14 +++++++++++++- .../documentation/RestClientDocumentation.java | 18 ++++++++++++++++++ .../index/reindex/ReindexSslConfig.java | 11 +++++++++++ .../test/rest/OpenSearchRestTestCase.java | 15 ++++++++++++++- 5 files changed, 57 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aae68b7f715d7..9f7e9b02eb764 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -149,6 +149,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix decommission status update to non leader nodes ([4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) - Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) - Fix bug in AwarenessAttributeDecommissionIT([4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) +- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java index 679a7ccb17d49..a01cf2f403099 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java @@ -32,8 +32,10 @@ package org.opensearch.client; +import org.apache.hc.core5.function.Factory; import org.apache.hc.core5.http.Header; import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.ssl.TlsDetails; import org.apache.hc.core5.util.Timeout; import org.apache.hc.client5.http.async.HttpAsyncClient; import org.apache.hc.client5.http.auth.CredentialsProvider; @@ -48,6 +50,7 @@ import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; import java.security.AccessController; 
import java.security.NoSuchAlgorithmException; @@ -311,7 +314,16 @@ private CloseableHttpAsyncClient createHttpClient() { } try { - final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(SSLContext.getDefault()).build(); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(SSLContext.getDefault()) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) + .build(); final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE) diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index f4c1c98dd4ce9..b2807d35d230e 100644 --- a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -40,6 +40,7 @@ import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.core5.function.Factory; import org.apache.hc.core5.http.ContentType; import org.apache.hc.core5.http.Header; import org.apache.hc.core5.http.HttpEntity; @@ -51,6 +52,7 @@ import org.apache.hc.core5.http.message.RequestLine; import org.apache.hc.core5.http.nio.ssl.TlsStrategy; import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.reactor.ssl.TlsDetails; import org.apache.hc.core5.ssl.SSLContextBuilder; import org.apache.hc.core5.ssl.SSLContexts; import org.apache.hc.core5.util.Timeout; @@ -67,6 +69,8 @@ import org.opensearch.client.RestClientBuilder.HttpClientConfigCallback; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; + import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; @@ -429,6 +433,13 @@ public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() .setSslContext(sslContext) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) .build(); final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() @@ -463,6 +474,13 @@ public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() .setSslContext(sslContext) + // See please https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) .build(); final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() diff --git 
a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java index f8e9018bce6df..0e0e387b78e38 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java @@ -35,7 +35,9 @@ import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; import org.apache.hc.client5.http.ssl.DefaultHostnameVerifier; import org.apache.hc.client5.http.ssl.NoopHostnameVerifier; +import org.apache.hc.core5.function.Factory; import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.ssl.TlsDetails; import org.opensearch.common.settings.SecureSetting; import org.opensearch.common.settings.SecureString; import org.opensearch.common.settings.Setting; @@ -50,6 +52,8 @@ import javax.net.ssl.HostnameVerifier; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; + import java.io.IOException; import java.io.UncheckedIOException; import java.nio.file.Path; @@ -178,6 +182,13 @@ TlsStrategy getStrategy() { .setHostnameVerifier(hostnameVerifier) .setCiphers(cipherSuites) .setTlsVersions(protocols) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) .build(); } diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index 348ea0a924b70..0b384f07cc7ea 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -37,12 +37,14 @@ import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.core5.function.Factory; import org.apache.hc.core5.http.Header; import org.apache.hc.core5.http.HttpHost; import org.apache.hc.core5.http.HttpStatus; import org.apache.hc.core5.http.io.entity.EntityUtils; import org.apache.hc.core5.http.message.BasicHeader; import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.ssl.TlsDetails; import org.apache.hc.core5.ssl.SSLContexts; import org.apache.hc.core5.util.Timeout; import org.apache.lucene.util.SetOnce; @@ -85,6 +87,8 @@ import org.junit.Before; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; + import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -851,7 +855,16 @@ protected static void configureClient(RestClientBuilder builder, Settings settin } final SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(keyStore, null).build(); builder.setHttpClientConfigCallback(httpClientBuilder -> { - final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(sslcontext).build(); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslcontext) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new 
TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) + .build(); final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() .setTlsStrategy(tlsStrategy) From 334473872ae33de53958c35c50849ed2d2c1bee8 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Thu, 20 Oct 2022 19:07:14 +0530 Subject: [PATCH 075/122] Fail weight update when decommission ongoing and fail decommission when attribute not weighed away (#4839) * Add checks for decommission before setting weights Signed-off-by: Rishab Nahata --- CHANGELOG.md | 1 + .../AwarenessAttributeDecommissionIT.java | 69 +++++++++++++++ .../decommission/DecommissionService.java | 28 ++++++ .../routing/WeightedRoutingService.java | 15 ++++ .../DecommissionServiceTests.java | 64 ++++++++++++++ .../routing/WeightedRoutingServiceTests.java | 86 +++++++++++++++++++ 6 files changed, 263 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f7e9b02eb764..1e5d7a3b57862 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,6 +92,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Refactored BalancedAllocator.Balancer to LocalShardsBalancer ([#4761](https://github.com/opensearch-project/OpenSearch/pull/4761)) +- Fail weight update when decommission ongoing and fail decommission when attribute not weighed away ([#4839](https://github.com/opensearch-project/OpenSearch/pull/4839)) ### Deprecated ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java index a2270d63ba6fa..2dc964e3e8845 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -20,11 +20,15 @@ import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.decommission.DecommissioningFailedException; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.routing.WeightedRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; @@ -37,6 +41,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import 
java.util.concurrent.ExecutionException; import static org.opensearch.test.NodeRoles.onlyRole; @@ -102,6 +107,17 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx ensureStableCluster(6); + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse weightedRoutingResponse = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertTrue(weightedRoutingResponse.isAcknowledged()); + logger.info("--> starting decommissioning nodes in zone {}", 'c'); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); @@ -162,4 +178,57 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx // as by then all nodes should have joined the cluster ensureStableCluster(6, TimeValue.timeValueMinutes(2)); } + + public void testDecommissionFailedWhenAttributeNotWeighedAway() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + // Start 3 cluster manager eligible nodes + internalCluster().startClusterManagerOnlyNodes(3, Settings.builder().put(commonSettings).build()); + // start 3 data nodes + internalCluster().startDataOnlyNodes(3, Settings.builder().put(commonSettings).build()); + ensureStableCluster(6); + ClusterHealthResponse health = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setWaitForNodes(Integer.toString(6)) + .execute() + .actionGet(); + assertFalse(health.isTimedOut()); + + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); + DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); + assertBusy(() -> { + DecommissioningFailedException ex = expectThrows( + DecommissioningFailedException.class, + () -> client().execute(DecommissionAction.INSTANCE, decommissionRequest).actionGet() + ); + assertTrue( + ex.getMessage() + .contains("no weights are set to the attribute. 
Please set appropriate weights before triggering decommission action") + ); + }); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 1.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse weightedRoutingResponse = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertTrue(weightedRoutingResponse.isAcknowledged()); + + assertBusy(() -> { + DecommissioningFailedException ex = expectThrows( + DecommissioningFailedException.class, + () -> client().execute(DecommissionAction.INSTANCE, decommissionRequest).actionGet() + ); + assertTrue(ex.getMessage().contains("weight for decommissioned attribute is expected to be [0.0] but found [1.0]")); + }); + } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index b2c8bfbc0cdc8..e6639ae058066 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -20,7 +20,9 @@ import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.WeightedRouting; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; @@ -129,6 +131,8 @@ public ClusterState execute(ClusterState currentState) { DecommissionAttributeMetadata decommissionAttributeMetadata = currentState.metadata().decommissionAttributeMetadata(); // check that request is eligible to proceed ensureEligibleRequest(decommissionAttributeMetadata, decommissionAttribute); + // ensure attribute is weighed away + ensureToBeDecommissionedAttributeWeighedAway(currentState, decommissionAttribute); decommissionAttributeMetadata = new DecommissionAttributeMetadata(decommissionAttribute); logger.info("registering decommission metadata [{}] to execute action", decommissionAttributeMetadata.toString()); return ClusterState.builder(currentState) @@ -413,6 +417,30 @@ private static void validateAwarenessAttribute( } } + private static void ensureToBeDecommissionedAttributeWeighedAway(ClusterState state, DecommissionAttribute decommissionAttribute) { + WeightedRoutingMetadata weightedRoutingMetadata = state.metadata().weightedRoutingMetadata(); + if (weightedRoutingMetadata == null) { + throw new DecommissioningFailedException( + decommissionAttribute, + "no weights are set to the attribute. 
Please set appropriate weights before triggering decommission action" + ); + } + WeightedRouting weightedRouting = weightedRoutingMetadata.getWeightedRouting(); + if (weightedRouting.attributeName().equals(decommissionAttribute.attributeName()) == false) { + throw new DecommissioningFailedException( + decommissionAttribute, + "no weights are specified to attribute [" + decommissionAttribute.attributeName() + "]" + ); + } + Double attributeValueWeight = weightedRouting.weights().get(decommissionAttribute.attributeValue()); + if (attributeValueWeight == null || attributeValueWeight.equals(0.0) == false) { + throw new DecommissioningFailedException( + decommissionAttribute, + "weight for decommissioned attribute is expected to be [0.0] but found [" + attributeValueWeight + "]" + ); + } + } + private static void ensureEligibleRequest( DecommissionAttributeMetadata decommissionAttributeMetadata, DecommissionAttribute requestedDecommissionAttribute diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java index 54f2c81aea384..6acb4a1e832cb 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java @@ -19,6 +19,8 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; @@ -68,6 +70,8 @@ public void registerWeightedRoutingMetadata( clusterService.submitStateUpdateTask("update_weighted_routing", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { + // verify currently no decommission action is ongoing + ensureNoOngoingDecommissionAction(currentState); Metadata metadata = currentState.metadata(); Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); WeightedRoutingMetadata weightedRoutingMetadata = metadata.custom(WeightedRoutingMetadata.TYPE); @@ -154,4 +158,15 @@ public void verifyAwarenessAttribute(String attributeName) { throw validationException; } } + + public void ensureNoOngoingDecommissionAction(ClusterState state) { + DecommissionAttributeMetadata decommissionAttributeMetadata = state.metadata().decommissionAttributeMetadata(); + if (decommissionAttributeMetadata != null && decommissionAttributeMetadata.status().equals(DecommissionStatus.FAILED) == false) { + throw new IllegalStateException( + "a decommission action is ongoing with status [" + + decommissionAttributeMetadata.status().status() + + "], cannot update weight during this state" + ); + } + } } diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 7dee51b7713f9..c4cf6c7cc6641 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -22,14 +22,17 @@ import org.opensearch.cluster.ClusterState; import 
org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.WeightedRouting; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; @@ -169,6 +172,56 @@ public void onFailure(Exception e) { assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } + public void testDecommissionNotStartedWithoutWeighingAwayAttribute_1() throws InterruptedException { + Map weights = Map.of("zone_1", 1.0, "zone_2", 1.0, "zone_3", 0.0); + setWeightedRoutingWeights(weights); + final CountDownLatch countDownLatch = new CountDownLatch(1); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone_1"); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(DecommissionResponse decommissionResponse) { + fail("on response shouldn't have been called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof DecommissioningFailedException); + assertThat( + e.getMessage(), + Matchers.containsString("weight for decommissioned attribute is expected to be [0.0] but found [1.0]") + ); + countDownLatch.countDown(); + } + }; + decommissionService.startDecommissionAction(decommissionAttribute, listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + + public void testDecommissionNotStartedWithoutWeighingAwayAttribute_2() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone_1"); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(DecommissionResponse decommissionResponse) { + fail("on response shouldn't have been called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof DecommissioningFailedException); + assertThat( + e.getMessage(), + Matchers.containsString( + "no weights are set to the attribute. 
Please set appropriate weights before triggering decommission action" + ) + ); + countDownLatch.countDown(); + } + }; + decommissionService.startDecommissionAction(decommissionAttribute, listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + @SuppressWarnings("unchecked") public void testDecommissioningFailedWhenAnotherAttributeDecommissioningSuccessful() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); @@ -286,6 +339,17 @@ public void onFailure(Exception e) { assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } + private void setWeightedRoutingWeights(Map weights) { + ClusterState clusterState = clusterService.state(); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting); + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); + metadataBuilder.putCustom(WeightedRoutingMetadata.TYPE, weightedRoutingMetadata); + clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); + ClusterState.Builder builder = ClusterState.builder(clusterState); + ClusterServiceUtils.setState(clusterService, builder); + } + private ClusterState addDataNodes(ClusterState clusterState, String zone, String... nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newDataNode(nodeId, singletonMap("zone", zone)))); diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java index fc5d46ef84c79..91b8703cacf5c 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.routing; +import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.Before; import org.opensearch.Version; @@ -21,6 +22,9 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -43,6 +47,11 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; public class WeightedRoutingServiceTests extends OpenSearchTestCase { private ThreadPool threadPool; @@ -173,6 +182,15 @@ private ClusterState setWeightedRoutingWeights(ClusterState clusterState, Map weights = Map.of("zone_A", 1.0, "zone_B", 1.0, "zone_C", 1.0); ClusterState state = clusterService.state(); @@ -276,4 +294,72 @@ public void testVerifyAwarenessAttribute_ValidAttributeName() { fail("verify awareness attribute should not fail"); } } + + public void testAddWeightedRoutingFailsWhenDecommissionOngoing() throws InterruptedException 
{
+        Map<String, Double> weights = Map.of("zone_A", 1.0, "zone_B", 1.0, "zone_C", 1.0);
+        DecommissionStatus status = randomFrom(DecommissionStatus.INIT, DecommissionStatus.IN_PROGRESS, DecommissionStatus.SUCCESSFUL);
+        ClusterState state = clusterService.state();
+        state = setWeightedRoutingWeights(state, weights);
+        state = setDecommissionAttribute(state, status);
+        ClusterState.Builder builder = ClusterState.builder(state);
+        ClusterServiceUtils.setState(clusterService, builder);
+
+        ClusterPutWeightedRoutingRequestBuilder request = new ClusterPutWeightedRoutingRequestBuilder(
+            client,
+            ClusterAddWeightedRoutingAction.INSTANCE
+        );
+        WeightedRouting updatedWeightedRouting = new WeightedRouting("zone", weights);
+        request.setWeightedRouting(updatedWeightedRouting);
+        final CountDownLatch countDownLatch = new CountDownLatch(1);
+        final AtomicReference<Exception> exceptionReference = new AtomicReference<>();
+        ActionListener<ClusterStateUpdateResponse> listener = new ActionListener<ClusterStateUpdateResponse>() {
+            @Override
+            public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) {
+                countDownLatch.countDown();
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                exceptionReference.set(e);
+                countDownLatch.countDown();
+            }
+        };
+        weightedRoutingService.registerWeightedRoutingMetadata(request.request(), listener);
+        assertTrue(countDownLatch.await(30, TimeUnit.SECONDS));
+        MatcherAssert.assertThat("Expected onFailure to be called", exceptionReference.get(), notNullValue());
+        MatcherAssert.assertThat(exceptionReference.get(), instanceOf(IllegalStateException.class));
+        MatcherAssert.assertThat(exceptionReference.get().getMessage(), containsString("a decommission action is ongoing with status"));
+    }
+
+    public void testAddWeightedRoutingPassesWhenDecommissionFailed() throws InterruptedException {
+        Map<String, Double> weights = Map.of("zone_A", 1.0, "zone_B", 1.0, "zone_C", 1.0);
+        DecommissionStatus status = DecommissionStatus.FAILED;
+        ClusterState state = clusterService.state();
+        state = setWeightedRoutingWeights(state, weights);
+        state = setDecommissionAttribute(state, status);
+        ClusterState.Builder builder = ClusterState.builder(state);
+        ClusterServiceUtils.setState(clusterService, builder);
+
+        ClusterPutWeightedRoutingRequestBuilder request = new ClusterPutWeightedRoutingRequestBuilder(
+            client,
+            ClusterAddWeightedRoutingAction.INSTANCE
+        );
+        WeightedRouting updatedWeightedRouting = new WeightedRouting("zone", weights);
+        request.setWeightedRouting(updatedWeightedRouting);
+        final CountDownLatch countDownLatch = new CountDownLatch(1);
+        final AtomicReference<Exception> exceptionReference = new AtomicReference<>();
+        ActionListener<ClusterStateUpdateResponse> listener = new ActionListener<ClusterStateUpdateResponse>() {
+            @Override
+            public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) {
+                assertTrue(clusterStateUpdateResponse.isAcknowledged());
+                assertEquals(updatedWeightedRouting, clusterService.state().metadata().weightedRoutingMetadata().getWeightedRouting());
+                countDownLatch.countDown();
+            }
+
+            @Override
+            public void onFailure(Exception e) {}
+        };
+        weightedRoutingService.registerWeightedRoutingMetadata(request.request(), listener);
+        assertTrue(countDownLatch.await(30, TimeUnit.SECONDS));
+    }
 }

From 515f84bf92ad06f2178d4d5269c38a78a6bf215b Mon Sep 17 00:00:00 2001
From: Nick Knize
Date: Thu, 20 Oct 2022 11:50:33 -0500
Subject: [PATCH 076/122] [Remove] Deprecated serialization logic from pipeline aggs (#4847)

Removes the deprecated readFrom/writeTo serialization logic from pipeline
aggs that is no longer used as of Legacy version 7.8.
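In practice, registration call sites keep only the builder reader and the XContent
parser once the deprecated Writeable.Reader for the aggregator is dropped. A minimal
sketch of a registration after this change, mirroring the Derivative registration in
SearchModule shown in the diff below (names taken from that diff):

    // Register a pipeline aggregation without an aggregator reader: only the
    // builder reader and the XContent parser are supplied.
    registerPipelineAggregation(
        new PipelineAggregationSpec(
            DerivativePipelineAggregationBuilder.NAME,      // name used for parsing and writeable registration
            DerivativePipelineAggregationBuilder::new,      // reads the builder from a StreamInput
            DerivativePipelineAggregationBuilder::parse     // reads the builder from XContent
        ).addResultReader(InternalDerivative::new)          // the result is still a NamedWriteable
    );

The PipelineAggregator itself no longer implements NamedWriteable, so no reader is
registered for it in the NamedWriteableRegistry.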
Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../org/opensearch/plugins/SearchPlugin.java | 74 +----------- .../org/opensearch/search/SearchModule.java | 36 ------ .../search/aggregations/AggregationPhase.java | 3 +- .../aggregations/InternalAggregation.java | 40 ------- .../aggregations/InternalAggregations.java | 107 +----------------- .../pipeline/AvgBucketPipelineAggregator.java | 14 --- .../BucketMetricsPipelineAggregator.java | 21 ---- .../BucketScriptPipelineAggregator.java | 28 ----- .../BucketSelectorPipelineAggregator.java | 26 ----- .../BucketSortPipelineAggregator.java | 27 ----- .../CumulativeSumPipelineAggregator.java | 21 ---- .../DerivativePipelineAggregator.java | 25 ---- ...ExtendedStatsBucketPipelineAggregator.java | 21 ---- .../pipeline/MaxBucketPipelineAggregator.java | 14 --- .../pipeline/MinBucketPipelineAggregator.java | 14 --- .../pipeline/MovAvgPipelineAggregator.java | 31 ----- .../pipeline/MovFnPipelineAggregator.java | 28 ----- .../PercentilesBucketPipelineAggregator.java | 23 ---- .../pipeline/PipelineAggregator.java | 55 +-------- .../SerialDiffPipelineAggregator.java | 25 ---- .../pipeline/SiblingPipelineAggregator.java | 9 -- .../StatsBucketPipelineAggregator.java | 11 -- .../pipeline/SumBucketPipelineAggregator.java | 14 --- .../search/query/QuerySearchResult.java | 40 ++----- .../opensearch/search/SearchModuleTests.java | 24 +--- .../InternalAggregationsTests.java | 27 +---- .../terms/SignificanceHeuristicTests.java | 5 - .../test/InternalAggregationTestCase.java | 58 ---------- 29 files changed, 22 insertions(+), 800 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e5d7a3b57862..c7b736205b87b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -102,6 +102,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) - Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) - Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) +- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java index af7d4fc2e9fe5..a61a73255aa61 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java @@ -621,52 +621,6 @@ class PipelineAggregationSpec extends SearchExtensionSpec< PipelineAggregationBuilder, ContextParser> { private final Map> resultReaders = new TreeMap<>(); - /** - * Read the aggregator from a stream. - * @deprecated Pipelines implemented after 7.8.0 do not need to be sent across the wire - */ - @Deprecated - private final Writeable.Reader aggregatorReader; - - /** - * Specification of a {@link PipelineAggregator}. - * - * @param name holds the names by which this aggregation might be parsed. The {@link ParseField#getPreferredName()} is special as it - * is the name by under which the readers are registered. 
So it is the name that the {@link PipelineAggregationBuilder} and - * {@link PipelineAggregator} should return from {@link NamedWriteable#getWriteableName()}. It is an error if - * {@link ParseField#getPreferredName()} conflicts with another registered name, including names from other plugins. - * @param builderReader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a - * {@link StreamInput} - * @param parser reads the aggregation builder from XContent - */ - public PipelineAggregationSpec( - ParseField name, - Writeable.Reader builderReader, - ContextParser parser - ) { - super(name, builderReader, parser); - this.aggregatorReader = null; - } - - /** - * Specification of a {@link PipelineAggregator}. - * - * @param name name by which this aggregation might be parsed or deserialized. Make sure it is the name that the - * {@link PipelineAggregationBuilder} and {@link PipelineAggregator} should return from - * {@link NamedWriteable#getWriteableName()}. It is an error if this name conflicts with another registered name, including - * names from other plugins. - * @param builderReader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a - * {@link StreamInput} - * @param parser reads the aggregation builder from XContent - */ - public PipelineAggregationSpec( - String name, - Writeable.Reader builderReader, - ContextParser parser - ) { - super(name, builderReader, parser); - this.aggregatorReader = null; - } /** * Specification of a {@link PipelineAggregator}. @@ -677,20 +631,14 @@ public PipelineAggregationSpec( * {@link ParseField#getPreferredName()} conflicts with another registered name, including names from other plugins. * @param builderReader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} - * @param aggregatorReader reads the {@link PipelineAggregator} from a stream * @param parser reads the aggregation builder from XContent - * @deprecated Use {@link PipelineAggregationSpec#PipelineAggregationSpec(ParseField, Writeable.Reader, ContextParser)} for - * pipelines implemented after 7.8.0 */ - @Deprecated public PipelineAggregationSpec( ParseField name, Writeable.Reader builderReader, - Writeable.Reader aggregatorReader, ContextParser parser ) { super(name, builderReader, parser); - this.aggregatorReader = aggregatorReader; } /** @@ -702,20 +650,15 @@ public PipelineAggregationSpec( * names from other plugins. * @param builderReader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} - * @param aggregatorReader reads the {@link PipelineAggregator} from a stream * @param parser reads the aggregation builder from XContent - * @deprecated Use {@link PipelineAggregationSpec#PipelineAggregationSpec(String, Writeable.Reader, ContextParser)} for pipelines - * implemented after 7.8.0 + */ - @Deprecated public PipelineAggregationSpec( String name, Writeable.Reader builderReader, - Writeable.Reader aggregatorReader, ContextParser parser ) { super(name, builderReader, parser); - this.aggregatorReader = aggregatorReader; } /** @@ -727,7 +670,6 @@ public PipelineAggregationSpec( * {@link ParseField#getPreferredName()} conflicts with another registered name, including names from other plugins. * @param builderReader the reader registered for this aggregation's builder. 
Typically, a reference to a constructor that takes a * {@link StreamInput} - * @param aggregatorReader reads the {@link PipelineAggregator} from a stream * @param parser reads the aggregation builder from XContent * @deprecated prefer the ctor that takes a {@link ContextParser} */ @@ -735,11 +677,9 @@ public PipelineAggregationSpec( public PipelineAggregationSpec( ParseField name, Writeable.Reader builderReader, - Writeable.Reader aggregatorReader, PipelineAggregator.Parser parser ) { super(name, builderReader, (p, n) -> parser.parse(n, p)); - this.aggregatorReader = aggregatorReader; } /** @@ -751,18 +691,15 @@ public PipelineAggregationSpec( * names from other plugins. * @param builderReader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} - * @param aggregatorReader reads the {@link PipelineAggregator} from a stream * @deprecated prefer the ctor that takes a {@link ContextParser} */ @Deprecated public PipelineAggregationSpec( String name, Writeable.Reader builderReader, - Writeable.Reader aggregatorReader, PipelineAggregator.Parser parser ) { super(name, builderReader, (p, n) -> parser.parse(n, p)); - this.aggregatorReader = aggregatorReader; } /** @@ -781,15 +718,6 @@ public PipelineAggregationSpec addResultReader(String writeableName, Writeable.R return this; } - /** - * Read the aggregator from a stream. - * @deprecated Pipelines implemented after 7.8.0 do not need to be sent across the wire - */ - @Deprecated - public Writeable.Reader getAggregatorReader() { - return aggregatorReader; - } - /** * Get the readers that must be registered for this aggregation's results. */ diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 0149f9a025bcd..4bf7dca9ef444 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -211,21 +211,14 @@ import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder; import org.opensearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.opensearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.AvgBucketPipelineAggregator; import org.opensearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.BucketScriptPipelineAggregator; import org.opensearch.search.aggregations.pipeline.BucketSelectorPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.BucketSelectorPipelineAggregator; import org.opensearch.search.aggregations.pipeline.BucketSortPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.BucketSortPipelineAggregator; import org.opensearch.search.aggregations.pipeline.CumulativeSumPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.CumulativeSumPipelineAggregator; import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregator; import org.opensearch.search.aggregations.pipeline.EwmaModel; import org.opensearch.search.aggregations.pipeline.ExtendedStatsBucketParser; import org.opensearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregator; 
import org.opensearch.search.aggregations.pipeline.HoltLinearModel; import org.opensearch.search.aggregations.pipeline.HoltWintersModel; import org.opensearch.search.aggregations.pipeline.InternalBucketMetricValue; @@ -236,24 +229,15 @@ import org.opensearch.search.aggregations.pipeline.InternalStatsBucket; import org.opensearch.search.aggregations.pipeline.LinearModel; import org.opensearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.MaxBucketPipelineAggregator; import org.opensearch.search.aggregations.pipeline.MinBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.MinBucketPipelineAggregator; import org.opensearch.search.aggregations.pipeline.MovAvgModel; import org.opensearch.search.aggregations.pipeline.MovAvgPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.MovAvgPipelineAggregator; import org.opensearch.search.aggregations.pipeline.MovFnPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.MovFnPipelineAggregator; import org.opensearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregator; -import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.aggregations.pipeline.SerialDiffPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.SerialDiffPipelineAggregator; import org.opensearch.search.aggregations.pipeline.SimpleModel; import org.opensearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.StatsBucketPipelineAggregator; import org.opensearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.SumBucketPipelineAggregator; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.fetch.FetchSubPhase; @@ -710,7 +694,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( DerivativePipelineAggregationBuilder.NAME, DerivativePipelineAggregationBuilder::new, - DerivativePipelineAggregator::new, DerivativePipelineAggregationBuilder::parse ).addResultReader(InternalDerivative::new) ); @@ -718,7 +701,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( MaxBucketPipelineAggregationBuilder.NAME, MaxBucketPipelineAggregationBuilder::new, - MaxBucketPipelineAggregator::new, MaxBucketPipelineAggregationBuilder.PARSER ) // This bucket is used by many pipeline aggreations. @@ -728,7 +710,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( MinBucketPipelineAggregationBuilder.NAME, MinBucketPipelineAggregationBuilder::new, - MinBucketPipelineAggregator::new, MinBucketPipelineAggregationBuilder.PARSER ) /* Uses InternalBucketMetricValue */ @@ -737,7 +718,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( AvgBucketPipelineAggregationBuilder.NAME, AvgBucketPipelineAggregationBuilder::new, - AvgBucketPipelineAggregator::new, AvgBucketPipelineAggregationBuilder.PARSER ) // This bucket is used by many pipeline aggreations. 
@@ -747,7 +727,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( SumBucketPipelineAggregationBuilder.NAME, SumBucketPipelineAggregationBuilder::new, - SumBucketPipelineAggregator::new, SumBucketPipelineAggregationBuilder.PARSER ) /* Uses InternalSimpleValue */ @@ -756,7 +735,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( StatsBucketPipelineAggregationBuilder.NAME, StatsBucketPipelineAggregationBuilder::new, - StatsBucketPipelineAggregator::new, StatsBucketPipelineAggregationBuilder.PARSER ).addResultReader(InternalStatsBucket::new) ); @@ -764,7 +742,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( ExtendedStatsBucketPipelineAggregationBuilder.NAME, ExtendedStatsBucketPipelineAggregationBuilder::new, - ExtendedStatsBucketPipelineAggregator::new, new ExtendedStatsBucketParser() ).addResultReader(InternalExtendedStatsBucket::new) ); @@ -772,7 +749,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( PercentilesBucketPipelineAggregationBuilder.NAME, PercentilesBucketPipelineAggregationBuilder::new, - PercentilesBucketPipelineAggregator::new, PercentilesBucketPipelineAggregationBuilder.PARSER ).addResultReader(InternalPercentilesBucket::new) ); @@ -780,7 +756,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( MovAvgPipelineAggregationBuilder.NAME, MovAvgPipelineAggregationBuilder::new, - MovAvgPipelineAggregator::new, (XContentParser parser, String name) -> MovAvgPipelineAggregationBuilder.parse( movingAverageModelParserRegistry, name, @@ -792,7 +767,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( CumulativeSumPipelineAggregationBuilder.NAME, CumulativeSumPipelineAggregationBuilder::new, - CumulativeSumPipelineAggregator::new, CumulativeSumPipelineAggregationBuilder.PARSER ) ); @@ -800,7 +774,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( BucketScriptPipelineAggregationBuilder.NAME, BucketScriptPipelineAggregationBuilder::new, - BucketScriptPipelineAggregator::new, BucketScriptPipelineAggregationBuilder.PARSER ) ); @@ -808,7 +781,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( BucketSelectorPipelineAggregationBuilder.NAME, BucketSelectorPipelineAggregationBuilder::new, - BucketSelectorPipelineAggregator::new, BucketSelectorPipelineAggregationBuilder::parse ) ); @@ -816,7 +788,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( BucketSortPipelineAggregationBuilder.NAME, BucketSortPipelineAggregationBuilder::new, - BucketSortPipelineAggregator::new, BucketSortPipelineAggregationBuilder::parse ) ); @@ -824,7 +795,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( SerialDiffPipelineAggregationBuilder.NAME, SerialDiffPipelineAggregationBuilder::new, - SerialDiffPipelineAggregator::new, SerialDiffPipelineAggregationBuilder::parse ) ); @@ -832,7 +802,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( MovFnPipelineAggregationBuilder.NAME, MovFnPipelineAggregationBuilder::new, - MovFnPipelineAggregator::new, MovFnPipelineAggregationBuilder.PARSER ) ); @@ -847,11 +816,6 @@ private void registerPipelineAggregation(PipelineAggregationSpec spec) { namedWriteables.add( new NamedWriteableRegistry.Entry(PipelineAggregationBuilder.class, 
spec.getName().getPreferredName(), spec.getReader()) ); - if (spec.getAggregatorReader() != null) { - namedWriteables.add( - new NamedWriteableRegistry.Entry(PipelineAggregator.class, spec.getName().getPreferredName(), spec.getAggregatorReader()) - ); - } for (Map.Entry> resultReader : spec.getResultReaders().entrySet()) { namedWriteables.add( new NamedWriteableRegistry.Entry(InternalAggregation.class, resultReader.getKey(), resultReader.getValue()) diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java index c091a1bc47c05..946da6e9c3369 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java @@ -148,8 +148,7 @@ public void execute(SearchContext context) { throw new AggregationExecutionException("Failed to build aggregation [" + aggregator.name() + "]", e); } } - context.queryResult() - .aggregations(new InternalAggregations(aggregations, context.request().source().aggregations()::buildPipelineTree)); + context.queryResult().aggregations(new InternalAggregations(aggregations)); // disable aggregations so that they don't run on next pages in case of scrolling context.aggregations(null); diff --git a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java index 6ccd429388873..714de06fef1e0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java @@ -31,7 +31,6 @@ package org.opensearch.search.aggregations; -import org.opensearch.LegacyESVersion; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.common.io.stream.StreamInput; @@ -187,8 +186,6 @@ public void consumeBucketsAndMaybeBreak(int size) { protected final Map metadata; - private List pipelineAggregatorsForBwcSerialization; - /** * Constructs an aggregation result with a given name. * @@ -199,46 +196,18 @@ protected InternalAggregation(String name, Map metadata) { this.metadata = metadata; } - /** - * Merge a {@linkplain PipelineAggregator.PipelineTree} into this - * aggregation result tree before serializing to a node older than - * 7.8.0. - */ - public final void mergePipelineTreeForBWCSerialization(PipelineAggregator.PipelineTree pipelineTree) { - if (pipelineAggregatorsForBwcSerialization != null) { - /* - * This method is called once per level on the results but only - * has useful pipeline aggregations on the top level. Every level - * below the top will always be empty. So if we've already been - * called we should bail. This is pretty messy but it is the kind - * of weird thing we have to do to deal with bwc serialization.... - */ - return; - } - pipelineAggregatorsForBwcSerialization = pipelineTree.aggregators(); - forEachBucket(bucketAggs -> bucketAggs.mergePipelineTreeForBWCSerialization(pipelineTree)); - } - /** * Read from a stream. 
*/ protected InternalAggregation(StreamInput in) throws IOException { name = in.readString(); metadata = in.readMap(); - if (in.getVersion().before(LegacyESVersion.V_7_8_0)) { - in.readNamedWriteableList(PipelineAggregator.class); - } } @Override public final void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeGenericValue(metadata); - if (out.getVersion().before(LegacyESVersion.V_7_8_0)) { - assert pipelineAggregatorsForBwcSerialization != null - : "serializing to pre-7.8.0 versions should have called mergePipelineTreeForBWCSerialization"; - out.writeNamedWriteableList(pipelineAggregatorsForBwcSerialization); - } doWriteTo(out); } @@ -355,15 +324,6 @@ public Map getMetadata() { return metadata; } - /** - * The {@linkplain PipelineAggregator}s sent to older versions of OpenSearch. - * @deprecated only use these for serializing to older OpenSearch versions - */ - @Deprecated - public List pipelineAggregatorsForBwcSerialization() { - return pipelineAggregatorsForBwcSerialization; - } - @Override public String getType() { return getWriteableName(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java index 79dd8d756dede..16d7898118fc3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java @@ -31,14 +31,12 @@ package org.opensearch.search.aggregations; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; -import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; import org.opensearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.opensearch.search.aggregations.support.AggregationPath; @@ -50,13 +48,8 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.function.Function; -import java.util.function.Supplier; import java.util.stream.Collectors; -import static java.util.Collections.emptyList; -import static java.util.stream.Collectors.toList; - /** * An internal implementation of {@link Aggregations}. * @@ -76,29 +69,11 @@ public final class InternalAggregations extends Aggregations implements Writeabl } }; - /** - * The way to build a tree of pipeline aggregators. Used only for - * serialization backwards compatibility. - */ - private final Supplier pipelineTreeForBwcSerialization; - /** * Constructs a new aggregation. */ - private InternalAggregations(List aggregations) { - super(aggregations); - this.pipelineTreeForBwcSerialization = null; - } - - /** - * Constructs a node in the aggregation tree. - * @param pipelineTreeSource must be null inside the tree or after final reduction. Should reference the - * search request otherwise so we can properly serialize the response to - * versions of OpenSearch that require the pipelines to be serialized. 
- */ - public InternalAggregations(List aggregations, Supplier pipelineTreeSource) { + public InternalAggregations(List aggregations) { super(aggregations); - this.pipelineTreeForBwcSerialization = pipelineTreeSource; } public static InternalAggregations from(List aggregations) { @@ -110,45 +85,12 @@ public static InternalAggregations from(List aggregations) public static InternalAggregations readFrom(StreamInput in) throws IOException { final InternalAggregations res = from(in.readList(stream -> in.readNamedWriteable(InternalAggregation.class))); - if (in.getVersion().before(LegacyESVersion.V_7_8_0)) { - /* - * Setting the pipeline tree source to null is here is correct but - * only because we don't immediately pass the InternalAggregations - * off to another node. Instead, we always reduce together with - * many aggregations and that always adds the tree read from the - * current request. - */ - in.readNamedWriteableList(PipelineAggregator.class); - } return res; } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_8_0)) { - if (pipelineTreeForBwcSerialization == null) { - mergePipelineTreeForBWCSerialization(PipelineTree.EMPTY); - out.writeNamedWriteableList(getInternalAggregations()); - out.writeNamedWriteableList(emptyList()); - } else { - PipelineAggregator.PipelineTree pipelineTree = pipelineTreeForBwcSerialization.get(); - mergePipelineTreeForBWCSerialization(pipelineTree); - out.writeNamedWriteableList(getInternalAggregations()); - out.writeNamedWriteableList(pipelineTree.aggregators()); - } - } else { - out.writeNamedWriteableList(getInternalAggregations()); - } - } - - /** - * Merge a {@linkplain PipelineAggregator.PipelineTree} into this - * aggregation result tree before serializing to a node older than - * 7.8.0. - */ - public void mergePipelineTreeForBWCSerialization(PipelineAggregator.PipelineTree pipelineTree) { - getInternalAggregations().stream() - .forEach(agg -> { agg.mergePipelineTreeForBWCSerialization(pipelineTree.subTree(agg.getName())); }); + out.writeNamedWriteableList(getInternalAggregations()); } /** @@ -160,26 +102,6 @@ public List copyResults() { return new ArrayList<>(getInternalAggregations()); } - /** - * Get the top level pipeline aggregators. - * @deprecated these only exist for BWC serialization - */ - @Deprecated - public List getTopLevelPipelineAggregators() { - if (pipelineTreeForBwcSerialization == null) { - return emptyList(); - } - return pipelineTreeForBwcSerialization.get().aggregators().stream().map(p -> (SiblingPipelineAggregator) p).collect(toList()); - } - - /** - * Get the transient pipeline tree used to serialize pipeline aggregators to old nodes. - */ - @Deprecated - Supplier getPipelineTreeForBwcSerialization() { - return pipelineTreeForBwcSerialization; - } - @SuppressWarnings("unchecked") private List getInternalAggregations() { return (List) aggregations; @@ -208,11 +130,7 @@ public double sortValue(AggregationPath.PathElement head, Iterator aggregationsList, ReduceContext context) { - InternalAggregations reduced = reduce( - aggregationsList, - context, - reducedAggregations -> new InternalAggregations(reducedAggregations, context.pipelineTreeForBwcSerialization()) - ); + InternalAggregations reduced = reduce(aggregationsList, context); if (reduced == null) { return null; } @@ -238,16 +156,8 @@ public static InternalAggregations topLevelReduce(List agg * {@link InternalAggregations} object found in the list. 
* Note that pipeline aggregations _are not_ reduced by this method. Pipelines are handled * separately by {@link InternalAggregations#topLevelReduce(List, ReduceContext)} - * @param ctor used to build the {@link InternalAggregations}. The top level reduce specifies a constructor - * that adds pipeline aggregation information that is used to send pipeline aggregations to - * older versions of Elasticsearch that require the pipeline aggregations to be returned - * as part of the aggregation tree */ - public static InternalAggregations reduce( - List aggregationsList, - ReduceContext context, - Function, InternalAggregations> ctor - ) { + public static InternalAggregations reduce(List aggregationsList, ReduceContext context) { if (aggregationsList.isEmpty()) { return null; } @@ -280,14 +190,7 @@ public static InternalAggregations reduce( } } - return ctor.apply(reducedAggregations); - } - - /** - * Version of {@link #reduce(List, ReduceContext, Function)} for nodes inside the aggregation tree. - */ - public static InternalAggregations reduce(List aggregationsList, ReduceContext context) { - return reduce(aggregationsList, context, InternalAggregations::from); + return new InternalAggregations(reducedAggregations); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java index e9e1f13a80766..e4f0294ae0723 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java @@ -32,12 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.Map; /** @@ -59,18 +57,6 @@ public class AvgBucketPipelineAggregator extends BucketMetricsPipelineAggregator super(name, bucketsPaths, gapPolicy, format, metadata); } - /** - * Read from a stream. 
- */ - public AvgBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return AvgBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { count = 0; diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java index 4836428027f08..5a2e1238e19c7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.Aggregations; @@ -43,7 +41,6 @@ import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.search.aggregations.support.AggregationPath; -import java.io.IOException; import java.util.List; import java.util.Map; @@ -70,24 +67,6 @@ public abstract class BucketMetricsPipelineAggregator extends SiblingPipelineAgg this.format = format; } - /** - * Read from a stream. - */ - BucketMetricsPipelineAggregator(StreamInput in) throws IOException { - super(in); - format = in.readNamedWriteable(DocValueFormat.class); - gapPolicy = GapPolicy.readFrom(in); - } - - @Override - public final void doWriteTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(format); - gapPolicy.writeTo(out); - innerWriteTo(out); - } - - protected void innerWriteTo(StreamOutput out) throws IOException {} - @Override public final InternalAggregation doReduce(Aggregations aggregations, ReduceContext context) { preCollection(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java index abf98b6ea4a6f..41e11e2537845 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.script.BucketAggregationScript; import org.opensearch.script.Script; import org.opensearch.search.DocValueFormat; @@ -43,7 +41,6 @@ import org.opensearch.search.aggregations.InternalMultiBucketAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -79,31 +76,6 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator { this.gapPolicy = gapPolicy; } - /** - * Read from a stream. 
- */ - @SuppressWarnings("unchecked") - public BucketScriptPipelineAggregator(StreamInput in) throws IOException { - super(in); - script = new Script(in); - formatter = in.readNamedWriteable(DocValueFormat.class); - gapPolicy = GapPolicy.readFrom(in); - bucketsPathsMap = (Map) in.readGenericValue(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - script.writeTo(out); - out.writeNamedWriteable(formatter); - gapPolicy.writeTo(out); - out.writeGenericValue(bucketsPathsMap); - } - - @Override - public String getWriteableName() { - return BucketScriptPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation originalAgg = diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java index 8f5426eae83a4..24f64bc78815d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.script.BucketAggregationSelectorScript; import org.opensearch.script.Script; import org.opensearch.search.aggregations.InternalAggregation; @@ -41,7 +39,6 @@ import org.opensearch.search.aggregations.InternalMultiBucketAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -72,29 +69,6 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator { this.gapPolicy = gapPolicy; } - /** - * Read from a stream. 
- */ - @SuppressWarnings("unchecked") - public BucketSelectorPipelineAggregator(StreamInput in) throws IOException { - super(in); - script = new Script(in); - gapPolicy = GapPolicy.readFrom(in); - bucketsPathsMap = (Map) in.readGenericValue(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - script.writeTo(out); - gapPolicy.writeTo(out); - out.writeGenericValue(bucketsPathsMap); - } - - @Override - public String getWriteableName() { - return BucketSelectorPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation originalAgg = diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java index 809514614b1f2..571fbf5ad42a6 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java @@ -31,8 +31,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; import org.opensearch.search.aggregations.InternalMultiBucketAggregation; @@ -41,7 +39,6 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -75,30 +72,6 @@ public class BucketSortPipelineAggregator extends PipelineAggregator { this.gapPolicy = gapPolicy; } - /** - * Read from a stream. 
- */ - public BucketSortPipelineAggregator(StreamInput in) throws IOException { - super(in); - sorts = in.readList(FieldSortBuilder::new); - from = in.readVInt(); - size = in.readOptionalVInt(); - gapPolicy = GapPolicy.readFrom(in); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeList(sorts); - out.writeVInt(from); - out.writeOptionalVInt(size); - gapPolicy.writeTo(out); - } - - @Override - public String getWriteableName() { - return BucketSortPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation originalAgg = diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java index cba35c89cc639..c11ed9d7ded7d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; @@ -43,7 +41,6 @@ import org.opensearch.search.aggregations.bucket.histogram.HistogramFactory; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -65,24 +62,6 @@ public class CumulativeSumPipelineAggregator extends PipelineAggregator { this.formatter = formatter; } - /** - * Read from a stream. 
- */ - public CumulativeSumPipelineAggregator(StreamInput in) throws IOException { - super(in); - formatter = in.readNamedWriteable(DocValueFormat.class); - } - - @Override - public void doWriteTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(formatter); - } - - @Override - public String getWriteableName() { - return CumulativeSumPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation< diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java index 3603d1c5a0c58..4bdd2c3168fae 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; @@ -43,7 +41,6 @@ import org.opensearch.search.aggregations.bucket.histogram.HistogramFactory; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -76,28 +73,6 @@ public class DerivativePipelineAggregator extends PipelineAggregator { this.xAxisUnits = xAxisUnits == null ? null : (double) xAxisUnits; } - /** - * Read from a stream. 
- */ - public DerivativePipelineAggregator(StreamInput in) throws IOException { - super(in); - formatter = in.readNamedWriteable(DocValueFormat.class); - gapPolicy = GapPolicy.readFrom(in); - xAxisUnits = in.readOptionalDouble(); - } - - @Override - public void doWriteTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(formatter); - gapPolicy.writeTo(out); - out.writeOptionalDouble(xAxisUnits); - } - - @Override - public String getWriteableName() { - return DerivativePipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation< diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregator.java index a63a1da332d08..81f4a441e3772 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregator.java @@ -32,13 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.Map; /** @@ -66,24 +63,6 @@ public class ExtendedStatsBucketPipelineAggregator extends BucketMetricsPipeline this.sigma = sigma; } - /** - * Read from a stream. - */ - public ExtendedStatsBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - sigma = in.readDouble(); - } - - @Override - protected void innerWriteTo(StreamOutput out) throws IOException { - out.writeDouble(sigma); - } - - @Override - public String getWriteableName() { - return ExtendedStatsBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { sum = 0; diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java index 1e327919755c7..7e63af31a9c86 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java @@ -32,12 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -61,18 +59,6 @@ public class MaxBucketPipelineAggregator extends BucketMetricsPipelineAggregator super(name, bucketsPaths, gapPolicy, formatter, metadata); } - /** - * Read from a stream. 
- */ - public MaxBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return MaxBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { maxBucketKeys = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java index 2b57aac14ea1e..773fc7441d96e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java @@ -32,12 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -61,18 +59,6 @@ public class MinBucketPipelineAggregator extends BucketMetricsPipelineAggregator super(name, bucketsPaths, gapPolicy, formatter, metadata); } - /** - * Read from a stream. - */ - public MinBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return MinBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { minBucketKeys = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java index 2e3c44ac3902d..aa78439784992 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java @@ -33,8 +33,6 @@ package org.opensearch.search.aggregations.pipeline; import org.opensearch.common.collect.EvictingQueue; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; @@ -45,7 +43,6 @@ import org.opensearch.search.aggregations.bucket.histogram.HistogramFactory; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.ListIterator; @@ -88,34 +85,6 @@ public class MovAvgPipelineAggregator extends PipelineAggregator { this.minimize = minimize; } - /** - * Read from a stream. 
- */ - public MovAvgPipelineAggregator(StreamInput in) throws IOException { - super(in); - formatter = in.readNamedWriteable(DocValueFormat.class); - gapPolicy = GapPolicy.readFrom(in); - window = in.readVInt(); - predict = in.readVInt(); - model = in.readNamedWriteable(MovAvgModel.class); - minimize = in.readBoolean(); - } - - @Override - public void doWriteTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(formatter); - gapPolicy.writeTo(out); - out.writeVInt(window); - out.writeVInt(predict); - out.writeNamedWriteable(model); - out.writeBoolean(minimize); - } - - @Override - public String getWriteableName() { - return MovAvgPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation< diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index 7b20a796b8134..a4c3c14f3365f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.script.Script; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; @@ -42,7 +40,6 @@ import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation; import org.opensearch.search.aggregations.bucket.histogram.HistogramFactory; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -98,31 +95,6 @@ public class MovFnPipelineAggregator extends PipelineAggregator { this.shift = shift; } - public MovFnPipelineAggregator(StreamInput in) throws IOException { - super(in); - script = new Script(in); - formatter = in.readNamedWriteable(DocValueFormat.class); - gapPolicy = BucketHelpers.GapPolicy.readFrom(in); - bucketsPath = in.readString(); - window = in.readInt(); - shift = in.readInt(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - script.writeTo(out); - out.writeNamedWriteable(formatter); - gapPolicy.writeTo(out); - out.writeString(bucketsPath); - out.writeInt(window); - out.writeInt(shift); - } - - @Override - public String getWriteableName() { - return MovFnPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, InternalAggregation.ReduceContext reduceContext) { InternalMultiBucketAggregation< diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java index 7fad7e233c424..41699ffb8b6b8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java @@ -32,13 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import 
org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -69,26 +66,6 @@ public class PercentilesBucketPipelineAggregator extends BucketMetricsPipelineAg this.keyed = keyed; } - /** - * Read from a stream. - */ - public PercentilesBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - percents = in.readDoubleArray(); - keyed = in.readBoolean(); - } - - @Override - public void innerWriteTo(StreamOutput out) throws IOException { - out.writeDoubleArray(percents); - out.writeBoolean(keyed); - } - - @Override - public String getWriteableName() { - return PercentilesBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { data = new ArrayList<>(1024); diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java index 4b1134d9e1a60..204dcdb7a400f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java @@ -32,11 +32,7 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; -import org.opensearch.common.io.stream.NamedWriteable; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; @@ -54,7 +50,7 @@ * * @opensearch.internal */ -public abstract class PipelineAggregator implements NamedWriteable { +public abstract class PipelineAggregator { /** * Parse the {@link PipelineAggregationBuilder} from a {@link XContentParser}. 
* @@ -139,55 +135,6 @@ protected PipelineAggregator(String name, String[] bucketsPaths, Map { diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java index 577f66f94e90f..a01be279853f5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java @@ -32,12 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.Map; /** @@ -61,15 +59,6 @@ public class StatsBucketPipelineAggregator extends BucketMetricsPipelineAggregat super(name, bucketsPaths, gapPolicy, formatter, metadata); } - public StatsBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return StatsBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { sum = 0; diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java index 32a64fafa7a2d..01939b1b1063c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java @@ -32,12 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.Map; /** @@ -58,18 +56,6 @@ public class SumBucketPipelineAggregator extends BucketMetricsPipelineAggregator super(name, bucketsPaths, gapPolicy, formatter, metadata); } - /** - * Read from a stream. 
- */ - public SumBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return SumBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { sum = 0; diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java index 31fdc5c9d9e9d..a0c2970625472 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TotalHits; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.DelayableWriteable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -95,11 +94,7 @@ public QuerySearchResult() { public QuerySearchResult(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - isNull = in.readBoolean(); - } else { - isNull = false; - } + isNull = in.readBoolean(); if (isNull == false) { ShardSearchContextId id = new ShardSearchContextId(in); readFromWithId(id, in); @@ -355,14 +350,8 @@ public void readFromWithId(ShardSearchContextId id, StreamInput in) throws IOExc } } setTopDocs(readTopDocs(in)); - if (in.getVersion().before(LegacyESVersion.V_7_7_0)) { - if (hasAggs = in.readBoolean()) { - aggregations = DelayableWriteable.referencing(InternalAggregations.readFrom(in)); - } - } else { - if (hasAggs = in.readBoolean()) { - aggregations = DelayableWriteable.delayed(InternalAggregations::readFrom, in); - } + if (hasAggs = in.readBoolean()) { + aggregations = DelayableWriteable.delayed(InternalAggregations::readFrom, in); } if (in.readBoolean()) { suggest = new Suggest(in); @@ -373,17 +362,13 @@ public void readFromWithId(ShardSearchContextId id, StreamInput in) throws IOExc hasProfileResults = profileShardResults != null; serviceTimeEWMA = in.readZLong(); nodeQueueSize = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - setShardSearchRequest(in.readOptionalWriteable(ShardSearchRequest::new)); - setRescoreDocIds(new RescoreDocIds(in)); - } + setShardSearchRequest(in.readOptionalWriteable(ShardSearchRequest::new)); + setRescoreDocIds(new RescoreDocIds(in)); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - out.writeBoolean(isNull); - } + out.writeBoolean(isNull); if (isNull == false) { contextId.writeTo(out); writeToNoId(out); @@ -406,12 +391,7 @@ public void writeToNoId(StreamOutput out) throws IOException { out.writeBoolean(false); } else { out.writeBoolean(true); - if (out.getVersion().before(LegacyESVersion.V_7_7_0)) { - InternalAggregations aggs = aggregations.expand(); - aggs.writeTo(out); - } else { - aggregations.writeTo(out); - } + aggregations.writeTo(out); } if (suggest == null) { out.writeBoolean(false); @@ -424,10 +404,8 @@ public void writeToNoId(StreamOutput out) throws IOException { out.writeOptionalWriteable(profileShardResults); out.writeZLong(serviceTimeEWMA); out.writeInt(nodeQueueSize); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - out.writeOptionalWriteable(getShardSearchRequest()); - getRescoreDocIds().writeTo(out); - } + out.writeOptionalWriteable(getShardSearchRequest()); + getRescoreDocIds().writeTo(out); } public TotalHits 
getTotalHits() { diff --git a/server/src/test/java/org/opensearch/search/SearchModuleTests.java b/server/src/test/java/org/opensearch/search/SearchModuleTests.java index 05d4153949f9a..48a935ea984fa 100644 --- a/server/src/test/java/org/opensearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/opensearch/search/SearchModuleTests.java @@ -56,7 +56,6 @@ import org.opensearch.search.aggregations.bucket.terms.heuristic.ChiSquare; import org.opensearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregator; import org.opensearch.search.aggregations.pipeline.InternalDerivative; import org.opensearch.search.aggregations.pipeline.MovAvgModel; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; @@ -193,7 +192,6 @@ public List getPipelineAggregations() { new PipelineAggregationSpec( DerivativePipelineAggregationBuilder.NAME, DerivativePipelineAggregationBuilder::new, - DerivativePipelineAggregator::new, DerivativePipelineAggregationBuilder::parse ).addResultReader(InternalDerivative::new) ); @@ -375,12 +373,7 @@ public void testRegisterPipelineAggregation() { @Override public List getPipelineAggregations() { return singletonList( - new PipelineAggregationSpec( - "test", - TestPipelineAggregationBuilder::new, - TestPipelineAggregator::new, - TestPipelineAggregationBuilder::fromXContent - ) + new PipelineAggregationSpec("test", TestPipelineAggregationBuilder::new, TestPipelineAggregationBuilder::fromXContent) ); } })); @@ -570,21 +563,10 @@ protected void validate(ValidationContext context) {} * Dummy test {@link PipelineAggregator} used to test registering aggregation builders. */ private static class TestPipelineAggregator extends PipelineAggregator { - /** - * Read from a stream. 
- */ - TestPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return "test"; + TestPipelineAggregator() { + super("test", new String[] {}, null); } - @Override - protected void doWriteTo(StreamOutput out) throws IOException {} - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { return null; diff --git a/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java index bb28c0657ac8e..2d022be10fe29 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java @@ -43,12 +43,9 @@ import org.opensearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; import org.opensearch.search.aggregations.bucket.terms.StringTerms; import org.opensearch.search.aggregations.bucket.terms.StringTermsTests; -import org.opensearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.InternalSimpleValueTests; import org.opensearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; -import org.opensearch.search.aggregations.pipeline.SiblingPipelineAggregator; -import org.opensearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; @@ -91,7 +88,6 @@ public void testNonFinalReduceTopLevelPipelineAggs() { ); List aggs = singletonList(InternalAggregations.from(Collections.singletonList(terms))); InternalAggregations reducedAggs = InternalAggregations.topLevelReduce(aggs, maxBucketReduceContext().forPartialReduction()); - assertEquals(1, reducedAggs.getTopLevelPipelineAggregators().size()); assertEquals(1, reducedAggs.aggregations.size()); } @@ -116,7 +112,6 @@ public void testFinalReduceTopLevelPipelineAggs() { Collections.singletonList(aggs), maxBucketReduceContext().forFinalReduction() ); - assertEquals(0, reducedAggs.getTopLevelPipelineAggregators().size()); assertEquals(2, reducedAggs.aggregations.size()); } @@ -130,10 +125,6 @@ private InternalAggregation.ReduceContextBuilder maxBucketReduceContext() { } public static InternalAggregations createTestInstance() throws Exception { - return createTestInstance(randomPipelineTree()); - } - - public static InternalAggregations createTestInstance(PipelineAggregator.PipelineTree pipelineTree) throws Exception { List aggsList = new ArrayList<>(); if (randomBoolean()) { StringTermsTests stringTermsTests = new StringTermsTests(); @@ -150,23 +141,7 @@ public static InternalAggregations createTestInstance(PipelineAggregator.Pipelin InternalSimpleValueTests simpleValueTests = new InternalSimpleValueTests(); aggsList.add(simpleValueTests.createTestInstance()); } - return new InternalAggregations(aggsList, () -> pipelineTree); - } - - private static PipelineAggregator.PipelineTree randomPipelineTree() { - List topLevelPipelineAggs = new ArrayList<>(); - if (randomBoolean()) { - if (randomBoolean()) { - topLevelPipelineAggs.add((SiblingPipelineAggregator) new MaxBucketPipelineAggregationBuilder("name1", "bucket1").create()); - } - if (randomBoolean()) { - topLevelPipelineAggs.add((SiblingPipelineAggregator) new 
AvgBucketPipelineAggregationBuilder("name2", "bucket2").create()); - } - if (randomBoolean()) { - topLevelPipelineAggs.add((SiblingPipelineAggregator) new SumBucketPipelineAggregationBuilder("name3", "bucket3").create()); - } - } - return new PipelineAggregator.PipelineTree(emptyMap(), topLevelPipelineAggs); + return new InternalAggregations(aggsList); } public void testSerialization() throws Exception { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java index 9c7196db68e97..c39445fef88c5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.terms; import org.apache.lucene.util.BytesRef; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.InputStreamStreamInput; @@ -57,7 +56,6 @@ import org.opensearch.search.aggregations.bucket.terms.heuristic.MutualInformation; import org.opensearch.search.aggregations.bucket.terms.heuristic.PercentageScore; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; -import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; @@ -95,9 +93,6 @@ public void testStreamResponse() throws Exception { ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); out.setVersion(version); - if (version.before(LegacyESVersion.V_7_8_0)) { - sigTerms.mergePipelineTreeForBWCSerialization(PipelineAggregator.PipelineTree.EMPTY); - } out.writeNamedWriteable(sigTerms); // read diff --git a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java index 5325c48e16913..2cba15f2e2039 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java @@ -144,12 +144,10 @@ import org.opensearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder; import org.opensearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.InternalBucketMetricValue; import org.opensearch.search.aggregations.pipeline.InternalSimpleValue; -import org.opensearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.ParsedBucketMetricValue; import org.opensearch.search.aggregations.pipeline.ParsedDerivative; import org.opensearch.search.aggregations.pipeline.ParsedExtendedStatsBucket; @@ -160,7 +158,6 @@ import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import 
org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; import org.opensearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -175,7 +172,6 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.opensearch.common.xcontent.XContentHelper.toXContent; import static org.opensearch.search.aggregations.InternalMultiBucketAggregation.countInnerBucket; @@ -482,60 +478,6 @@ public final void testFromXContentWithRandomFields() throws IOException { assertFromXContent(aggregation, parsedAggregation); } - public void testMergePipelineTreeForBWCSerialization() { - T agg = createTestInstance(); - PipelineAggregator.PipelineTree pipelineTree = randomPipelineTree(agg); - agg.mergePipelineTreeForBWCSerialization(pipelineTree); - assertMergedPipelineTreeForBWCSerialization(agg, pipelineTree); - } - - public void testMergePipelineTreeTwice() { - T agg = createTestInstance(); - PipelineAggregator.PipelineTree pipelineTree = randomPipelineTree(agg); - agg.mergePipelineTreeForBWCSerialization(pipelineTree); - agg.mergePipelineTreeForBWCSerialization(randomPipelineTree(agg)); // This should be ignored - assertMergedPipelineTreeForBWCSerialization(agg, pipelineTree); - } - - public static PipelineAggregator.PipelineTree randomPipelineTree(InternalAggregation aggregation) { - Map subTree = new HashMap<>(); - aggregation.forEachBucket(bucketAggs -> { - for (Aggregation subAgg : bucketAggs) { - if (subTree.containsKey(subAgg.getName())) { - continue; - } - subTree.put(subAgg.getName(), randomPipelineTree((InternalAggregation) subAgg)); - } - }); - return new PipelineAggregator.PipelineTree(emptyMap(), randomPipelineAggregators()); - } - - public static List randomPipelineAggregators() { - List pipelines = new ArrayList<>(); - if (randomBoolean()) { - if (randomBoolean()) { - pipelines.add(new MaxBucketPipelineAggregationBuilder("name1", "bucket1").create()); - } - if (randomBoolean()) { - pipelines.add(new AvgBucketPipelineAggregationBuilder("name2", "bucket2").create()); - } - if (randomBoolean()) { - pipelines.add(new SumBucketPipelineAggregationBuilder("name3", "bucket3").create()); - } - } - return pipelines; - } - - @SuppressWarnings("deprecation") - private void assertMergedPipelineTreeForBWCSerialization(InternalAggregation agg, PipelineAggregator.PipelineTree pipelineTree) { - assertThat(agg.pipelineAggregatorsForBwcSerialization(), equalTo(pipelineTree.aggregators())); - agg.forEachBucket(bucketAggs -> { - for (Aggregation subAgg : bucketAggs) { - assertMergedPipelineTreeForBWCSerialization((InternalAggregation) subAgg, pipelineTree.subTree(subAgg.getName())); - } - }); - } - protected abstract void assertFromXContent(T aggregation, ParsedAggregation parsedAggregation) throws IOException; @SuppressWarnings("unchecked") From b36906ffb2f81722e882752f35ee59eb8827a8c5 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Fri, 21 Oct 2022 13:32:29 -0400 Subject: [PATCH 077/122] Added a section of how to deal with flakey tests to the developer guide. (#4868) Signed-off-by: Daniel (dB.) Doubrovkine Signed-off-by: Daniel (dB.) 
Doubrovkine --- CHANGELOG.md | 1 + DEVELOPER_GUIDE.md | 108 +++++++++++++++++++++++++++------------------ 2 files changed, 66 insertions(+), 43 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7b736205b87b..6bd55d3d4495a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) - Add groupId value propagation tests for ZIP publication task ([#4772](https://github.com/opensearch-project/OpenSearch/pull/4772)) - Add support for GeoJson Point type in GeoPoint field ([#4597](https://github.com/opensearch-project/OpenSearch/pull/4597)) +- Add dev guide for dealing with flakey tests ([4868](https://github.com/opensearch-project/OpenSearch/pull/4868)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 8c2a6b4889122..78b926f8ba18b 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -38,21 +38,22 @@ - [Gradle Plugins](#gradle-plugins) - [Distribution Download Plugin](#distribution-download-plugin) - [Creating fat-JAR of a Module](#creating-fat-jar-of-a-module) - - [Misc](#misc) - - [git-secrets](#git-secrets) - - [Installation](#installation) - - [Configuration](#configuration) - [Components](#components) - [Build libraries & interfaces](#build-libraries--interfaces) - [Clients & Libraries](#clients--libraries) - [Plugins](#plugins-1) - - [Indexing & search](#indexing--search) + - [Indexing & Search](#indexing--search) - [Aggregations](#aggregations) - [Distributed Framework](#distributed-framework) - - [Submitting Changes](#submitting-changes) - - [Backports](#backports) - - [LineLint](#linelint) -- [Lucene Snapshots](#lucene-snapshots) + - [Misc](#misc) + - [Git Secrets](#git-secrets) + - [Installation](#installation) + - [Configuration](#configuration) + - [Submitting Changes](#submitting-changes) + - [Backports](#backports) + - [LineLint](#linelint) + - [Lucene Snapshots](#lucene-snapshots) + - [Flakey Tests](#flakey-tests) # Developer Guide @@ -414,37 +415,12 @@ Refer the installed JAR as any other maven artifact, e.g. ``` -## Misc - -### git-secrets - -Security is our top priority. Avoid checking in credentials. - -#### Installation -Install [awslabs/git-secrets](https://github.com/awslabs/git-secrets) by running the following commands. -``` -git clone https://github.com/awslabs/git-secrets.git -cd git-secrets -make install -``` - -#### Configuration -You can configure git secrets per repository, you need to change the directory to the root of the repository and run the following command. -``` -git secrets --install -✓ Installed commit-msg hook to .git/hooks/commit-msg -✓ Installed pre-commit hook to .git/hooks/pre-commit -✓ Installed prepare-commit-msg hook to .git/hooks/prepare-commit-msg -``` -Then, you need to apply patterns for git-secrets, you can install the AWS standard patterns by running the following command. -``` -git secrets --register-aws -``` - ## Components + As you work in the OpenSearch repo you may notice issues getting labeled with component labels. It's a housekeeping task to help group together similar pieces of work. You can pretty much ignore it, but if you're curious, here's what the different labels mean: ### Build libraries & interfaces + Tasks to make sure the build tasks are useful and packaging and distribution are easy. 
Includes: @@ -458,6 +434,7 @@ Includes: ### Clients & Libraries + APIs and communication mechanisms for external connections to OpenSearch. This includes the “library” directory in OpenSearch (a set of common functions). Includes: @@ -467,6 +444,7 @@ Includes: - CLI ### Plugins + Anything touching the plugin infrastructure within core OpenSearch. Includes: @@ -476,7 +454,8 @@ Includes: - Plugin interfaces -### Indexing & search +### Indexing & Search + The critical path of indexing and search, including: Measure index and search, performance, Improving the performance of indexing and search, ensure synchronization OpenSearch APIs with upstream Lucene change (e.g. new field types, changing doc values and codex). Includes: @@ -487,6 +466,7 @@ Includes: - DocValues ### Aggregations + Making sure OpenSearch can be used as a compute engine. Includes: @@ -495,6 +475,7 @@ Includes: - Framework ### Distributed Framework + Work to make sure that OpenSearch can scale in a distributed manner. Includes: @@ -506,15 +487,43 @@ Includes: - Shard Strategies - Circuit Breakers -## Submitting Changes +## Misc + +### Git Secrets + +Security is our top priority. Avoid checking in credentials. + +#### Installation +Install [awslabs/git-secrets](https://github.com/awslabs/git-secrets) by running the following commands. +``` +git clone https://github.com/awslabs/git-secrets.git +cd git-secrets +make install +``` + +#### Configuration +You can configure git secrets per repository, you need to change the directory to the root of the repository and run the following command. +``` +git secrets --install +✓ Installed commit-msg hook to .git/hooks/commit-msg +✓ Installed pre-commit hook to .git/hooks/pre-commit +✓ Installed prepare-commit-msg hook to .git/hooks/prepare-commit-msg +``` +Then, you need to apply patterns for git-secrets, you can install the AWS standard patterns by running the following command. +``` +git secrets --register-aws +``` + +### Submitting Changes See [CONTRIBUTING](CONTRIBUTING.md). -## Backports +### Backports The Github workflow in [`backport.yml`](.github/workflows/backport.yml) creates backport PRs automatically when the original PR with an appropriate label `backport ` is merged to main with the backport workflow run successfully on the PR. For example, if a PR on main needs to be backported to `1.x` branch, add a label `backport 1.x` to the PR and make sure the backport workflow runs on the PR along with other checks. Once this PR is merged to main, the workflow will create a backport PR to the `1.x` branch. -## LineLint +### LineLint + A linter in [`code-hygiene.yml`](.github/workflows/code-hygiene.yml) that validates simple newline and whitespace rules in all sorts of files. It can: - Recursively check a directory tree for files that do not end in a newline - Automatically fix these files by adding a newline or trimming extra newlines. @@ -529,7 +538,20 @@ Pass a list of files or directories to limit your search. linelint README.md LICENSE -# Lucene Snapshots +### Lucene Snapshots + The Github workflow in [lucene-snapshots.yml](.github/workflows/lucene-snapshots.yml) is a Github worfklow executable by maintainers to build a top-down snapshot build of lucene. -These snapshots are available to test compatibility with upcoming changes to Lucene by updating the version at [version.properties](buildsrc/version.properties) with the `version-snapshot-sha` version. -Example: `lucene = 10.0.0-snapshot-2e941fc`. 
+These snapshots are available to test compatibility with upcoming changes to Lucene by updating the version at [version.properties](buildsrc/version.properties) with the `version-snapshot-sha` version. Example: `lucene = 10.0.0-snapshot-2e941fc`. + +### Flakey Tests + +OpenSearch has a very large test suite with long running, often failing (flakey), integration tests. Such individual tests are labelled as [Flakey Random Test Failure](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22Flakey+Random+Test+Failure%22). Your help is wanted fixing these! + +If you encounter a build/test failure in CI that is unrelated to the change in your pull request, it may be a known flakey test, or a new test failure. + +1. Follow failed CI links, and locate the failing test(s). +2. Copy-paste the failure into a comment of your PR. +3. Search through [issues](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22Flakey+Random+Test+Failure%22) using the name of the failed test for whether this is a known flakey test. +5. If an existing issue is found, paste a link to the known issue in a comment to your PR. +6. If no existing issue is found, open one. +7. Retry CI via the GitHub UX or by pushing an update to your PR. From d85f8cdcf90c2139beaaf953cd89bce86b9ce672 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 21 Oct 2022 16:47:27 -0400 Subject: [PATCH 078/122] Compatibility issue with /_mget: RHLC 2.x connected to OpenSearch Cluster 1.x (#4812) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + qa/mixed-cluster/build.gradle | 4 + .../org/opensearch/backwards/SearchingIT.java | 78 +++++++++++++++++++ .../action/get/MultiGetResponse.java | 5 ++ 4 files changed, 88 insertions(+) create mode 100644 qa/mixed-cluster/src/test/java/org/opensearch/backwards/SearchingIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 6bd55d3d4495a..d342489dc9895 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -153,6 +153,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) - Fix bug in AwarenessAttributeDecommissionIT([4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) +- Compatibility issue with /_mget: RHLC 2.x connected to OpenSearch Cluster 1.x ([#4812](https://github.com/opensearch-project/OpenSearch/pull/4812)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 7a2c37639b93e..90aeb8faadf80 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -38,6 +38,10 @@ apply plugin: 'opensearch.standalone-test' apply from : "$rootDir/gradle/bwc-test.gradle" apply plugin: 'opensearch.rest-resources' +dependencies { + testImplementation project(":client:rest-high-level") +} + restResources { restTests { includeCore '*' diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/SearchingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/SearchingIT.java new file mode 100644 index 0000000000000..839fe01ed81d6 --- /dev/null +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/SearchingIT.java @@ -0,0 
+1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.backwards; + +import org.apache.hc.core5.http.HttpHost; +import org.opensearch.LegacyESVersion; +import org.opensearch.Version; +import org.opensearch.action.get.MultiGetRequest; +import org.opensearch.action.get.MultiGetResponse; +import org.opensearch.backwards.IndexingIT.Nodes; +import org.opensearch.client.Request; +import org.opensearch.client.RequestOptions; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.client.RestClient; +import org.opensearch.client.RestHighLevelClient; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Strings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.rest.RestStatus; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class SearchingIT extends OpenSearchRestTestCase { + public void testMultiGet() throws Exception { + final Set nodes = buildNodes(); + + final MultiGetRequest multiGetRequest = new MultiGetRequest(); + multiGetRequest.add("index", "id1"); + + try (RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(nodes.toArray(HttpHost[]::new)))) { + MultiGetResponse response = client.mget(multiGetRequest, RequestOptions.DEFAULT); + assertEquals(1, response.getResponses().length); + + assertTrue(response.getResponses()[0].isFailed()); + assertNotNull(response.getResponses()[0].getFailure()); + assertEquals(response.getResponses()[0].getFailure().getId(), "id1"); + assertEquals(response.getResponses()[0].getFailure().getIndex(), "index"); + assertThat(response.getResponses()[0].getFailure().getMessage(), containsString("no such index [index]")); + } + } + + private Set buildNodes() throws IOException, URISyntaxException { + Response response = client().performRequest(new Request("GET", "_nodes")); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + Map nodesAsMap = objectPath.evaluate("nodes"); + final Set nodes = new HashSet<>(); + for (String id : nodesAsMap.keySet()) { + nodes.add(HttpHost.create((String) objectPath.evaluate("nodes." 
+ id + ".http.publish_address"))); + } + + return nodes; + } +} diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java index b3664935b9489..a763564ddf855 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java @@ -63,6 +63,10 @@ public class MultiGetResponse extends ActionResponse implements Iterable Date: Fri, 21 Oct 2022 16:39:10 -0700 Subject: [PATCH 079/122] Update pull request template to include tips (#4851) Signed-off-by: Rabi Panda --- .github/pull_request_template.md | 7 +++++++ CHANGELOG.md | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4537cadf71074..f54aa394cf83f 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,3 +1,10 @@ + + ### Description [Describe what this change achieves] diff --git a/CHANGELOG.md b/CHANGELOG.md index d342489dc9895..4923becc2488c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add groupId value propagation tests for ZIP publication task ([#4772](https://github.com/opensearch-project/OpenSearch/pull/4772)) - Add support for GeoJson Point type in GeoPoint field ([#4597](https://github.com/opensearch-project/OpenSearch/pull/4597)) - Add dev guide for dealing with flakey tests ([4868](https://github.com/opensearch-project/OpenSearch/pull/4868)) +- Update pull request template ([#4851](https://github.com/opensearch-project/OpenSearch/pull/4851)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 @@ -153,7 +154,6 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) - Fix bug in AwarenessAttributeDecommissionIT([4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) -- Compatibility issue with /_mget: RHLC 2.x connected to OpenSearch Cluster 1.x ([#4812](https://github.com/opensearch-project/OpenSearch/pull/4812)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) From 49a9b81a9ec9169143bbb352f75423e404ecfe9c Mon Sep 17 00:00:00 2001 From: Heemin Kim Date: Sat, 22 Oct 2022 00:45:15 +0000 Subject: [PATCH 080/122] Skip geojson test for version below 2.4 (#4860) Signed-off-by: Heemin Kim Signed-off-by: Heemin Kim --- CHANGELOG.md | 1 + .../resources/rest-api-spec/test/index/80_geo_point.yml | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4923becc2488c..f71ddfc95f91a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -154,6 +154,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) - Fix bug in AwarenessAttributeDecommissionIT([4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) +- [BUG]: flaky test 
index/80_geo_point/Single point test([#4860](https://github.com/opensearch-project/OpenSearch/pull/4860)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml index d15529af0041a..7d6c2b835f1f7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml @@ -12,6 +12,9 @@ setup: --- "Single point test": + - skip: + version: " - 2.3.99" + reason: "geojson format is supported in 2.4 and above" - do: bulk: refresh: true @@ -75,6 +78,9 @@ setup: --- "Multi points test": + - skip: + version: " - 2.3.99" + reason: "geojson format is supported in 2.4 and above" - do: bulk: refresh: true From 6571db75c51bbe1641ea7f6f28bd0bc17a431e29 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Sat, 22 Oct 2022 07:33:34 -0700 Subject: [PATCH 081/122] Fix bug in SlicedInputStream with zero length (#4863) Per the contract of InputStream#read(byte[], int, int): If len is zero, then no bytes are read and 0 is returned SlicedInputStream had a bug where if a zero length was passed then it would drain all the underlying streams and return -1. This was uncovered by using InputStream#readAllBytes in new code under development. The only existing usage of SlicedInputStream should not be vulnerable to this bug. I've also added a check for invalid arguments and created tests to ensure the proper exceptions are thrown per the InputStream contract. In the test I've replaced a "readFully" method with an equivalent "readNBytes" that was introduced in Java 11. 
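Illustration (editorial, not part of this change): the InputStream#read(byte[], int, int) contract that the fix restores can be seen with a plain ByteArrayInputStream instead of SlicedInputStream — a zero-length read returns 0 without consuming anything, and an invalid (offset, length) pair throws.

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Objects;

// Minimal sketch of the read(byte[], int, int) contract for zero-length and invalid arguments.
public class ZeroLengthReadContract {
    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[] { 1, 2, 3 });
        byte[] buffer = new byte[8];

        // Same guard shape as the one added to SlicedInputStream#read:
        // validate (offset, length) against the buffer, then short-circuit zero-length reads.
        Objects.checkFromIndexSize(0, 0, buffer.length); // throws IndexOutOfBoundsException if invalid

        System.out.println(in.read(buffer, 0, 0)); // prints 0 – nothing is consumed
        System.out.println(in.read(buffer, 0, 3)); // prints 3 – the underlying bytes are still available
    }
}
```

The production change below mirrors these two checks before any slice is opened, so helpers such as InputStream#readAllBytes (which issue zero-length reads) no longer drain the stream and return -1.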
Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../blobstore/SlicedInputStream.java | 6 ++ .../blobstore/SlicedInputStreamTests.java | 70 ++++++++++++++----- 3 files changed, 61 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f71ddfc95f91a..6eeb71e13e1f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -155,6 +155,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix bug in AwarenessAttributeDecommissionIT([4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - [BUG]: flaky test index/80_geo_point/Single point test([#4860](https://github.com/opensearch-project/OpenSearch/pull/4860)) +- Fix bug in SlicedInputStream with zero length ([#4863](https://github.com/opensearch-project/OpenSearch/pull/4863)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/SlicedInputStream.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/SlicedInputStream.java index b9b27547fe50c..ae2e33d41c6ce 100644 --- a/server/src/main/java/org/opensearch/index/snapshots/blobstore/SlicedInputStream.java +++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/SlicedInputStream.java @@ -35,6 +35,7 @@ import java.io.IOException; import java.io.InputStream; +import java.util.Objects; /** * A {@link SlicedInputStream} is a logical @@ -100,6 +101,11 @@ public final int read() throws IOException { @Override public final int read(byte[] buffer, int offset, int length) throws IOException { + Objects.checkFromIndexSize(offset, length, buffer.length); + if (length == 0) { + return 0; + } + final InputStream stream = currentStream(); if (stream == null) { return -1; diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/SlicedInputStreamTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/SlicedInputStreamTests.java index 3e337bbe3adae..76fb8f62b5468 100644 --- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/SlicedInputStreamTests.java +++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/SlicedInputStreamTests.java @@ -32,6 +32,8 @@ package org.opensearch.index.snapshots.blobstore; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + +import org.hamcrest.MatcherAssert; import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayInputStream; @@ -39,6 +41,9 @@ import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; import java.util.Random; import static org.hamcrest.Matchers.equalTo; @@ -86,11 +91,9 @@ protected InputStream openSlice(int slice) throws IOException { assertThat(random.nextInt(Byte.MAX_VALUE), equalTo(input.read())); break; default: - byte[] b = randomBytes(random); - byte[] buffer = new byte[b.length]; - int read = readFully(input, buffer); - assertThat(b.length, equalTo(read)); - assertArrayEquals(b, buffer); + byte[] expectedBytes = randomBytes(random); + byte[] actualBytes = input.readNBytes(expectedBytes.length); + assertArrayEquals(expectedBytes, actualBytes); break; } } @@ -107,19 +110,45 @@ protected InputStream openSlice(int slice) throws IOException { } - 
private int readFully(InputStream stream, byte[] buffer) throws IOException { - for (int i = 0; i < buffer.length;) { - int read = stream.read(buffer, i, buffer.length - i); - if (read == -1) { - if (i == 0) { - return -1; - } else { - return i; - } + public void testReadZeroLength() throws IOException { + try (InputStream input = createSingleByteStream()) { + final byte[] buffer = new byte[100]; + final int read = input.read(buffer, 0, 0); + MatcherAssert.assertThat(read, equalTo(0)); + } + } + + public void testInvalidOffsetAndLength() throws IOException { + try (InputStream input = createSingleByteStream()) { + final byte[] buffer = new byte[100]; + expectThrows(NullPointerException.class, () -> input.read(null, 0, 10)); + expectThrows(IndexOutOfBoundsException.class, () -> input.read(buffer, -1, 10)); + expectThrows(IndexOutOfBoundsException.class, () -> input.read(buffer, 0, -1)); + expectThrows(IndexOutOfBoundsException.class, () -> input.read(buffer, 0, buffer.length + 1)); + } + } + + public void testReadAllBytes() throws IOException { + final byte[] expectedResults = randomByteArrayOfLength(50_000); + final int numSlices = 200; + final int slizeSize = expectedResults.length / numSlices; + + final List arraySlices = new ArrayList<>(numSlices); + for (int i = 0; i < numSlices; i++) { + final int offset = slizeSize * i; + arraySlices.add(Arrays.copyOfRange(expectedResults, offset, offset + slizeSize)); + } + // Create a SlicedInputStream that will return the expected result in 2 slices + final byte[] actualResults; + try (InputStream is = new SlicedInputStream(numSlices) { + @Override + protected InputStream openSlice(int slice) { + return new ByteArrayInputStream(arraySlices.get(slice)); } - i += read; + }) { + actualResults = is.readAllBytes(); } - return buffer.length; + assertArrayEquals(expectedResults, actualResults); } private byte[] randomBytes(Random random) { @@ -129,6 +158,15 @@ private byte[] randomBytes(Random random) { return data; } + private static InputStream createSingleByteStream() { + return new SlicedInputStream(1) { + @Override + protected InputStream openSlice(int slice) { + return new ByteArrayInputStream(new byte[] { 1 }); + } + }; + } + private static final class CheckClosedInputStream extends FilterInputStream { public boolean closed = false; From 2310a3d53991eea6c53ffdc3cc28da1846a124a5 Mon Sep 17 00:00:00 2001 From: Sergey Nuyanzin Date: Sat, 22 Oct 2022 17:40:03 +0200 Subject: [PATCH 082/122] Add icon for Intellijidea toolbox (#4882) * Add icon for Intellijidea toolbox Signed-off-by: Sergey Nuyanzin * Add record in changelog Signed-off-by: Sergey Nuyanzin Signed-off-by: Sergey Nuyanzin --- .gitignore | 1 + .idea/icon.svg | 5 +++++ CHANGELOG.md | 1 + 3 files changed, 7 insertions(+) create mode 100644 .idea/icon.svg diff --git a/.gitignore b/.gitignore index 8ea328ce2f1e9..a0dabfb8798f9 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ out/ !.idea/inspectionProfiles/Project_Default.xml !.idea/runConfigurations/Debug_OpenSearch.xml !.idea/vcs.xml +!.idea/icon.svg # These files are generated in the main tree by IntelliJ benchmarks/src/main/generated/* diff --git a/.idea/icon.svg b/.idea/icon.svg new file mode 100644 index 0000000000000..4f8132d18a2e0 --- /dev/null +++ b/.idea/icon.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/CHANGELOG.md b/CHANGELOG.md index 6eeb71e13e1f5..b4e5fe3eaf906 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Recommissioning of zone. 
REST layer support. ([#4624](https://github.com/opensearch-project/OpenSearch/pull/4604)) - Added in-flight cancellation of SearchShardTask based on resource consumption ([#4565](https://github.com/opensearch-project/OpenSearch/pull/4565)) - Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) +- Add icon for IntelliJidea toolbox ([#4882](https://github.com/opensearch-project/OpenSearch/pull/4882)) - Add groupId value propagation tests for ZIP publication task ([#4772](https://github.com/opensearch-project/OpenSearch/pull/4772)) - Add support for GeoJson Point type in GeoPoint field ([#4597](https://github.com/opensearch-project/OpenSearch/pull/4597)) - Add dev guide for dealing with flakey tests ([4868](https://github.com/opensearch-project/OpenSearch/pull/4868)) From dfa1118eee84e2f54a304ba917885f98a571b23c Mon Sep 17 00:00:00 2001 From: Ralph Ursprung <39383228+rursprung@users.noreply.github.com> Date: Mon, 24 Oct 2022 14:27:22 +0200 Subject: [PATCH 083/122] Add missing no jdk distributions (#4722) * add missing no-jdk gradle targets for distributions by convention all supported platforms should also have a gradle target to produce a no-jdk build and that target should be called `no-jdk-[platform]`. this is a first step in the path to publishing the no-jdk distributions as part of the release, as there are use-cases where a specific JDK should be used instead of the packaged one and it's cleaner to use a small distribution which doesn't bring along an unneeded JDK (the JDK greatly increases the resulting distribution since). note that i failed to rename the `linux-s390x` target, it seems that there's too much generic build code out there which tries to stitch together that name and then fails if it doesn't find it. this is part of opensearch-build#99. Signed-off-by: Ralph Ursprung * add basic README for `:distribution` this is primarily to document the JDK & No JDK setup (to codify it for the future), however the README can be extended in the future to document more information which is currently hidden away only somewhere in the code. Signed-off-by: Ralph Ursprung Signed-off-by: Ralph Ursprung --- CHANGELOG.md | 1 + distribution/README.md | 12 ++++++++++++ distribution/archives/build.gradle | 9 +++++++++ distribution/packages/build.gradle | 8 ++++++++ settings.gradle | 3 +++ 5 files changed, 33 insertions(+) create mode 100644 distribution/README.md diff --git a/CHANGELOG.md b/CHANGELOG.md index b4e5fe3eaf906..7fef74a41c69b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add support for GeoJson Point type in GeoPoint field ([#4597](https://github.com/opensearch-project/OpenSearch/pull/4597)) - Add dev guide for dealing with flakey tests ([4868](https://github.com/opensearch-project/OpenSearch/pull/4868)) - Update pull request template ([#4851](https://github.com/opensearch-project/OpenSearch/pull/4851)) +- Added missing no-jdk distributions ([#4722](https://github.com/opensearch-project/OpenSearch/pull/4722)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/distribution/README.md b/distribution/README.md new file mode 100644 index 0000000000000..b9e948b625659 --- /dev/null +++ b/distribution/README.md @@ -0,0 +1,12 @@ +# Distributions +This subproject contains the necessary tooling to build the various distributions. 
+Note that some of this can only be run on the specific architecture and does not support cross-compile. + +The following distributions are being built: +* Archives (`*.zip`, `*.tar`): these form the basis for all other OpenSearch distributions +* Packages (`*.deb`, `*.rpm`): specific package formats for some Linux distributions +* Docker images +* Backwards compatibility tests: used internally for version compatibility testing, not for public consumption + +## With or Without JDK? +For each supported platform there should be both a target bundled with a JDK and a target without a bundled JDK. diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 1376b8d419f6e..587175eef4008 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -137,6 +137,13 @@ distribution_archives { } } + noJdkLinuxArm64Tar { + archiveClassifier = 'no-jdk-linux-arm64' + content { + archiveFiles(modulesFiles('linux-arm64'), 'tar', 'linux', 'arm64', false) + } + } + linuxTar { archiveClassifier = 'linux-x64' content { @@ -151,6 +158,8 @@ distribution_archives { } } + // Should really be `no-jdk-linux-s390x` as it ships without a JDK, however it seems that the build can't handle + // the absence of the `linux-s390x` target. linuxS390xTar { archiveClassifier = 'linux-s390x' content { diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index df3049d7684c4..d9db3448104c8 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -350,6 +350,10 @@ tasks.register('buildArm64Deb', Deb) { configure(commonDebConfig(true, 'arm64')) } +tasks.register('buildNoJdkArm64Deb', Deb) { + configure(commonDebConfig(false, 'arm64')) +} + tasks.register('buildDeb', Deb) { configure(commonDebConfig(true, 'x64')) } @@ -387,6 +391,10 @@ tasks.register('buildArm64Rpm', Rpm) { configure(commonRpmConfig(true, 'arm64')) } +tasks.register('buildNoJdkArm64Rpm', Rpm) { + configure(commonRpmConfig(false, 'arm64')) +} + tasks.register('buildRpm', Rpm) { configure(commonRpmConfig(true, 'x64')) } diff --git a/settings.gradle b/settings.gradle index 92e07cbb2e7fb..ed01f4c4339c8 100644 --- a/settings.gradle +++ b/settings.gradle @@ -47,6 +47,7 @@ List projects = [ 'distribution:archives:freebsd-tar', 'distribution:archives:no-jdk-freebsd-tar', 'distribution:archives:linux-arm64-tar', + 'distribution:archives:no-jdk-linux-arm64-tar', 'distribution:archives:linux-s390x-tar', 'distribution:archives:linux-tar', 'distribution:archives:no-jdk-linux-tar', @@ -57,9 +58,11 @@ List projects = [ 'distribution:docker:docker-build-context', 'distribution:docker:docker-export', 'distribution:packages:arm64-deb', + 'distribution:packages:no-jdk-arm64-deb', 'distribution:packages:deb', 'distribution:packages:no-jdk-deb', 'distribution:packages:arm64-rpm', + 'distribution:packages:no-jdk-arm64-rpm', 'distribution:packages:rpm', 'distribution:packages:no-jdk-rpm', 'distribution:bwc:bugfix', From e3572464859d728c64d5a6f85e8ba0bd51314a5d Mon Sep 17 00:00:00 2001 From: Ralph Ursprung <39383228+rursprung@users.noreply.github.com> Date: Mon, 24 Oct 2022 20:46:25 +0200 Subject: [PATCH 084/122] copy `build.sh` over from opensearch-build (#4887) the build script should be located in this repository instead of centrally in [opensearch-build][] to allow per-branch specific builds (newer versions might have newer requirements, which currently cannot be reflected). 
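For illustration only (the version, platform and architecture are placeholders, not part of this change), the copied script is invoked along the lines of `./scripts/build.sh -v 2.4.0 -s true -p linux -a x64 -d tar -o artifacts`; per the usage text in the script below, `-v` is the only required flag and the remaining options default to the host platform, architecture and the `tar` distribution.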
the file was copied 1:1 from `scripts/components/OpenSearch/build.sh` as of commit 9e1aa38 in the [opensearch-build][] repository. this is part of opensearch-build#99 and needs to be backported to all active branches because the file here will only be used by the build once the central one in opensearch-build has been removed which in turn can only happen when all active branches have the `build.sh`. [opensearch-build]: https://github.com/opensearch-project/opensearch-build Signed-off-by: Ralph Ursprung Signed-off-by: Ralph Ursprung --- CHANGELOG.md | 1 + scripts/build.sh | 161 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 162 insertions(+) create mode 100755 scripts/build.sh diff --git a/CHANGELOG.md b/CHANGELOG.md index 7fef74a41c69b..80aa0fce94821 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add dev guide for dealing with flakey tests ([4868](https://github.com/opensearch-project/OpenSearch/pull/4868)) - Update pull request template ([#4851](https://github.com/opensearch-project/OpenSearch/pull/4851)) - Added missing no-jdk distributions ([#4722](https://github.com/opensearch-project/OpenSearch/pull/4722)) +- Copy `build.sh` over from opensearch-build ([#4887](https://github.com/opensearch-project/OpenSearch/pull/4887)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/scripts/build.sh b/scripts/build.sh new file mode 100755 index 0000000000000..a0917776507be --- /dev/null +++ b/scripts/build.sh @@ -0,0 +1,161 @@ +#!/bin/bash + +# Copyright OpenSearch Contributors +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. + +set -ex + +function usage() { + echo "Usage: $0 [args]" + echo "" + echo "Arguments:" + echo -e "-v VERSION\t[Required] OpenSearch version." + echo -e "-q QUALIFIER\t[Optional] Version qualifier." + echo -e "-s SNAPSHOT\t[Optional] Build a snapshot, default is 'false'." + echo -e "-p PLATFORM\t[Optional] Platform, default is 'uname -s'." + echo -e "-a ARCHITECTURE\t[Optional] Build architecture, default is 'uname -m'." + echo -e "-d DISTRIBUTION\t[Optional] Distribution, default is 'tar'." + echo -e "-o OUTPUT\t[Optional] Output path, default is 'artifacts'." + echo -e "-h help" +} + +while getopts ":h:v:q:s:o:p:a:d:" arg; do + case $arg in + h) + usage + exit 1 + ;; + v) + VERSION=$OPTARG + ;; + q) + QUALIFIER=$OPTARG + ;; + s) + SNAPSHOT=$OPTARG + ;; + o) + OUTPUT=$OPTARG + ;; + p) + PLATFORM=$OPTARG + ;; + a) + ARCHITECTURE=$OPTARG + ;; + d) + DISTRIBUTION=$OPTARG + ;; + :) + echo "Error: -${OPTARG} requires an argument" + usage + exit 1 + ;; + ?) + echo "Invalid option: -${arg}" + exit 1 + ;; + esac +done + +if [ -z "$VERSION" ]; then + echo "Error: You must specify the OpenSearch version" + usage + exit 1 +fi + +[ -z "$OUTPUT" ] && OUTPUT=artifacts + +mkdir -p $OUTPUT/maven/org/opensearch + +# Build project and publish to maven local. +./gradlew publishToMavenLocal -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER + +# Publish to existing test repo, using this to stage release versions of the artifacts that can be released from the same build. 
+./gradlew publishNebulaPublicationToTestRepository -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER + +# Copy maven publications to be promoted +cp -r ./build/local-test-repo/org/opensearch "${OUTPUT}"/maven/org + +# Assemble distribution artifact +# see https://github.com/opensearch-project/OpenSearch/blob/main/settings.gradle#L34 for other distribution targets + +[ -z "$PLATFORM" ] && PLATFORM=$(uname -s | awk '{print tolower($0)}') +[ -z "$ARCHITECTURE" ] && ARCHITECTURE=`uname -m` +[ -z "$DISTRIBUTION" ] && DISTRIBUTION="tar" + +case $PLATFORM-$DISTRIBUTION-$ARCHITECTURE in + linux-tar-x64|darwin-tar-x64) + PACKAGE="tar" + EXT="tar.gz" + TYPE="archives" + TARGET="$PLATFORM-$PACKAGE" + SUFFIX="$PLATFORM-x64" + ;; + linux-tar-arm64|darwin-tar-arm64) + PACKAGE="tar" + EXT="tar.gz" + TYPE="archives" + TARGET="$PLATFORM-arm64-$PACKAGE" + SUFFIX="$PLATFORM-arm64" + ;; + linux-rpm-x64) + PACKAGE="rpm" + EXT="rpm" + TYPE="packages" + TARGET="rpm" + SUFFIX="x86_64" + ;; + linux-rpm-arm64) + PACKAGE="rpm" + EXT="rpm" + TYPE="packages" + TARGET="arm64-rpm" + SUFFIX="aarch64" + ;; + windows-zip-x64) + PACKAGE="zip" + EXT="zip" + TYPE="archives" + TARGET="$PLATFORM-$PACKAGE" + SUFFIX="$PLATFORM-x64" + ;; + windows-zip-arm64) + PACKAGE="zip" + EXT="zip" + TYPE="archives" + TARGET="$PLATFORM-arm64-$PACKAGE" + SUFFIX="$PLATFORM-arm64" + ;; + *) + echo "Unsupported platform-distribution-architecture combination: $PLATFORM-$DISTRIBUTION-$ARCHITECTURE" + exit 1 + ;; +esac + +echo "Building OpenSearch for $PLATFORM-$DISTRIBUTION-$ARCHITECTURE" + +./gradlew :distribution:$TYPE:$TARGET:assemble -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER + +# Copy artifact to dist folder in bundle build output +[[ "$SNAPSHOT" == "true" ]] && IDENTIFIER="-SNAPSHOT" +ARTIFACT_BUILD_NAME=`ls distribution/$TYPE/$TARGET/build/distributions/ | grep "opensearch-min.*$SUFFIX.$EXT"` +mkdir -p "${OUTPUT}/dist" +cp distribution/$TYPE/$TARGET/build/distributions/$ARTIFACT_BUILD_NAME "${OUTPUT}"/dist/$ARTIFACT_BUILD_NAME + +echo "Building core plugins..." +mkdir -p "${OUTPUT}/core-plugins" +cd plugins +../gradlew assemble -Dbuild.snapshot="$SNAPSHOT" -Dbuild.version_qualifier=$QUALIFIER +cd .. 
+for plugin in plugins/*; do + PLUGIN_NAME=$(basename "$plugin") + if [ -d "$plugin" ] && [ "examples" != "$PLUGIN_NAME" ]; then + PLUGIN_ARTIFACT_BUILD_NAME=`ls "$plugin"/build/distributions/ | grep "$PLUGIN_NAME.*$IDENTIFIER.zip"` + cp "$plugin"/build/distributions/"$PLUGIN_ARTIFACT_BUILD_NAME" "${OUTPUT}"/core-plugins/"$PLUGIN_ARTIFACT_BUILD_NAME" + fi +done From 74b8ecdf39c3cc78014d39f6cf89613467c94b57 Mon Sep 17 00:00:00 2001 From: Ryan Bogan <10944539+ryanbogan@users.noreply.github.com> Date: Mon, 24 Oct 2022 11:48:28 -0700 Subject: [PATCH 085/122] Bump reactor-netty-http to 1.0.24 in repository-azure (#4880) * Bump reactor-netty-http to 1.0.24 Signed-off-by: Ryan Bogan * Add CHANGELOG entry Signed-off-by: Ryan Bogan * Bump reactor-netty-core to 1.0.24 to fix failing tests Signed-off-by: Ryan Bogan * Update SHAs Signed-off-by: Ryan Bogan * Addressed PR Comments Signed-off-by: Ryan Bogan Signed-off-by: Ryan Bogan --- CHANGELOG.md | 3 ++- plugins/repository-azure/build.gradle | 4 ++-- .../licenses/reactor-netty-core-1.0.22.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.0.24.jar.sha1 | 1 + .../licenses/reactor-netty-http-1.0.23.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.0.24.jar.sha1 | 1 + 6 files changed, 6 insertions(+), 5 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.0.24.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.0.24.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 80aa0fce94821..139fc78889488 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,7 +67,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) - Bumps `tika` from 2.4.0 to 2.5.0 ([#4791](https://github.com/opensearch-project/OpenSearch/pull/4791)) - Exclude jettison version brought in with hadoop-minicluster. 
([#4787](https://github.com/opensearch-project/OpenSearch/pull/4787)) -- Bump protobuf-java to 3.21.7 in repository-gcs and repository-hdfs ([#]()) +- Bump protobuf-java to 3.21.7 in repository-gcs and repository-hdfs ([#4790](https://github.com/opensearch-project/OpenSearch/pull/4790)) +- Bump reactor-netty-http to 1.0.24 in repository-azure ([#4880](https://github.com/opensearch-project/OpenSearch/pull/4880)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 6be815e4a0f46..87f3a25d3036b 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -58,8 +58,8 @@ dependencies { api 'org.reactivestreams:reactive-streams:1.0.4' api 'io.projectreactor:reactor-core:3.4.23' api 'io.projectreactor.netty:reactor-netty:1.0.18' - api 'io.projectreactor.netty:reactor-netty-core:1.0.22' - api 'io.projectreactor.netty:reactor-netty-http:1.0.23' + api 'io.projectreactor.netty:reactor-netty-core:1.0.24' + api 'io.projectreactor.netty:reactor-netty-http:1.0.24' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 deleted file mode 100644 index 4c82e37d27043..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c2a258ac71e525c65f2e3a0bcf458b6c79bbc16 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.0.24.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.0.24.jar.sha1 new file mode 100644 index 0000000000000..3f5c8670c6c00 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.0.24.jar.sha1 @@ -0,0 +1 @@ +feaecb39237170aafb23935e9b383e8dda281379 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 deleted file mode 100644 index 0b26b80fe3915..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -63932f2b675f451135986b3723a12d45e818b170 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.24.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.24.jar.sha1 new file mode 100644 index 0000000000000..aa0ca72e38cd0 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.0.24.jar.sha1 @@ -0,0 +1 @@ +2fac480a17f752335318f103ab91427bdfb7716a \ No newline at end of file From 67a29dff7a1f72abf7295360bd541d1cd0155f73 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Mon, 24 Oct 2022 15:36:35 -0400 Subject: [PATCH 086/122] Add a link to the flakey test dev guide. (#4872) Signed-off-by: Daniel (dB.) Doubrovkine Signed-off-by: Daniel (dB.) 
Doubrovkine --- .github/workflows/gradle-check.yml | 3 +++ CHANGELOG.md | 1 + 2 files changed, 4 insertions(+) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 892d04936b743..3089a595c6c18 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -91,3 +91,6 @@ jobs: * **RESULT:** ${{ env.result }} :x: * **URL:** ${{ env.workflow_url }} * **CommitID:** ${{ env.pr_from_sha }} + Please examine the workflow log, locate, and copy-paste the failure below. + [Flakey test?](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flakey-tests) + Iterate to green. diff --git a/CHANGELOG.md b/CHANGELOG.md index 139fc78889488..5ba1759e6bc47 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add dev guide for dealing with flakey tests ([4868](https://github.com/opensearch-project/OpenSearch/pull/4868)) - Update pull request template ([#4851](https://github.com/opensearch-project/OpenSearch/pull/4851)) - Added missing no-jdk distributions ([#4722](https://github.com/opensearch-project/OpenSearch/pull/4722)) +- Add dev help in gradle check CI failures ([4872](https://github.com/opensearch-project/OpenSearch/pull/4872)) - Copy `build.sh` over from opensearch-build ([#4887](https://github.com/opensearch-project/OpenSearch/pull/4887)) ### Dependencies From 7f5f8ea6bc3f870d47aa729e4367b38484350280 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Oct 2022 16:16:10 -0400 Subject: [PATCH 087/122] Bump protobuf-java from 3.21.7 to 3.21.8 in /test/fixtures/hdfs-fixture (#4886) * Bump protobuf-java from 3.21.7 to 3.21.8 in /test/fixtures/hdfs-fixture Bumps [protobuf-java](https://github.com/protocolbuffers/protobuf) from 3.21.7 to 3.21.8. - [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/generate_changelog.py) - [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.21.7...v3.21.8) --- updated-dependencies: - dependency-name: com.google.protobuf:protobuf-java dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 4 +++- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ba1759e6bc47..3552564e34465 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added missing no-jdk distributions ([#4722](https://github.com/opensearch-project/OpenSearch/pull/4722)) - Add dev help in gradle check CI failures ([4872](https://github.com/opensearch-project/OpenSearch/pull/4872)) - Copy `build.sh` over from opensearch-build ([#4887](https://github.com/opensearch-project/OpenSearch/pull/4887)) +### Dependencies +- Bumps `protobuf-java` from 3.21.7 to 3.21.8 ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 @@ -187,4 +189,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 03455adc8033d..b35cb7eb1cfd5 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -48,5 +48,5 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'net.minidev:json-smart:2.4.8' api "org.mockito:mockito-core:${versions.mockito}" - api "com.google.protobuf:protobuf-java:3.21.7" + api "com.google.protobuf:protobuf-java:3.21.8" } From 272f1d1a976254ca865b63e7304d4d88e79bb9f5 Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Mon, 24 Oct 2022 16:18:37 -0500 Subject: [PATCH 088/122] Add project health badges (#4843) * Add project health badges Signed-off-by: Peter Nied --- CHANGELOG.md | 4 ++-- README.md | 8 +++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3552564e34465..9e1743df0fe90 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,8 +41,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added missing no-jdk distributions ([#4722](https://github.com/opensearch-project/OpenSearch/pull/4722)) - Add dev help in gradle check CI failures ([4872](https://github.com/opensearch-project/OpenSearch/pull/4872)) - Copy `build.sh` over from opensearch-build ([#4887](https://github.com/opensearch-project/OpenSearch/pull/4887)) -### Dependencies -- Bumps `protobuf-java` from 3.21.7 to 3.21.8 +- Add project health badges to the README.md ([#4843](https://github.com/opensearch-project/OpenSearch/pull/4843)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 @@ -72,6 +71,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Exclude jettison version brought in with hadoop-minicluster. 
([#4787](https://github.com/opensearch-project/OpenSearch/pull/4787)) - Bump protobuf-java to 3.21.7 in repository-gcs and repository-hdfs ([#4790](https://github.com/opensearch-project/OpenSearch/pull/4790)) - Bump reactor-netty-http to 1.0.24 in repository-azure ([#4880](https://github.com/opensearch-project/OpenSearch/pull/4880)) +- Bumps `protobuf-java` from 3.21.7 to 3.21.8 ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/README.md b/README.md index a7abedeefde8e..45d0a624ae0aa 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,13 @@ [![Chat](https://img.shields.io/badge/chat-on%20forums-blue)](https://forum.opensearch.org/c/opensearch/) [![Documentation](https://img.shields.io/badge/documentation-reference-blue)](https://opensearch.org/docs/latest/opensearch/index/) -[![codecov](https://codecov.io/gh/opensearch-project/OpenSearch/branch/main/graph/badge.svg)](https://codecov.io/gh/opensearch-project/OpenSearch) +[![Code Coverage](https://codecov.io/gh/opensearch-project/OpenSearch/branch/main/graph/badge.svg)](https://codecov.io/gh/opensearch-project/OpenSearch) +[![Untriaged Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/untriaged?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"untriaged") +[![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") +[![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) +[![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) +[![2.4 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.4.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.4.0") +[![3.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) [![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) [![GHA precommit](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml) From 8fda187bb459757164cc80c91ca305d274ca2b53 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Mon, 24 Oct 2022 19:38:13 -0400 Subject: [PATCH 089/122] Upgrade netty to 4.1.84.Final (#4893) * Upgrade netty to 4.1.84.Final Signed-off-by: Craig Perkins * Add line in CHANGELOG Signed-off-by: Craig Perkins Signed-off-by: Craig Perkins --- CHANGELOG.md | 3 ++- buildSrc/version.properties | 2 +- modules/transport-netty4/build.gradle | 6 ++++++ .../licenses/netty-buffer-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.84.Final.jar.sha1 | 1 + 
.../licenses/netty-codec-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.84.Final.jar.sha1 | 1 + ...netty-transport-native-unix-common-4.1.79.Final.jar.sha1 | 1 - ...netty-transport-native-unix-common-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 | 1 + ...netty-transport-native-unix-common-4.1.79.Final.jar.sha1 | 1 - ...netty-transport-native-unix-common-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-all-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-all-4.1.84.Final.jar.sha1 | 1 + plugins/transport-nio/build.gradle | 6 ++++++ .../licenses/netty-buffer-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.84.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.84.Final.jar.sha1 | 1 + 50 files changed, 38 insertions(+), 25 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 delete mode 100644 
modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e1743df0fe90..8b2609f1c5fd5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,6 +72,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bump protobuf-java to 3.21.7 in repository-gcs and repository-hdfs 
([#4790](https://github.com/opensearch-project/OpenSearch/pull/4790)) - Bump reactor-netty-http to 1.0.24 in repository-azure ([#4880](https://github.com/opensearch-project/OpenSearch/pull/4880)) - Bumps `protobuf-java` from 3.21.7 to 3.21.8 +- Upgrade netty to 4.1.84.Final ([#4893](https://github.com/opensearch-project/OpenSearch/pull/4893)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) @@ -189,4 +190,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/buildSrc/version.properties b/buildSrc/version.properties index db0e5b142f7de..4704e0c71880a 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -22,7 +22,7 @@ jettison = 1.5.1 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.12.1 -netty = 4.1.79.Final +netty = 4.1.84.Final joda = 2.10.13 # client dependencies diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 5d2047d7f18a2..9e0d9955a65a1 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -156,6 +156,12 @@ thirdPartyAudit { 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', // from io.netty.handler.ssl.JettyNpnSslEngine (netty) 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e9e4d0b7f754..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..25a6f9ecf50b6 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +a6b8cf57cfffc28d8e33f8175788a99401f576d9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 deleted file mode 100644 index c0920231d79a8..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..032a8f1ed954e --- /dev/null +++ 
b/modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +4f60f56c4cd17db114f01dc64aa465a2905240f5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 deleted file mode 100644 index a3f650da5abbd..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..1e985edfce65e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +78628e78087d3da6c3d2b22aa67798d3f3adcd68 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 deleted file mode 100644 index f2989024cfce1..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0eeffab0cd5efb699d5e4ab9b694d32fef6694b3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..5fe8c5420cd74 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +5a0178b9689493fd612cd40481034469f4bd14cc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index faa7b099406a3..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..beaa2cce654c3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +90c84ec7f1108ae164810cf46694a5ec7ce738fc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e314f164da69..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2dc22423c8ed19906615fb936a5fcb7db14a4e6c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..afd28b451ba12 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +69cd93e2c321f04fc0a18eb5af519565373d0873 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 deleted file mode 100644 index af550935bb911..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55ecb1ff4464b56564a90824a741c3911264aaa4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..07aa37fc76524 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +b6f808e331cf843d2a7ff62042cf9b5343e2ff25 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 deleted file mode 100644 index c6e18efb3ad3d..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6cc2b49749b4fbcc39c687027e04e65e857552a9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..5e12ada3f5c10 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +acd9947d0a951b1f7021c7adc393df936b1ecbf0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index 7f984663dfa85..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -731937caec938b77b39df932a8da8aaca8d5ec05 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..6273c55f3acbd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +e51601ddb88ee646a97ff04db38d45c22c29aee8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 deleted file mode 100644 index a1753b194ea31..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c19c46f9529791964f636c93cfaca0556f0d5d0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..f27ecd081f65d --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +48ce1da1bc12b830f6ffcdc5f0329639eb11e2fb \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 deleted file mode 100644 index f2989024cfce1..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0eeffab0cd5efb699d5e4ab9b694d32fef6694b3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..5fe8c5420cd74 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +5a0178b9689493fd612cd40481034469f4bd14cc \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 deleted file mode 100644 index 913f0e7685c86..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -794a5937cdb1871c4ae350610752dec2929dc1d6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..1eef1b7841930 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +8cef741b42de5a1b21a8313fffcf2b518138c00b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 deleted file mode 100644 index dbb072f3f665f..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -054aace8683de7893cf28d4aab72cd60f49b5700 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..0c3ed9425f8b7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +8d9f2282f4da2486eed7797bc8622437eda7ce65 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 deleted file mode 100644 index a5d1be00d9c29..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8eb9be9b6a66a03f5f4df67fe559cb676493d167 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..2835332c51158 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +3755d26967afca20b925c07d41e6ed3ec38c6822 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index 7f984663dfa85..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -731937caec938b77b39df932a8da8aaca8d5ec05 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..6273c55f3acbd --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ 
+e51601ddb88ee646a97ff04db38d45c22c29aee8 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 deleted file mode 100644 index 724950db96f09..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c53cffaa14d61de523b167377843e35807292a7 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..14003104a623f --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +f1a994d19e9971ba6f1b8abf4ebf912a21cec983 \ No newline at end of file diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index a7e8c42a4e2d3..c5b401de60c8c 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -83,6 +83,12 @@ thirdPartyAudit { 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', // from io.netty.handler.ssl.JettyNpnSslEngine (netty) 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e9e4d0b7f754..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..25a6f9ecf50b6 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +a6b8cf57cfffc28d8e33f8175788a99401f576d9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 deleted file mode 100644 index c0920231d79a8..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..032a8f1ed954e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +4f60f56c4cd17db114f01dc64aa465a2905240f5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 deleted file mode 100644 index a3f650da5abbd..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline 
at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..1e985edfce65e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +78628e78087d3da6c3d2b22aa67798d3f3adcd68 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index faa7b099406a3..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..beaa2cce654c3 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +90c84ec7f1108ae164810cf46694a5ec7ce738fc \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e314f164da69..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2dc22423c8ed19906615fb936a5fcb7db14a4e6c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..afd28b451ba12 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +69cd93e2c321f04fc0a18eb5af519565373d0873 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 deleted file mode 100644 index af550935bb911..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55ecb1ff4464b56564a90824a741c3911264aaa4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..07aa37fc76524 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +b6f808e331cf843d2a7ff62042cf9b5343e2ff25 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 deleted file mode 100644 index c6e18efb3ad3d..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6cc2b49749b4fbcc39c687027e04e65e857552a9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..5e12ada3f5c10 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +acd9947d0a951b1f7021c7adc393df936b1ecbf0 \ No newline at end of file From 1a40bd59a4223c1b8158856109cc2ac0c43815bf Mon Sep 17 00:00:00 2001 From: pranikum 
<109206473+pranikum@users.noreply.github.com> Date: Tue, 25 Oct 2022 18:19:25 +0530 Subject: [PATCH 090/122] Add changes for graceful node decommission (#4586) * Add changes for graceful node decommission Signed-off-by: pranikum <109206473+pranikum@users.noreply.github.com> --- CHANGELOG.md | 2 +- .../AwarenessAttributeDecommissionIT.java | 30 +++++-- .../awareness/put/DecommissionRequest.java | 31 +++++++ .../put/TransportDecommissionAction.java | 2 +- .../DecommissionAttributeMetadata.java | 18 ++-- .../decommission/DecommissionController.java | 64 +++++++++++++++ .../decommission/DecommissionService.java | 82 +++++++++++++++++-- .../decommission/DecommissionStatus.java | 7 ++ .../admin/cluster/RestDecommissionAction.java | 5 ++ .../put/DecommissionRequestTests.java | 14 ++++ .../coordination/JoinTaskExecutorTests.java | 6 +- .../DecommissionControllerTests.java | 63 ++++++++++---- .../DecommissionServiceTests.java | 64 +++++++++++++-- .../cluster/RestDecommissionActionTests.java | 31 +++++++ 14 files changed, 374 insertions(+), 45 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b2609f1c5fd5..7459e3892a054 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,7 +42,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add dev help in gradle check CI failures ([4872](https://github.com/opensearch-project/OpenSearch/pull/4872)) - Copy `build.sh` over from opensearch-build ([#4887](https://github.com/opensearch-project/OpenSearch/pull/4887)) - Add project health badges to the README.md ([#4843](https://github.com/opensearch-project/OpenSearch/pull/4843)) - +- Added changes for graceful node decommission ([#4586](https://github.com/opensearch-project/OpenSearch/pull/4586)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java index 2dc964e3e8845..14ec041b7464b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -120,31 +120,44 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx logger.info("--> starting decommissioning nodes in zone {}", 'c'); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); + // Set the timeout to 0 to do immediate Decommission DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); + decommissionRequest.setNoDelay(true); DecommissionResponse decommissionResponse = client().execute(DecommissionAction.INSTANCE, decommissionRequest).get(); assertTrue(decommissionResponse.isAcknowledged()); + logger.info("--> Received decommissioning nodes in zone {}", 'c'); + // Keep some delay for scheduler to invoke decommission flow + Thread.sleep(500); + // Will wait for all events to complete client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + logger.info("--> Received LANGUID event"); + // assert that decommission status is successful - GetDecommissionStateResponse response = client().execute( + GetDecommissionStateResponse response = client(clusterManagerNodes.get(0)).execute( GetDecommissionStateAction.INSTANCE, new 
GetDecommissionStateRequest(decommissionAttribute.attributeName()) ).get(); assertEquals(response.getAttributeValue(), decommissionAttribute.attributeValue()); - assertEquals(response.getDecommissionStatus(), DecommissionStatus.SUCCESSFUL); + assertEquals(DecommissionStatus.SUCCESSFUL, response.getDecommissionStatus()); + logger.info("--> Decommission status is successful"); ClusterState clusterState = client(clusterManagerNodes.get(0)).admin().cluster().prepareState().execute().actionGet().getState(); assertEquals(4, clusterState.nodes().getSize()); + logger.info("--> Got cluster state with 4 nodes."); // assert status on nodes that are part of cluster currently Iterator discoveryNodeIterator = clusterState.nodes().getNodes().valuesIt(); + DiscoveryNode clusterManagerNodeAfterDecommission = null; while (discoveryNodeIterator.hasNext()) { // assert no node has decommissioned attribute DiscoveryNode node = discoveryNodeIterator.next(); assertNotEquals(node.getAttributes().get("zone"), "c"); - + if (node.isClusterManagerNode()) { + clusterManagerNodeAfterDecommission = node; + } // assert all the nodes has status as SUCCESSFUL ClusterService localNodeClusterService = internalCluster().getInstance(ClusterService.class, node.getName()); assertEquals( @@ -152,6 +165,8 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx DecommissionStatus.SUCCESSFUL ); } + assertNotNull("Cluster Manager not found after decommission", clusterManagerNodeAfterDecommission); + logger.info("--> Cluster Manager node found after decommission"); // assert status on decommissioned node // Here we will verify that until it got kicked out, it received appropriate status updates @@ -163,16 +178,18 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx decommissionedNodeClusterService.state().metadata().decommissionAttributeMetadata().status(), DecommissionStatus.IN_PROGRESS ); + logger.info("--> Verified the decommissioned node Has in progress state."); // Will wait for all events to complete - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); - + client(clusterManagerNodeAfterDecommission.getName()).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + logger.info("--> Got LANGUID event"); // Recommissioning the zone back to gracefully succeed the test once above tests succeeds - DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(clusterManagerNodes.get(0)).execute( + DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(clusterManagerNodeAfterDecommission.getName()).execute( DeleteDecommissionStateAction.INSTANCE, new DeleteDecommissionStateRequest() ).get(); assertTrue(deleteDecommissionStateResponse.isAcknowledged()); + logger.info("--> Deleting decommission done."); // will wait for cluster to stabilise with a timeout of 2 min (findPeerInterval for decommissioned nodes) // as by then all nodes should have joined the cluster @@ -201,6 +218,7 @@ public void testDecommissionFailedWhenAttributeNotWeighedAway() throws Exception DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); + decommissionRequest.setNoDelay(true); assertBusy(() -> { DecommissioningFailedException ex = expectThrows( DecommissioningFailedException.class, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java index 79a6688dc6049..e2fb353b6c749 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java @@ -14,6 +14,7 @@ import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.unit.TimeValue; import java.io.IOException; @@ -28,8 +29,15 @@ */ public class DecommissionRequest extends ClusterManagerNodeRequest { + public static final TimeValue DEFAULT_NODE_DRAINING_TIMEOUT = TimeValue.timeValueSeconds(120); + private DecommissionAttribute decommissionAttribute; + private TimeValue delayTimeout = DEFAULT_NODE_DRAINING_TIMEOUT; + + // holder for no_delay param. To avoid draining time timeout. + private boolean noDelay = false; + public DecommissionRequest() {} public DecommissionRequest(DecommissionAttribute decommissionAttribute) { @@ -39,12 +47,14 @@ public DecommissionRequest(DecommissionAttribute decommissionAttribute) { public DecommissionRequest(StreamInput in) throws IOException { super(in); decommissionAttribute = new DecommissionAttribute(in); + this.delayTimeout = in.readTimeValue(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); decommissionAttribute.writeTo(out); + out.writeTimeValue(delayTimeout); } /** @@ -65,6 +75,19 @@ public DecommissionAttribute getDecommissionAttribute() { return this.decommissionAttribute; } + public TimeValue getDelayTimeout() { + return this.delayTimeout; + } + + public void setNoDelay(boolean noDelay) { + this.delayTimeout = TimeValue.ZERO; + this.noDelay = noDelay; + } + + public boolean isNoDelay() { + return noDelay; + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -74,6 +97,14 @@ public ActionRequestValidationException validate() { if (decommissionAttribute.attributeValue() == null || Strings.isEmpty(decommissionAttribute.attributeValue())) { validationException = addValidationError("attribute value is missing", validationException); } + // This validation should not fail since we are not allowing delay timeout to be set externally. + // Still keeping it for double check. + if (noDelay && delayTimeout.getSeconds() > 0) { + final String validationMessage = "Invalid decommission request. 
no_delay is true and delay_timeout is set to " + + delayTimeout.getSeconds() + + "] Seconds"; + validationException = addValidationError(validationMessage, validationException); + } return validationException; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java index 3a067d2f110b9..6f4e3cf82d2ce 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java @@ -76,6 +76,6 @@ protected ClusterBlockException checkBlock(DecommissionRequest request, ClusterS protected void clusterManagerOperation(DecommissionRequest request, ClusterState state, ActionListener listener) throws Exception { logger.info("starting awareness attribute [{}] decommissioning", request.getDecommissionAttribute().toString()); - decommissionService.startDecommissionAction(request.getDecommissionAttribute(), listener); + decommissionService.startDecommissionAction(request, listener); } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java index d3d508bf36451..56b8e6aea7c4a 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.EnumSet; import java.util.Objects; +import java.util.Set; /** * Contains metadata about decommission attribute @@ -88,11 +89,14 @@ public synchronized void validateNewStatus(DecommissionStatus newStatus) { } // We don't expect that INIT will be new status, as it is registered only when starting the decommission action switch (newStatus) { + case DRAINING: + validateStatus(Set.of(DecommissionStatus.INIT), newStatus); + break; case IN_PROGRESS: - validateStatus(DecommissionStatus.INIT, newStatus); + validateStatus(Set.of(DecommissionStatus.DRAINING, DecommissionStatus.INIT), newStatus); break; case SUCCESSFUL: - validateStatus(DecommissionStatus.IN_PROGRESS, newStatus); + validateStatus(Set.of(DecommissionStatus.IN_PROGRESS), newStatus); break; default: throw new IllegalArgumentException( @@ -101,17 +105,17 @@ public synchronized void validateNewStatus(DecommissionStatus newStatus) { } } - private void validateStatus(DecommissionStatus expected, DecommissionStatus next) { - if (status.equals(expected) == false) { + private void validateStatus(Set expectedStatuses, DecommissionStatus next) { + if (expectedStatuses.contains(status) == false) { assert false : "can't move decommission status to [" + next + "]. current status: [" + status - + "] (expected [" - + expected + + "] (allowed statuses [" + + expectedStatuses + "])"; throw new IllegalStateException( - "can't move decommission status to [" + next + "]. current status: [" + status + "] (expected [" + expected + "])" + "can't move decommission status to [" + next + "]. 
current status: [" + status + "] (expected [" + expectedStatuses + "])" ); } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java index b58d99a9d59db..ffb20a05b3ef7 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java @@ -18,6 +18,10 @@ import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsResponse; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsAction; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateTaskConfig; @@ -32,6 +36,7 @@ import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; +import org.opensearch.http.HttpStats; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; @@ -39,7 +44,9 @@ import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Predicate; @@ -271,4 +278,61 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }); } + + private void logActiveConnections(NodesStatsResponse nodesStatsResponse) { + if (nodesStatsResponse == null || nodesStatsResponse.getNodes() == null) { + logger.info("Node stats response received is null/empty."); + return; + } + + Map nodeActiveConnectionMap = new HashMap<>(); + List responseNodes = nodesStatsResponse.getNodes(); + for (int i = 0; i < responseNodes.size(); i++) { + HttpStats httpStats = responseNodes.get(i).getHttp(); + DiscoveryNode node = responseNodes.get(i).getNode(); + nodeActiveConnectionMap.put(node.getId(), httpStats.getServerOpen()); + } + logger.info("Decommissioning node with connections : [{}]", nodeActiveConnectionMap); + } + + void getActiveRequestCountOnDecommissionedNodes(Set decommissionedNodes) { + if (decommissionedNodes == null || decommissionedNodes.isEmpty()) { + return; + } + String[] nodes = decommissionedNodes.stream().map(DiscoveryNode::getId).toArray(String[]::new); + if (nodes.length == 0) { + return; + } + + final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(nodes); + nodesStatsRequest.clear(); + nodesStatsRequest.addMetric(NodesStatsRequest.Metric.HTTP.metricName()); + + transportService.sendRequest( + transportService.getLocalNode(), + NodesStatsAction.NAME, + nodesStatsRequest, + new TransportResponseHandler() { + @Override + public void handleResponse(NodesStatsResponse response) { + logActiveConnections(response); + } + + @Override + public void handleException(TransportException exp) { + logger.error("Failure occurred while dumping connection for decommission nodes - ", exp.unwrapCause()); + } + + @Override + public String executor() { + 
return ThreadPool.Names.SAME; + } + + @Override + public NodesStatsResponse read(StreamInput in) throws IOException { + return new NodesStatsResponse(in); + } + } + ); + } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index e6639ae058066..f284eb476a755 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -15,6 +15,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -115,13 +116,14 @@ private void setForcedAwarenessAttributes(Settings forceSettings) { * Starts the new decommission request and registers the metadata with status as {@link DecommissionStatus#INIT} * Once the status is updated, it tries to exclude to-be-decommissioned cluster manager eligible nodes from Voting Configuration * - * @param decommissionAttribute register decommission attribute in the metadata request + * @param decommissionRequest decommission request Object * @param listener register decommission listener */ public void startDecommissionAction( - final DecommissionAttribute decommissionAttribute, + final DecommissionRequest decommissionRequest, final ActionListener listener ) { + final DecommissionAttribute decommissionAttribute = decommissionRequest.getDecommissionAttribute(); // register the metadata with status as INIT as first step clusterService.submitStateUpdateTask("decommission [" + decommissionAttribute + "]", new ClusterStateUpdateTask(Priority.URGENT) { @Override @@ -161,15 +163,16 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS decommissionAttributeMetadata.decommissionAttribute(), decommissionAttributeMetadata.status() ); - decommissionClusterManagerNodes(decommissionAttributeMetadata.decommissionAttribute(), listener); + decommissionClusterManagerNodes(decommissionRequest, listener); } }); } private synchronized void decommissionClusterManagerNodes( - final DecommissionAttribute decommissionAttribute, + final DecommissionRequest decommissionRequest, ActionListener listener ) { + final DecommissionAttribute decommissionAttribute = decommissionRequest.getDecommissionAttribute(); ClusterState state = clusterService.getClusterApplierService().state(); // since here metadata is already registered with INIT, we can guarantee that no new node with decommission attribute can further // join the cluster @@ -212,7 +215,7 @@ public void onResponse(Void unused) { // and to-be-decommissioned cluster manager is no more part of Voting Configuration and no more to-be-decommission // nodes can be part of Voting Config listener.onResponse(new DecommissionResponse(true)); - failDecommissionedNodes(clusterService.getClusterApplierService().state()); + drainNodesWithDecommissionedAttribute(decommissionRequest); } } else { // explicitly calling listener.onFailure with NotClusterManagerException as the local node is not the cluster manager @@ -309,17 +312,74 @@ public void onFailure(Exception 
e) { } } - private void failDecommissionedNodes(ClusterState state) { - // this method ensures no matter what, we always exit from this function after clearing the voting config exclusion + void drainNodesWithDecommissionedAttribute(DecommissionRequest decommissionRequest) { + ClusterState state = clusterService.getClusterApplierService().state(); + Set decommissionedNodes = filterNodesWithDecommissionAttribute( + state, + decommissionRequest.getDecommissionAttribute(), + false + ); + + if (decommissionRequest.isNoDelay()) { + // Call to fail the decommission nodes + failDecommissionedNodes(decommissionedNodes, decommissionRequest.getDecommissionAttribute()); + } else { + decommissionController.updateMetadataWithDecommissionStatus(DecommissionStatus.DRAINING, new ActionListener<>() { + @Override + public void onResponse(DecommissionStatus status) { + logger.info("updated the decommission status to [{}]", status); + // set the weights + scheduleNodesDecommissionOnTimeout(decommissionedNodes, decommissionRequest.getDelayTimeout()); + } + + @Override + public void onFailure(Exception e) { + logger.error( + () -> new ParameterizedMessage( + "failed to update decommission status for attribute [{}] to [{}]", + decommissionRequest.getDecommissionAttribute().toString(), + DecommissionStatus.DRAINING + ), + e + ); + // since we are not able to update the status, we will clear the voting config exclusion we have set earlier + clearVotingConfigExclusionAndUpdateStatus(false, false); + } + }); + } + } + + void scheduleNodesDecommissionOnTimeout(Set decommissionedNodes, TimeValue timeoutForNodeDraining) { + ClusterState state = clusterService.getClusterApplierService().state(); DecommissionAttributeMetadata decommissionAttributeMetadata = state.metadata().decommissionAttributeMetadata(); + if (decommissionAttributeMetadata == null) { + return; + } + assert decommissionAttributeMetadata.status().equals(DecommissionStatus.DRAINING) + : "Unexpected status encountered while decommissioning nodes."; + + // This method ensures no matter what, we always exit from this function after clearing the voting config exclusion DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); + + // Wait for timeout to happen. Log the active connection before decommissioning of nodes. + transportService.getThreadPool().schedule(() -> { + // Log active connections. + decommissionController.getActiveRequestCountOnDecommissionedNodes(decommissionedNodes); + // Call to fail the decommission nodes + failDecommissionedNodes(decommissionedNodes, decommissionAttribute); + }, timeoutForNodeDraining, ThreadPool.Names.GENERIC); + } + + private void failDecommissionedNodes(Set decommissionedNodes, DecommissionAttribute decommissionAttribute) { + + // Weighing away is complete. We have allowed the nodes to be drained. Let's move decommission status to IN_PROGRESS. 
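Editorial aside, not part of the patch: the hunk above wires up the new drain-then-fail flow. A minimal caller-side sketch of the two paths it supports follows; it is purely illustrative, assumes the decommission types from org.opensearch.cluster.decommission and org.opensearch.action.admin.cluster.decommission.awareness.put are imported, and assumes a DecommissionService instance plus an ActionListener<DecommissionResponse> are available, as in DecommissionServiceTests.

    // Hypothetical helper illustrating both paths of the new API; not taken from the patch.
    static void decommissionZone(DecommissionService decommissionService,
                                 ActionListener<DecommissionResponse> listener,
                                 boolean noDelay) {
        DecommissionAttribute zoneAttribute = new DecommissionAttribute("zone", "zone-1");
        DecommissionRequest request = new DecommissionRequest(zoneAttribute);
        if (noDelay) {
            // Skip DRAINING: the delay timeout collapses to TimeValue.ZERO and the
            // decommissioned nodes are failed immediately.
            request.setNoDelay(true);
        }
        // Otherwise the request keeps DecommissionRequest.DEFAULT_NODE_DRAINING_TIMEOUT and the
        // service schedules failDecommissionedNodes(...) only after the draining window elapses.
        decommissionService.startDecommissionAction(request, listener);
    }

In short, no_delay short-circuits the new DRAINING stage, while the default path gives in-flight requests a bounded window to finish before the nodes are removed.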
decommissionController.updateMetadataWithDecommissionStatus(DecommissionStatus.IN_PROGRESS, new ActionListener<>() { @Override public void onResponse(DecommissionStatus status) { logger.info("updated the decommission status to [{}]", status); // execute nodes decommissioning decommissionController.removeDecommissionedNodes( - filterNodesWithDecommissionAttribute(clusterService.getClusterApplierService().state(), decommissionAttribute, false), + decommissionedNodes, "nodes-decommissioned", TimeValue.timeValueSeconds(120L), new ActionListener() { @@ -454,6 +514,7 @@ private static void ensureEligibleRequest( case INIT: case FAILED: break; + case DRAINING: case IN_PROGRESS: case SUCCESSFUL: msg = "same request is already in status [" + decommissionAttributeMetadata.status() + "]"; @@ -471,6 +532,7 @@ private static void ensureEligibleRequest( + decommissionAttributeMetadata.decommissionAttribute().toString() + "] already successfully decommissioned, recommission before triggering another decommission"; break; + case DRAINING: case IN_PROGRESS: case INIT: // it means the decommission has been initiated or is inflight. In that case, will fail new request @@ -582,7 +644,9 @@ public static boolean nodeCommissioned(DiscoveryNode discoveryNode, Metadata met DecommissionStatus status = decommissionAttributeMetadata.status(); if (decommissionAttribute != null && status != null) { if (nodeHasDecommissionedAttribute(discoveryNode, decommissionAttribute) - && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { + && (status.equals(DecommissionStatus.IN_PROGRESS) + || status.equals(DecommissionStatus.SUCCESSFUL) + || status.equals(DecommissionStatus.DRAINING))) { return false; } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java index af88b0d0f5902..4ca8c3cc4286e 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java @@ -16,6 +16,11 @@ public enum DecommissionStatus { * Decommission process is initiated, and to-be-decommissioned leader is excluded from voting config */ INIT("init"), + /** + * Decommission process is initiated, and the zone is being drained. 
+ */ + DRAINING("draining"), + /** * Decommission process has started, decommissioned nodes should be removed */ @@ -56,6 +61,8 @@ public static DecommissionStatus fromString(String status) { } if (status.equals(INIT.status())) { return INIT; + } else if (status.equals(DRAINING.status())) { + return DRAINING; } else if (status.equals(IN_PROGRESS.status())) { return IN_PROGRESS; } else if (status.equals(SUCCESSFUL.status())) { diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java index 979bf05f537b7..5f1d1ba48c88b 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java @@ -49,6 +49,11 @@ DecommissionRequest createRequest(RestRequest request) throws IOException { DecommissionRequest decommissionRequest = Requests.decommissionRequest(); String attributeName = request.param("awareness_attribute_name"); String attributeValue = request.param("awareness_attribute_value"); + // Check if we have no delay set. + boolean noDelay = request.paramAsBoolean("no_delay", false); + if (noDelay) { + decommissionRequest.setNoDelay(noDelay); + } return decommissionRequest.setDecommissionAttribute(new DecommissionAttribute(attributeName, attributeValue)); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java index c189b5702dea0..112609b0cf8ec 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -25,6 +26,7 @@ public void testSerialization() throws IOException { final DecommissionRequest deserialized = copyWriteable(originalRequest, writableRegistry(), DecommissionRequest::new); assertEquals(deserialized.getDecommissionAttribute(), originalRequest.getDecommissionAttribute()); + assertEquals(deserialized.getDelayTimeout(), originalRequest.getDelayTimeout()); } public void testValidation() { @@ -54,8 +56,20 @@ public void testValidation() { DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); final DecommissionRequest request = new DecommissionRequest(decommissionAttribute); + request.setNoDelay(true); ActionRequestValidationException e = request.validate(); assertNull(e); + assertEquals(TimeValue.ZERO, request.getDelayTimeout()); + } + { + String attributeName = "zone"; + String attributeValue = "test"; + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + + final DecommissionRequest request = new DecommissionRequest(decommissionAttribute); + ActionRequestValidationException e = request.validate(); + assertNull(e); + assertEquals(DecommissionRequest.DEFAULT_NODE_DRAINING_TIMEOUT, request.getDelayTimeout()); } } } diff --git 
a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 66a3b00f2979d..b5c5d30c45c47 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -233,7 +233,11 @@ public void testJoinClusterWithNoDecommission() { public void testPreventJoinClusterWithDecommission() { Settings.builder().build(); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-1"); - DecommissionStatus decommissionStatus = randomFrom(DecommissionStatus.IN_PROGRESS, DecommissionStatus.SUCCESSFUL); + DecommissionStatus decommissionStatus = randomFrom( + DecommissionStatus.IN_PROGRESS, + DecommissionStatus.SUCCESSFUL, + DecommissionStatus.DRAINING + ); DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( decommissionAttribute, decommissionStatus diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java index 06f2de04907d6..5a76e0d5137fb 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -241,10 +241,45 @@ public void onFailure(Exception e) { } public void testSuccessfulDecommissionStatusMetadataUpdate() throws InterruptedException { + Map> decommissionStateTransitionMap = Map.of( + DecommissionStatus.INIT, + Set.of(DecommissionStatus.DRAINING, DecommissionStatus.IN_PROGRESS), + DecommissionStatus.DRAINING, + Set.of(DecommissionStatus.IN_PROGRESS), + DecommissionStatus.IN_PROGRESS, + Set.of(DecommissionStatus.SUCCESSFUL) + ); + + for (Map.Entry> entry : decommissionStateTransitionMap.entrySet()) { + for (DecommissionStatus val : entry.getValue()) { + verifyDecommissionStatusTransition(entry.getKey(), val); + } + } + } + + public void testSuccessfulDecommissionStatusMetadataUpdateForFailedState() throws InterruptedException { + Map> decommissionStateTransitionMap = Map.of( + DecommissionStatus.INIT, + Set.of(DecommissionStatus.FAILED), + DecommissionStatus.DRAINING, + Set.of(DecommissionStatus.FAILED), + DecommissionStatus.IN_PROGRESS, + Set.of(DecommissionStatus.FAILED) + ); + + for (Map.Entry> entry : decommissionStateTransitionMap.entrySet()) { + for (DecommissionStatus val : entry.getValue()) { + verifyDecommissionStatusTransition(entry.getKey(), val); + } + } + } + + private void verifyDecommissionStatusTransition(DecommissionStatus currentStatus, DecommissionStatus newStatus) + throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); DecommissionAttributeMetadata oldMetadata = new DecommissionAttributeMetadata( new DecommissionAttribute("zone", "zone-1"), - DecommissionStatus.IN_PROGRESS + currentStatus ); ClusterState state = clusterService.state(); Metadata metadata = state.metadata(); @@ -253,25 +288,23 @@ public void testSuccessfulDecommissionStatusMetadataUpdate() throws InterruptedE state = ClusterState.builder(state).metadata(mdBuilder).build(); setState(clusterService, state); - decommissionController.updateMetadataWithDecommissionStatus( - DecommissionStatus.SUCCESSFUL, - new ActionListener() { - @Override - public void onResponse(DecommissionStatus status) { - 
assertEquals(DecommissionStatus.SUCCESSFUL, status); - countDownLatch.countDown(); - } + decommissionController.updateMetadataWithDecommissionStatus(newStatus, new ActionListener() { + @Override + public void onResponse(DecommissionStatus status) { + assertEquals(newStatus, status); + countDownLatch.countDown(); + } - @Override - public void onFailure(Exception e) { - fail("decommission status update failed"); - } + @Override + public void onFailure(Exception e) { + fail("decommission status update failed"); + countDownLatch.countDown(); } - ); + }); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); ClusterState newState = clusterService.getClusterApplierService().state(); DecommissionAttributeMetadata decommissionAttributeMetadata = newState.metadata().decommissionAttributeMetadata(); - assertEquals(decommissionAttributeMetadata.status(), DecommissionStatus.SUCCESSFUL); + assertEquals(decommissionAttributeMetadata.status(), newStatus); } private static class AdjustConfigurationForExclusions implements ClusterStateObserver.Listener { diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index c4cf6c7cc6641..3f39d67dee765 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -16,6 +16,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; import org.opensearch.cluster.ClusterName; @@ -32,6 +33,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; @@ -141,7 +143,7 @@ public void onFailure(Exception e) { countDownLatch.countDown(); } }; - decommissionService.startDecommissionAction(decommissionAttribute, listener); + decommissionService.startDecommissionAction(new DecommissionRequest(decommissionAttribute), listener); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } @@ -168,7 +170,7 @@ public void onFailure(Exception e) { countDownLatch.countDown(); } }; - decommissionService.startDecommissionAction(decommissionAttribute, listener); + decommissionService.startDecommissionAction(new DecommissionRequest(decommissionAttribute), listener); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } @@ -193,7 +195,7 @@ public void onFailure(Exception e) { countDownLatch.countDown(); } }; - decommissionService.startDecommissionAction(decommissionAttribute, listener); + decommissionService.startDecommissionAction(new DecommissionRequest(decommissionAttribute), listener); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } @@ -218,7 +220,7 @@ public void onFailure(Exception e) { countDownLatch.countDown(); } }; - decommissionService.startDecommissionAction(decommissionAttribute, listener); + 
decommissionService.startDecommissionAction(new DecommissionRequest(decommissionAttribute), listener); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } @@ -255,10 +257,62 @@ public void onFailure(Exception e) { countDownLatch.countDown(); } }; - decommissionService.startDecommissionAction(new DecommissionAttribute("zone", "zone_2"), listener); + DecommissionRequest request = new DecommissionRequest(new DecommissionAttribute("zone", "zone_2")); + decommissionService.startDecommissionAction(request, listener); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } + public void testScheduleNodesDecommissionOnTimeout() { + TransportService mockTransportService = Mockito.mock(TransportService.class); + ThreadPool mockThreadPool = Mockito.mock(ThreadPool.class); + Mockito.when(mockTransportService.getLocalNode()).thenReturn(Mockito.mock(DiscoveryNode.class)); + Mockito.when(mockTransportService.getThreadPool()).thenReturn(mockThreadPool); + DecommissionService decommissionService = new DecommissionService( + Settings.EMPTY, + clusterSettings, + clusterService, + mockTransportService, + threadPool, + allocationService + ); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-2"); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.DRAINING + ); + Metadata metadata = Metadata.builder().putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata).build(); + ClusterState state = ClusterState.builder(new ClusterName("test")).metadata(metadata).build(); + + DiscoveryNode decommissionedNode1 = Mockito.mock(DiscoveryNode.class); + DiscoveryNode decommissionedNode2 = Mockito.mock(DiscoveryNode.class); + + setState(clusterService, state); + decommissionService.scheduleNodesDecommissionOnTimeout( + Set.of(decommissionedNode1, decommissionedNode2), + DecommissionRequest.DEFAULT_NODE_DRAINING_TIMEOUT + ); + + Mockito.verify(mockThreadPool).schedule(Mockito.any(Runnable.class), Mockito.any(TimeValue.class), Mockito.anyString()); + } + + public void testDrainNodesWithDecommissionedAttributeWithNoDelay() { + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-2"); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.INIT + ); + + Metadata metadata = Metadata.builder().putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata).build(); + ClusterState state = ClusterState.builder(new ClusterName("test")).metadata(metadata).build(); + + DecommissionRequest request = new DecommissionRequest(decommissionAttribute); + request.setNoDelay(true); + + setState(clusterService, state); + decommissionService.drainNodesWithDecommissionedAttribute(request); + + } + public void testClearClusterDecommissionState() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-2"); diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java index b724de0bd5cc6..bbb21ff8f816c 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java 
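Editorial aside, not part of the patch: the transitions that the new DecommissionControllerTests above verify can be collected into a single map, shown here as an illustrative Java sketch (this map does not exist in the code base; java.util.Map and java.util.Set are assumed imported).

    // Forward transitions exercised by verifyDecommissionStatusTransition: FAILED is reachable
    // from each of INIT, DRAINING and IN_PROGRESS, while SUCCESSFUL follows only IN_PROGRESS.
    Map<DecommissionStatus, Set<DecommissionStatus>> verifiedTransitions = Map.of(
        DecommissionStatus.INIT, Set.of(DecommissionStatus.DRAINING, DecommissionStatus.IN_PROGRESS, DecommissionStatus.FAILED),
        DecommissionStatus.DRAINING, Set.of(DecommissionStatus.IN_PROGRESS, DecommissionStatus.FAILED),
        DecommissionStatus.IN_PROGRESS, Set.of(DecommissionStatus.SUCCESSFUL, DecommissionStatus.FAILED)
    );

The REST-layer tests in the hunk that follows then check how no_delay and the default draining timeout surface in the resulting DecommissionRequest.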
@@ -32,12 +32,43 @@ public void testCreateRequest() throws IOException { Map params = new HashMap<>(); params.put("awareness_attribute_name", "zone"); params.put("awareness_attribute_value", "zone-1"); + params.put("draining_timeout", "60s"); RestRequest deprecatedRequest = buildRestRequest(params); DecommissionRequest request = action.createRequest(deprecatedRequest); assertEquals(request.getDecommissionAttribute().attributeName(), "zone"); assertEquals(request.getDecommissionAttribute().attributeValue(), "zone-1"); + assertEquals(request.getDelayTimeout().getSeconds(), 120); + assertEquals(deprecatedRequest.getHttpRequest().method(), RestRequest.Method.PUT); + } + + public void testCreateRequestWithDefaultTimeout() throws IOException { + Map params = new HashMap<>(); + params.put("awareness_attribute_name", "zone"); + params.put("awareness_attribute_value", "zone-1"); + + RestRequest deprecatedRequest = buildRestRequest(params); + + DecommissionRequest request = action.createRequest(deprecatedRequest); + assertEquals(request.getDecommissionAttribute().attributeName(), "zone"); + assertEquals(request.getDecommissionAttribute().attributeValue(), "zone-1"); + assertEquals(request.getDelayTimeout().getSeconds(), DecommissionRequest.DEFAULT_NODE_DRAINING_TIMEOUT.getSeconds()); + assertEquals(deprecatedRequest.getHttpRequest().method(), RestRequest.Method.PUT); + } + + public void testCreateRequestWithNoDelay() throws IOException { + Map params = new HashMap<>(); + params.put("awareness_attribute_name", "zone"); + params.put("awareness_attribute_value", "zone-1"); + params.put("no_delay", "true"); + + RestRequest deprecatedRequest = buildRestRequest(params); + + DecommissionRequest request = action.createRequest(deprecatedRequest); + assertEquals(request.getDecommissionAttribute().attributeName(), "zone"); + assertEquals(request.getDecommissionAttribute().attributeValue(), "zone-1"); + assertEquals(request.getDelayTimeout().getSeconds(), 0); assertEquals(deprecatedRequest.getHttpRequest().method(), RestRequest.Method.PUT); } From 2268af295f6ea7cf5812e88a635a03ee306e7ad1 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 25 Oct 2022 12:25:07 -0400 Subject: [PATCH 091/122] Dependency updates: asm 9.3 -> 9.4, bytebuddy 1.12.12 -> 1.12.18 (#4889) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 3 ++- buildSrc/version.properties | 6 ++++-- modules/lang-expression/licenses/asm-9.3.jar.sha1 | 1 - modules/lang-expression/licenses/asm-9.4.jar.sha1 | 1 + modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 | 1 - modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 | 1 + modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 | 1 - modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 | 1 + modules/lang-painless/licenses/asm-9.3.jar.sha1 | 1 - modules/lang-painless/licenses/asm-9.4.jar.sha1 | 1 + modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 | 1 - modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 | 1 + modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 | 1 - modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 | 1 + modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 | 1 - modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 | 1 + modules/lang-painless/licenses/asm-util-9.3.jar.sha1 | 1 - modules/lang-painless/licenses/asm-util-9.4.jar.sha1 | 1 + 18 files changed, 14 insertions(+), 11 deletions(-) delete mode 100644 modules/lang-expression/licenses/asm-9.3.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-9.4.jar.sha1 
delete mode 100644 modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-9.3.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-9.4.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-util-9.3.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-util-9.4.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 7459e3892a054..080f8524c3b09 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,8 +71,9 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Exclude jettison version brought in with hadoop-minicluster. ([#4787](https://github.com/opensearch-project/OpenSearch/pull/4787)) - Bump protobuf-java to 3.21.7 in repository-gcs and repository-hdfs ([#4790](https://github.com/opensearch-project/OpenSearch/pull/4790)) - Bump reactor-netty-http to 1.0.24 in repository-azure ([#4880](https://github.com/opensearch-project/OpenSearch/pull/4880)) -- Bumps `protobuf-java` from 3.21.7 to 3.21.8 +- Bumps `protobuf-java` from 3.21.7 to 3.21.8 ([#4886](https://github.com/opensearch-project/OpenSearch/pull/4886)) - Upgrade netty to 4.1.84.Final ([#4893](https://github.com/opensearch-project/OpenSearch/pull/4893)) +- Dependency updates: asm 9.3 -> 9.4, bytebuddy 1.12.12 -> 1.12.18 ([#4889](https://github.com/opensearch-project/OpenSearch/pull/4889)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 4704e0c71880a..8ff74b40cc2eb 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -14,9 +14,10 @@ jackson_databind = 2.13.4.2 snakeyaml = 1.32 icu4j = 70.1 supercsv = 2.4.0 +# Update to 2.17.2+ is breaking OpenSearchJsonLayout (see https://issues.apache.org/jira/browse/LOG4J2-3562) log4j = 2.17.1 slf4j = 1.7.36 -asm = 9.3 +asm = 9.4 jettison = 1.5.1 # when updating the JNA version, also update the version in buildSrc/build.gradle @@ -45,9 +46,10 @@ bouncycastle=1.70 randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 +# Update to 4.8.0 is using reflection without SecurityManager checks (fails with java.security.AccessControlException) mockito = 4.7.0 objenesis = 3.2 -bytebuddy = 1.12.12 +bytebuddy = 1.12.18 # benchmark dependencies jmh = 1.35 diff --git a/modules/lang-expression/licenses/asm-9.3.jar.sha1 b/modules/lang-expression/licenses/asm-9.3.jar.sha1 deleted file mode 100644 index 71d3966a6f6f9..0000000000000 --- a/modules/lang-expression/licenses/asm-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e6300ef51c1d801a7ed62d07cd221aca3a90640 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.4.jar.sha1 b/modules/lang-expression/licenses/asm-9.4.jar.sha1 new file mode 100644 index 
0000000000000..75f2b0fe9a112 --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.4.jar.sha1 @@ -0,0 +1 @@ +b4e0e2d2e023aa317b7cfcfc916377ea348e07d1 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 deleted file mode 100644 index fd7cd4943a57c..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1f2a432d1212f5c352ae607d7b61dcae20c20af5 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 new file mode 100644 index 0000000000000..e0e2a2f4e63e9 --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 @@ -0,0 +1 @@ +8fc2810ddbcbbec0a8bbccb3f8eda58321839912 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 deleted file mode 100644 index 238f0006424d3..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78d2ecd61318b5a58cd04fb237636c0e86b77d97 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 new file mode 100644 index 0000000000000..50ce6d740aab7 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 @@ -0,0 +1 @@ +a99175a17d7fdc18cbcbd0e8ea6a5d276844190a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-9.3.jar.sha1 deleted file mode 100644 index 71d3966a6f6f9..0000000000000 --- a/modules/lang-painless/licenses/asm-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e6300ef51c1d801a7ed62d07cd221aca3a90640 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-9.4.jar.sha1 new file mode 100644 index 0000000000000..75f2b0fe9a112 --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.4.jar.sha1 @@ -0,0 +1 @@ +b4e0e2d2e023aa317b7cfcfc916377ea348e07d1 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 deleted file mode 100644 index f5a04d0196823..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b071f211b37c38e0e9f5998550197c8593f6ad8 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 new file mode 100644 index 0000000000000..850a070775e4d --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 @@ -0,0 +1 @@ +0a5fec9dfc039448d4fd098fbaffcaf55373b223 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 deleted file mode 100644 index fd7cd4943a57c..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1f2a432d1212f5c352ae607d7b61dcae20c20af5 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 new file mode 100644 index 0000000000000..e0e2a2f4e63e9 --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 @@ -0,0 +1 @@ 
+8fc2810ddbcbbec0a8bbccb3f8eda58321839912 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 deleted file mode 100644 index 238f0006424d3..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78d2ecd61318b5a58cd04fb237636c0e86b77d97 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 new file mode 100644 index 0000000000000..50ce6d740aab7 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 @@ -0,0 +1 @@ +a99175a17d7fdc18cbcbd0e8ea6a5d276844190a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.3.jar.sha1 deleted file mode 100644 index 8859c317794ba..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9595bc05510d0bd4b610188b77333fe4851a1975 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.4.jar.sha1 new file mode 100644 index 0000000000000..8c5854f41bcda --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.4.jar.sha1 @@ -0,0 +1 @@ +ab1e0a84b72561dbaf1ee260321e72148ebf4b19 \ No newline at end of file From 0f477a2d0aae4d75313a7f26407cda8594bd981e Mon Sep 17 00:00:00 2001 From: Heemin Kim Date: Tue, 25 Oct 2022 16:58:31 +0000 Subject: [PATCH 092/122] Fix a bug on handling an invalid array value for point type field (#4900) With this commit, appropriate exception is thrown when an array of four values are provided for point type field. Signed-off-by: Heemin Kim Signed-off-by: Heemin Kim --- CHANGELOG.md | 1 + .../mapper/AbstractPointGeometryFieldMapper.java | 15 +++++++++------ .../index/mapper/GeoPointFieldMapperTests.java | 6 ++++++ 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 080f8524c3b09..1ab3537ac1551 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -165,6 +165,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - [BUG]: flaky test index/80_geo_point/Single point test([#4860](https://github.com/opensearch-project/OpenSearch/pull/4860)) - Fix bug in SlicedInputStream with zero length ([#4863](https://github.com/opensearch-project/OpenSearch/pull/4863)) +- Fix a bug on handling an invalid array value for point type field #4900([#4900](https://github.com/opensearch-project/OpenSearch/pull/4900)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java index 305351ca63b9a..b5db3546185ba 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -245,6 +245,7 @@ default boolean isNormalizable(double coord) { * @opensearch.internal */ public static class PointParser

<P extends ParsedPoint> extends Parser<List<P>> { + private static final int MAX_NUMBER_OF_VALUES_IN_ARRAY_FORMAT = 3; /** * Note that this parser is only used for formatting values. */ @@ -287,7 +288,7 @@ public List<P>

    parse(XContentParser parser) throws IOException, ParseException { if (parser.currentToken() == XContentParser.Token.START_ARRAY) { parser.nextToken(); if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { - XContentBuilder xContentBuilder = reConstructArrayXContent(parser); + XContentBuilder xContentBuilder = reconstructArrayXContent(parser); try ( XContentParser subParser = createParser( parser.getXContentRegistry(), @@ -328,18 +329,20 @@ private XContentParser createParser( return subParser; } - private XContentBuilder reConstructArrayXContent(XContentParser parser) throws IOException { + private XContentBuilder reconstructArrayXContent(XContentParser parser) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder().startArray(); - int count = 0; + int numberOfValuesAdded = 0; while (parser.currentToken() != XContentParser.Token.END_ARRAY) { - if (++count > 3) { - break; - } if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) { throw new OpenSearchParseException("numeric value expected"); } builder.value(parser.doubleValue()); parser.nextToken(); + + // Allows one more value to be added so that the error case can be handled by a parser + if (++numberOfValuesAdded > MAX_NUMBER_OF_VALUES_IN_ARRAY_FORMAT) { + break; + } } builder.endArray(); return builder; diff --git a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java index 1b4c95d9ceb8f..060901a3eba38 100644 --- a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java @@ -164,6 +164,12 @@ public void testLatLonInOneValueArray() throws Exception { assertThat(doc.rootDoc().getFields("field"), arrayWithSize(4)); } + public void testLatLonInArrayMoreThanThreeValues() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_point").field("ignore_z_value", true))); + Exception e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.array("field", 1.2, 1.3, 1.4, 1.5)))); + assertThat(e.getCause().getMessage(), containsString("[geo_point] field type does not accept more than 3 values")); + } + public void testLonLatArray() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); ParsedDocument doc = mapper.parse(source(b -> b.startArray("field").value(1.3).value(1.2).endArray())); From 8c9ca4e858e6333265080972cf57809dbc086208 Mon Sep 17 00:00:00 2001 From: Ralph Ursprung <39383228+rursprung@users.noreply.github.com> Date: Tue, 25 Oct 2022 19:35:41 +0200 Subject: [PATCH 093/122] build no-jdk distributions as part of release build (#4902) with commit e3572464859 the `build.sh` has been moved to this repository and with commit dfa1118eee8 the missing no-jdk builds have been added. with these preliminary works done we can now start building the no-jdk distributions as part of the release build so that they can then be published. this is part of opensearch-project/opensearch-build#99. Signed-off-by: Ralph Ursprung Signed-off-by: Ralph Ursprung Co-authored-by: Daniel (dB.) 
Doubrovkine --- CHANGELOG.md | 2 ++ scripts/build.sh | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ab3537ac1551..ccee7bf050c0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Copy `build.sh` over from opensearch-build ([#4887](https://github.com/opensearch-project/OpenSearch/pull/4887)) - Add project health badges to the README.md ([#4843](https://github.com/opensearch-project/OpenSearch/pull/4843)) - Added changes for graceful node decommission ([#4586](https://github.com/opensearch-project/OpenSearch/pull/4586)) +- Build no-jdk distributions as part of release build ([#4902](https://github.com/opensearch-project/OpenSearch/pull/4902)) + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/scripts/build.sh b/scripts/build.sh index a0917776507be..16906bf39fbc7 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -139,7 +139,7 @@ esac echo "Building OpenSearch for $PLATFORM-$DISTRIBUTION-$ARCHITECTURE" -./gradlew :distribution:$TYPE:$TARGET:assemble -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER +./gradlew :distribution:$TYPE:$TARGET:assemble :distribution:$TYPE:no-jdk-$TARGET:assemble -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER # Copy artifact to dist folder in bundle build output [[ "$SNAPSHOT" == "true" ]] && IDENTIFIER="-SNAPSHOT" From ae7a4e91f9e9f7aa2c2715f4f5a5e8391cfffeb9 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 26 Oct 2022 08:06:41 -0400 Subject: [PATCH 094/122] Update Apache Lucene to 9.4.1 (#4922) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-9.4.1.jar.sha1 | 1 + .../analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analysis-icu-9.4.1.jar.sha1 | 1 + .../licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 | 1 - .../licenses/lucene-analysis-kuromoji-9.4.1.jar.sha1 | 1 + .../analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 | 1 - .../analysis-nori/licenses/lucene-analysis-nori-9.4.1.jar.sha1 | 1 + .../licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 | 1 - .../licenses/lucene-analysis-phonetic-9.4.1.jar.sha1 | 1 + .../licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 | 1 - .../licenses/lucene-analysis-smartcn-9.4.1.jar.sha1 | 1 + .../licenses/lucene-analysis-stempel-9.4.0.jar.sha1 | 1 - .../licenses/lucene-analysis-stempel-9.4.1.jar.sha1 | 1 + .../licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 | 1 - .../licenses/lucene-analysis-morfologik-9.4.1.jar.sha1 | 1 + server/licenses/lucene-analysis-common-9.4.0.jar.sha1 | 1 - server/licenses/lucene-analysis-common-9.4.1.jar.sha1 | 1 + server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-9.4.1.jar.sha1 | 1 + server/licenses/lucene-core-9.4.0.jar.sha1 | 1 - server/licenses/lucene-core-9.4.1.jar.sha1 | 1 + server/licenses/lucene-grouping-9.4.0.jar.sha1 | 1 - server/licenses/lucene-grouping-9.4.1.jar.sha1 | 1 + server/licenses/lucene-highlighter-9.4.0.jar.sha1 | 1 - server/licenses/lucene-highlighter-9.4.1.jar.sha1 | 1 + server/licenses/lucene-join-9.4.0.jar.sha1 | 1 - server/licenses/lucene-join-9.4.1.jar.sha1 | 1 + server/licenses/lucene-memory-9.4.0.jar.sha1 | 1 - 
server/licenses/lucene-memory-9.4.1.jar.sha1 | 1 + server/licenses/lucene-misc-9.4.0.jar.sha1 | 1 - server/licenses/lucene-misc-9.4.1.jar.sha1 | 1 + server/licenses/lucene-queries-9.4.0.jar.sha1 | 1 - server/licenses/lucene-queries-9.4.1.jar.sha1 | 1 + server/licenses/lucene-queryparser-9.4.0.jar.sha1 | 1 - server/licenses/lucene-queryparser-9.4.1.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.4.0.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.4.1.jar.sha1 | 1 + server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-9.4.1.jar.sha1 | 1 + server/licenses/lucene-spatial3d-9.4.0.jar.sha1 | 1 - server/licenses/lucene-spatial3d-9.4.1.jar.sha1 | 1 + server/licenses/lucene-suggest-9.4.0.jar.sha1 | 1 - server/licenses/lucene-suggest-9.4.1.jar.sha1 | 1 + server/src/main/java/org/opensearch/Version.java | 2 +- 47 files changed, 25 insertions(+), 24 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.4.1.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.1.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.1.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.1.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.1.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.1.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.1.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-core-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-join-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.4.1.jar.sha1 
delete mode 100644 server/licenses/lucene-queryparser-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.4.1.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.4.0.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.4.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index ccee7bf050c0a..1795e8971704d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -76,6 +76,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `protobuf-java` from 3.21.7 to 3.21.8 ([#4886](https://github.com/opensearch-project/OpenSearch/pull/4886)) - Upgrade netty to 4.1.84.Final ([#4893](https://github.com/opensearch-project/OpenSearch/pull/4893)) - Dependency updates: asm 9.3 -> 9.4, bytebuddy 1.12.12 -> 1.12.18 ([#4889](https://github.com/opensearch-project/OpenSearch/pull/4889)) +- Update Apache Lucene to 9.4.1 ([#4922](https://github.com/opensearch-project/OpenSearch/pull/4922)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 8ff74b40cc2eb..ccd90bcf7d802 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.4.0 +lucene = 9.4.1 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.4+8 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 deleted file mode 100644 index 2b647c1270e14..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19749e264805171009836cbedecc5494b13cd920 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.4.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..f8eb0a7bdb033 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.4.1.jar.sha1 @@ -0,0 +1 @@ +574ac602f98475c1d061adee71ed90dce2eee679 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 deleted file mode 100644 index 0038e3153b139..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aa0f250558375922f3091820361156e514fe1842 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..77c20d4ed8073 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.1.jar.sha1 @@ -0,0 +1 @@ +b217251a8c13669ec4066871d026e4cb84abf702 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 deleted file mode 100644 index ec8c78a580543..0000000000000 --- 
a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -32eb1ad367ab1289804aeed95ea7216711a7764d \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..b2090016c9f5d --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.1.jar.sha1 @@ -0,0 +1 @@ +d80cf8993498784bcb6e4dcefdcebd3ddc4ca022 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 deleted file mode 100644 index 438585ee3afa8..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -63661714be65f882a921d281965b0779fd487b90 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..2f5d27452f2f3 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.1.jar.sha1 @@ -0,0 +1 @@ +082f4870f58b17bcc60dedb399fe82964ef0ad6f \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 deleted file mode 100644 index 019a98dc594b3..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1034d876551fc21f7835b456dab01db21b9a4af6 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..146d5eebbe800 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.1.jar.sha1 @@ -0,0 +1 @@ +733411a30c1b1b473b93d4b28f59f8914d7ed83f \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 deleted file mode 100644 index cc8e31b7a248f..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f704ee4b14e2fe2622bb983f04b36a32df8fd4a7 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..61f11cfb32aee --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.1.jar.sha1 @@ -0,0 +1 @@ +19d011e47bab7ccd1389095834d18ad7bf98f050 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 deleted file mode 100644 index 25e115b84308d..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a95ff17b51da6b3da641fa4053e5ee9ea2ff5daf \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..9259308e80d4b --- 
/dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.1.jar.sha1 @@ -0,0 +1 @@ +7e48a8d411fffb331484c9485a6bdbf1d856ba06 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 deleted file mode 100644 index 7cc4a7131f866..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13e1ae2c760d8c0d7990ffe3296e46d9d8e6f842 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..77f7bdd37cd8f --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.1.jar.sha1 @@ -0,0 +1 @@ +ec94415a249a7814fa76d96667173fae875112a2 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.4.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.4.0.jar.sha1 deleted file mode 100644 index d4db2877c486b..0000000000000 --- a/server/licenses/lucene-analysis-common-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02fbd4e87241411fcf5d34e92a50bee46ab164dc \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.4.1.jar.sha1 b/server/licenses/lucene-analysis-common-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..dcee87ac5273a --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.4.1.jar.sha1 @@ -0,0 +1 @@ +692cbd2e220222adff279e922d1c14bb10dae8b0 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 deleted file mode 100644 index 1b7b53ef9fe70..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -259863dfd107645de6146b3c87b4ecee66a4d43d \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.4.1.jar.sha1 b/server/licenses/lucene-backward-codecs-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..927cdf30dce6f --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.4.1.jar.sha1 @@ -0,0 +1 @@ +2826b0fd18033b6d1625b8313ab28c7f43fac692 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.4.0.jar.sha1 b/server/licenses/lucene-core-9.4.0.jar.sha1 deleted file mode 100644 index 66f7f24485172..0000000000000 --- a/server/licenses/lucene-core-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cca1116f813c0f0c63acfac4c952baf29d46d76b \ No newline at end of file diff --git a/server/licenses/lucene-core-9.4.1.jar.sha1 b/server/licenses/lucene-core-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..8a12d131b158e --- /dev/null +++ b/server/licenses/lucene-core-9.4.1.jar.sha1 @@ -0,0 +1 @@ +48ff655927016066d4746b485d63f0586d859c5f \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.4.0.jar.sha1 b/server/licenses/lucene-grouping-9.4.0.jar.sha1 deleted file mode 100644 index fe79c0efd34e4..0000000000000 --- a/server/licenses/lucene-grouping-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51bec1d5acc8ecaf9f50e047d3f86d60c7a958f4 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.4.1.jar.sha1 b/server/licenses/lucene-grouping-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..c6507cada010f --- /dev/null +++ b/server/licenses/lucene-grouping-9.4.1.jar.sha1 @@ -0,0 +1 @@ 
+690585f53f4ea00fae94c72fbf711f693a3981c5 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.4.0.jar.sha1 b/server/licenses/lucene-highlighter-9.4.0.jar.sha1 deleted file mode 100644 index 54700f08a3fdb..0000000000000 --- a/server/licenses/lucene-highlighter-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c8cf8c9308d8fb18a927c7ed267a14ace3990a5f \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.4.1.jar.sha1 b/server/licenses/lucene-highlighter-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..58019cc9702d0 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.4.1.jar.sha1 @@ -0,0 +1 @@ +e97e3003a80c67ddf33a456ab54413b12764d23d \ No newline at end of file diff --git a/server/licenses/lucene-join-9.4.0.jar.sha1 b/server/licenses/lucene-join-9.4.0.jar.sha1 deleted file mode 100644 index 752006d3a66dd..0000000000000 --- a/server/licenses/lucene-join-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -99b2d3c8e137a6853a2503456897d47d4f18974b \ No newline at end of file diff --git a/server/licenses/lucene-join-9.4.1.jar.sha1 b/server/licenses/lucene-join-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..2f0765f2b0a3c --- /dev/null +++ b/server/licenses/lucene-join-9.4.1.jar.sha1 @@ -0,0 +1 @@ +8749a1b71eacb85c47ed90265ea98c1c8b4e5abd \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.4.0.jar.sha1 b/server/licenses/lucene-memory-9.4.0.jar.sha1 deleted file mode 100644 index 27b488699968e..0000000000000 --- a/server/licenses/lucene-memory-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -881cb214e79da14de35cb0e8e6779d2722828a96 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.4.1.jar.sha1 b/server/licenses/lucene-memory-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..8a8674b776623 --- /dev/null +++ b/server/licenses/lucene-memory-9.4.1.jar.sha1 @@ -0,0 +1 @@ +89797042e65179325f40c173fc20e227d3df4f29 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.4.0.jar.sha1 b/server/licenses/lucene-misc-9.4.0.jar.sha1 deleted file mode 100644 index f9924475b9acd..0000000000000 --- a/server/licenses/lucene-misc-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a126123e482e6bf2e7aea670d221a2a39d3277dc \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.4.1.jar.sha1 b/server/licenses/lucene-misc-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..3c4e6f89400bc --- /dev/null +++ b/server/licenses/lucene-misc-9.4.1.jar.sha1 @@ -0,0 +1 @@ +ae8465d5499ab39012c3c81d82014667c5c03dd2 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.4.0.jar.sha1 b/server/licenses/lucene-queries-9.4.0.jar.sha1 deleted file mode 100644 index 65e441bfdaf90..0000000000000 --- a/server/licenses/lucene-queries-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe74dbfe9dba9ee9ee2cb80f151fde97fb4efd12 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.4.1.jar.sha1 b/server/licenses/lucene-queries-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..f897fda0074bf --- /dev/null +++ b/server/licenses/lucene-queries-9.4.1.jar.sha1 @@ -0,0 +1 @@ +c78e99a46fabd2a7a175fe170a1537de2b9bca95 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.4.0.jar.sha1 b/server/licenses/lucene-queryparser-9.4.0.jar.sha1 deleted file mode 100644 index 2d454942d52e1..0000000000000 --- a/server/licenses/lucene-queryparser-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13f108a8572fcf0670c7df3ba8dbe1076d0e0dbe \ No newline at end of file diff --git 
a/server/licenses/lucene-queryparser-9.4.1.jar.sha1 b/server/licenses/lucene-queryparser-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..f82637ff48a42 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.4.1.jar.sha1 @@ -0,0 +1 @@ +98051f363cd92fd324aca47b662c4a71b9419e49 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.4.0.jar.sha1 b/server/licenses/lucene-sandbox-9.4.0.jar.sha1 deleted file mode 100644 index 4ebcf3f6edc8f..0000000000000 --- a/server/licenses/lucene-sandbox-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7a676a12ea50dcbf64564f4e4022f939f0a627d \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.4.1.jar.sha1 b/server/licenses/lucene-sandbox-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..210fc28c4d45a --- /dev/null +++ b/server/licenses/lucene-sandbox-9.4.1.jar.sha1 @@ -0,0 +1 @@ +6c82a38b0fe1042ef11751cdbb1be689b7a00428 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 deleted file mode 100644 index c0f181ad19eb6..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84d956d1cb1458c51967af1c4acadd2a1f92634d \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.4.1.jar.sha1 b/server/licenses/lucene-spatial-extras-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..b1430cf47a867 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.4.1.jar.sha1 @@ -0,0 +1 @@ +7171406be9b864a4929edff8601b3d69645c64ae \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.4.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.4.0.jar.sha1 deleted file mode 100644 index 3414f36b02bef..0000000000000 --- a/server/licenses/lucene-spatial3d-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -76887ca708f23b13613e45fb9e307c548b22c6da \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.4.1.jar.sha1 b/server/licenses/lucene-spatial3d-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..482526b6032b6 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.4.1.jar.sha1 @@ -0,0 +1 @@ +5ede97d83f0c62ba84ae1e2279f411ae87ecaca1 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.4.0.jar.sha1 b/server/licenses/lucene-suggest-9.4.0.jar.sha1 deleted file mode 100644 index 563a0b5ad966b..0000000000000 --- a/server/licenses/lucene-suggest-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -406c9c539f262449d3b1e57e7bc4302efeecaf6c \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.4.1.jar.sha1 b/server/licenses/lucene-suggest-9.4.1.jar.sha1 new file mode 100644 index 0000000000000..ed4e8c4e3ca66 --- /dev/null +++ b/server/licenses/lucene-suggest-9.4.1.jar.sha1 @@ -0,0 +1 @@ +60e7740d2894ac7a0e97c3eabaee4823a02b0f9c \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 3387eee2dffc8..8020ae97d456a 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -100,7 +100,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_0); - 
public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_1); public static final Version CURRENT = V_3_0_0; public static Version readVersion(StreamInput in) throws IOException { From d96da5026468eb3dabbd12d4763d40a52b980440 Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Wed, 26 Oct 2022 09:14:18 -0400 Subject: [PATCH 095/122] Renamed to flaky test. (#4912) Signed-off-by: Daniel (dB.) Doubrovkine Signed-off-by: Daniel (dB.) Doubrovkine --- .github/workflows/gradle-check.yml | 5 ++--- CHANGELOG.md | 5 +++-- DEVELOPER_GUIDE.md | 10 +++++----- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 3089a595c6c18..a630fc8d9fcc2 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -91,6 +91,5 @@ jobs: * **RESULT:** ${{ env.result }} :x: * **URL:** ${{ env.workflow_url }} * **CommitID:** ${{ env.pr_from_sha }} - Please examine the workflow log, locate, and copy-paste the failure below. - [Flakey test?](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flakey-tests) - Iterate to green. + Please examine the workflow log, locate, and copy-paste the failure below, then iterate to green. + Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change? diff --git a/CHANGELOG.md b/CHANGELOG.md index 1795e8971704d..1230ef2ba402d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,7 +36,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add icon for IntelliJidea toolbox ([#4882](https://github.com/opensearch-project/OpenSearch/pull/4882)) - Add groupId value propagation tests for ZIP publication task ([#4772](https://github.com/opensearch-project/OpenSearch/pull/4772)) - Add support for GeoJson Point type in GeoPoint field ([#4597](https://github.com/opensearch-project/OpenSearch/pull/4597)) -- Add dev guide for dealing with flakey tests ([4868](https://github.com/opensearch-project/OpenSearch/pull/4868)) +- Add dev guide for dealing with flaky tests ([4868](https://github.com/opensearch-project/OpenSearch/pull/4868)) - Update pull request template ([#4851](https://github.com/opensearch-project/OpenSearch/pull/4851)) - Added missing no-jdk distributions ([#4722](https://github.com/opensearch-project/OpenSearch/pull/4722)) - Add dev help in gradle check CI failures ([4872](https://github.com/opensearch-project/OpenSearch/pull/4872)) @@ -44,7 +44,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add project health badges to the README.md ([#4843](https://github.com/opensearch-project/OpenSearch/pull/4843)) - Added changes for graceful node decommission ([#4586](https://github.com/opensearch-project/OpenSearch/pull/4586)) - Build no-jdk distributions as part of release build ([#4902](https://github.com/opensearch-project/OpenSearch/pull/4902)) - +- Renamed flaky tests ([#4912](https://github.com/opensearch-project/OpenSearch/pull/4912)) + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 78b926f8ba18b..313aecd62f5f9 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -53,7 +53,7 @@ - [Backports](#backports) - [LineLint](#linelint) 
- [Lucene Snapshots](#lucene-snapshots) - - [Flakey Tests](#flakey-tests) + - [Flaky Tests](#flaky-tests) # Developer Guide @@ -543,15 +543,15 @@ Pass a list of files or directories to limit your search. The Github workflow in [lucene-snapshots.yml](.github/workflows/lucene-snapshots.yml) is a Github worfklow executable by maintainers to build a top-down snapshot build of lucene. These snapshots are available to test compatibility with upcoming changes to Lucene by updating the version at [version.properties](buildsrc/version.properties) with the `version-snapshot-sha` version. Example: `lucene = 10.0.0-snapshot-2e941fc`. -### Flakey Tests +### Flaky Tests -OpenSearch has a very large test suite with long running, often failing (flakey), integration tests. Such individual tests are labelled as [Flakey Random Test Failure](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22Flakey+Random+Test+Failure%22). Your help is wanted fixing these! +OpenSearch has a very large test suite with long running, often failing (flaky), integration tests. Such individual tests are labelled as [Flaky Random Test Failure](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22flaky-test%22). Your help is wanted fixing these! -If you encounter a build/test failure in CI that is unrelated to the change in your pull request, it may be a known flakey test, or a new test failure. +If you encounter a build/test failure in CI that is unrelated to the change in your pull request, it may be a known flaky test, or a new test failure. 1. Follow failed CI links, and locate the failing test(s). 2. Copy-paste the failure into a comment of your PR. -3. Search through [issues](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22Flakey+Random+Test+Failure%22) using the name of the failed test for whether this is a known flakey test. +3. Search through [issues](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22flaky-test%22) using the name of the failed test for whether this is a known flaky test. 5. If an existing issue is found, paste a link to the known issue in a comment to your PR. 6. If no existing issue is found, open one. 7. Retry CI via the GitHub UX or by pushing an update to your PR. 
From 3887b6095275595326a9331ac1082b6155ccfc19 Mon Sep 17 00:00:00 2001 From: pranikum <109206473+pranikum@users.noreply.github.com> Date: Wed, 26 Oct 2022 21:46:56 +0530 Subject: [PATCH 096/122] Add delay timeout for decommission request (#4931) * Add delay timeout for decommission request Signed-off-by: pranikum <109206473+pranikum@users.noreply.github.com> --- CHANGELOG.md | 2 +- .../awareness/put/DecommissionRequest.java | 10 +++++++++- .../awareness/put/DecommissionRequestBuilder.java | 11 +++++++++++ .../admin/cluster/RestDecommissionAction.java | 8 ++++++-- .../awareness/put/DecommissionRequestTests.java | 12 ++++++++++++ .../cluster/RestDecommissionActionTests.java | 15 +++++++++++++++ 6 files changed, 54 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1230ef2ba402d..42208bf11bcae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -170,7 +170,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [BUG]: flaky test index/80_geo_point/Single point test([#4860](https://github.com/opensearch-project/OpenSearch/pull/4860)) - Fix bug in SlicedInputStream with zero length ([#4863](https://github.com/opensearch-project/OpenSearch/pull/4863)) - Fix a bug on handling an invalid array value for point type field #4900([#4900](https://github.com/opensearch-project/OpenSearch/pull/4900)) - +- [BUG]: Allow decommission to support delay timeout [#4930](https://github.com/opensearch-project/OpenSearch/pull/4930)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java index e2fb353b6c749..ae96c8ddb2fde 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java @@ -48,6 +48,7 @@ public DecommissionRequest(StreamInput in) throws IOException { super(in); decommissionAttribute = new DecommissionAttribute(in); this.delayTimeout = in.readTimeValue(); + this.noDelay = in.readBoolean(); } @Override @@ -55,6 +56,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); decommissionAttribute.writeTo(out); out.writeTimeValue(delayTimeout); + out.writeBoolean(noDelay); } /** @@ -75,12 +77,18 @@ public DecommissionAttribute getDecommissionAttribute() { return this.decommissionAttribute; } + public void setDelayTimeout(TimeValue delayTimeout) { + this.delayTimeout = delayTimeout; + } + public TimeValue getDelayTimeout() { return this.delayTimeout; } public void setNoDelay(boolean noDelay) { - this.delayTimeout = TimeValue.ZERO; + if (noDelay) { + this.delayTimeout = TimeValue.ZERO; + } this.noDelay = noDelay; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java index 47af3b952c895..1c7a03fa10e76 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java @@ -12,6 +12,7 @@ 
import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.unit.TimeValue; /** * Register decommission request builder @@ -35,4 +36,14 @@ public DecommissionRequestBuilder setDecommissionedAttribute(DecommissionAttribu request.setDecommissionAttribute(decommissionAttribute); return this; } + + public DecommissionRequestBuilder setDelayTimeOut(TimeValue delayTimeOut) { + request.setDelayTimeout(delayTimeOut); + return this; + } + + public DecommissionRequestBuilder setNoDelay(boolean noDelay) { + request.setNoDelay(noDelay); + return this; + } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java index 5f1d1ba48c88b..c041974165eb6 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDecommissionAction.java @@ -12,6 +12,7 @@ import org.opensearch.client.Requests; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.unit.TimeValue; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -51,8 +52,11 @@ DecommissionRequest createRequest(RestRequest request) throws IOException { String attributeValue = request.param("awareness_attribute_value"); // Check if we have no delay set. boolean noDelay = request.paramAsBoolean("no_delay", false); - if (noDelay) { - decommissionRequest.setNoDelay(noDelay); + decommissionRequest.setNoDelay(noDelay); + + if (request.hasParam("delay_timeout")) { + TimeValue delayTimeout = request.paramAsTime("delay_timeout", DecommissionRequest.DEFAULT_NODE_DRAINING_TIMEOUT); + decommissionRequest.setDelayTimeout(delayTimeout); } return decommissionRequest.setDecommissionAttribute(new DecommissionAttribute(attributeName, attributeValue)); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java index 112609b0cf8ec..8cd407b3aecf2 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestTests.java @@ -71,5 +71,17 @@ public void testValidation() { assertNull(e); assertEquals(DecommissionRequest.DEFAULT_NODE_DRAINING_TIMEOUT, request.getDelayTimeout()); } + { + String attributeName = "zone"; + String attributeValue = "test"; + DecommissionAttribute decommissionAttribute = new DecommissionAttribute(attributeName, attributeValue); + + final DecommissionRequest request = new DecommissionRequest(decommissionAttribute); + request.setNoDelay(true); + request.setDelayTimeout(TimeValue.timeValueSeconds(30)); + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertTrue(e.getMessage().contains("Invalid decommission request")); + } } } diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java 
b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java index bbb21ff8f816c..b5f61f751b19f 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java @@ -72,6 +72,21 @@ public void testCreateRequestWithNoDelay() throws IOException { assertEquals(deprecatedRequest.getHttpRequest().method(), RestRequest.Method.PUT); } + public void testCreateRequestWithDelayTimeout() throws IOException { + Map params = new HashMap<>(); + params.put("awareness_attribute_name", "zone"); + params.put("awareness_attribute_value", "zone-1"); + params.put("delay_timeout", "300s"); + + RestRequest deprecatedRequest = buildRestRequest(params); + + DecommissionRequest request = action.createRequest(deprecatedRequest); + assertEquals(request.getDecommissionAttribute().attributeName(), "zone"); + assertEquals(request.getDecommissionAttribute().attributeValue(), "zone-1"); + assertEquals(request.getDelayTimeout().getSeconds(), 300); + assertEquals(deprecatedRequest.getHttpRequest().method(), RestRequest.Method.PUT); + } + private FakeRestRequest buildRestRequest(Map params) { return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) .withPath("/_cluster/decommission/awareness/{awareness_attribute_name}/{awareness_attribute_value}") From 782dc592419ec36779b1e699131a07c7cb59f88b Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Wed, 26 Oct 2022 16:11:07 -0700 Subject: [PATCH 097/122] Fix failing test: VerifyVersionConstantsIT (#4946) * Fix failing test: VerifyVersionConstantsIT Signed-off-by: Poojita Raj * add changelog Signed-off-by: Poojita Raj Signed-off-by: Poojita Raj --- CHANGELOG.md | 5 +++-- server/src/main/java/org/opensearch/Version.java | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42208bf11bcae..4701894f067c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,7 +45,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added changes for graceful node decommission ([#4586](https://github.com/opensearch-project/OpenSearch/pull/4586)) - Build no-jdk distributions as part of release build ([#4902](https://github.com/opensearch-project/OpenSearch/pull/4902)) - Renamed flaky tests ([#4912](https://github.com/opensearch-project/OpenSearch/pull/4912)) - + ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 @@ -170,7 +170,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [BUG]: flaky test index/80_geo_point/Single point test([#4860](https://github.com/opensearch-project/OpenSearch/pull/4860)) - Fix bug in SlicedInputStream with zero length ([#4863](https://github.com/opensearch-project/OpenSearch/pull/4863)) - Fix a bug on handling an invalid array value for point type field #4900([#4900](https://github.com/opensearch-project/OpenSearch/pull/4900)) -- [BUG]: Allow decommission to support delay timeout [#4930](https://github.com/opensearch-project/OpenSearch/pull/4930)) +- [BUG]: Allow decommission to support delay timeout ([#4930](https://github.com/opensearch-project/OpenSearch/pull/4930)) +- Fix failing test: VerifyVersionConstantsIT ([#4946](https://github.com/opensearch-project/OpenSearch/pull/4946)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability 
([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 8020ae97d456a..99b72a5332bcf 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -99,7 +99,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_0); + public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_1); public static final Version CURRENT = V_3_0_0; From 722fbfdef5ca2995e1a2a091fc20677260b3a0c0 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Wed, 26 Oct 2022 17:16:52 -0700 Subject: [PATCH 098/122] Upgrade woodstox (#4947) * Bump woodstox-core to 6.4.0 Signed-off-by: Vacha Shah * Update CHANGELOG Signed-off-by: Vacha Shah Signed-off-by: Vacha Shah --- CHANGELOG.md | 1 + buildSrc/version.properties | 1 + plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/woodstox-core-6.2.8.jar.sha1 | 1 - plugins/repository-azure/licenses/woodstox-core-6.4.0.jar.sha1 | 1 + plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 | 1 - plugins/repository-hdfs/licenses/woodstox-core-6.4.0.jar.sha1 | 1 + test/fixtures/hdfs-fixture/build.gradle | 1 + 9 files changed, 7 insertions(+), 4 deletions(-) delete mode 100644 plugins/repository-azure/licenses/woodstox-core-6.2.8.jar.sha1 create mode 100644 plugins/repository-azure/licenses/woodstox-core-6.4.0.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/woodstox-core-6.4.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 4701894f067c2..c60d79679a9b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Upgrade netty to 4.1.84.Final ([#4893](https://github.com/opensearch-project/OpenSearch/pull/4893)) - Dependency updates: asm 9.3 -> 9.4, bytebuddy 1.12.12 -> 1.12.18 ([#4889](https://github.com/opensearch-project/OpenSearch/pull/4889)) - Update Apache Lucene to 9.4.1 ([#4922](https://github.com/opensearch-project/OpenSearch/pull/4922)) +- Bump `woodstox-core` to 6.4.0 ([#4947](https://github.com/opensearch-project/OpenSearch/pull/4947)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index ccd90bcf7d802..f224c7d0ecc38 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -19,6 +19,7 @@ log4j = 2.17.1 slf4j = 1.7.36 asm = 9.4 jettison = 1.5.1 +woodstox = 6.4.0 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.12.1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 87f3a25d3036b..063851b3a7edf 
100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -67,7 +67,7 @@ dependencies { api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}" api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" api 'org.codehaus.woodstox:stax2-api:4.2.1' - implementation 'com.fasterxml.woodstox:woodstox-core:6.2.8' + implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" runtimeOnly 'com.google.guava:guava:31.1-jre' api 'org.apache.commons:commons-lang3:3.12.0' testImplementation project(':test:fixtures:azure-fixture') diff --git a/plugins/repository-azure/licenses/woodstox-core-6.2.8.jar.sha1 b/plugins/repository-azure/licenses/woodstox-core-6.2.8.jar.sha1 deleted file mode 100644 index ae65cdebf26de..0000000000000 --- a/plugins/repository-azure/licenses/woodstox-core-6.2.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -670748292899c53b1963730d9eb7f8ab71314e90 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/woodstox-core-6.4.0.jar.sha1 b/plugins/repository-azure/licenses/woodstox-core-6.4.0.jar.sha1 new file mode 100644 index 0000000000000..cac5f37205956 --- /dev/null +++ b/plugins/repository-azure/licenses/woodstox-core-6.4.0.jar.sha1 @@ -0,0 +1 @@ +c47579857bbf12c85499f431d4ecf27d77976b7c \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 07dc8b8547b80..f6363f24ff783 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -85,7 +85,7 @@ dependencies { api 'net.minidev:json-smart:2.4.8' api 'org.apache.zookeeper:zookeeper:3.8.0' api "io.netty:netty-all:${versions.netty}" - implementation 'com.fasterxml.woodstox:woodstox-core:6.3.1' + implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" implementation 'org.codehaus.woodstox:stax2-api:4.2.1' hdfsFixture project(':test:fixtures:hdfs-fixture') diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 deleted file mode 100644 index fb4e67404cd42..0000000000000 --- a/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf29b07ca4dd81ef3c0bc18c8bd5617510a81c5d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.4.0.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.4.0.jar.sha1 new file mode 100644 index 0000000000000..cac5f37205956 --- /dev/null +++ b/plugins/repository-hdfs/licenses/woodstox-core-6.4.0.jar.sha1 @@ -0,0 +1 @@ +c47579857bbf12c85499f431d4ecf27d77976b7c \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index b35cb7eb1cfd5..6c00465b6c842 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -46,6 +46,7 @@ dependencies { api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" + api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" api 'net.minidev:json-smart:2.4.8' api "org.mockito:mockito-core:${versions.mockito}" api "com.google.protobuf:protobuf-java:3.21.8" From 04757607c5aead788b465c77cec6ef459720f625 Mon Sep 17 00:00:00 2001 From: Sergey Nuyanzin Date: Thu, 27 Oct 2022 15:35:41 +0200 Subject: [PATCH 099/122] Remove 
unused private methods (#4926) * Remove unused private methods Signed-off-by: Sergey Nuyanzin * add record to changelog Signed-off-by: Sergey Nuyanzin Signed-off-by: Sergey Nuyanzin --- CHANGELOG.md | 1 + .../gradle/info/GlobalBuildInfoPlugin.java | 9 --- .../precommit/ForbiddenPatternsTaskTests.java | 4 -- .../org/opensearch/painless/antlr/Walker.java | 4 -- .../index/reindex/RoundTripTests.java | 19 ----- .../azure/AzureBlobContainerRetriesTests.java | 25 ------- .../qa/die_with_dignity/DieWithDignityIT.java | 9 --- .../org/opensearch/upgrades/RecoveryIT.java | 44 ------------ .../search/query/QueryPhaseTests.java | 72 ------------------- .../decider/DiskThresholdDeciderIT.java | 8 --- .../aggregations/bucket/MultiTermsIT.java | 3 - .../segments/IndicesSegmentResponse.java | 18 ----- .../cluster/node/DiscoveryNode.java | 21 ------ .../org/opensearch/index/engine/Segment.java | 13 ---- .../index/mapper/DocumentMapperParser.java | 14 ---- .../index/query/ExistsQueryBuilder.java | 8 --- .../org/opensearch/ingest/IngestMetadata.java | 4 -- .../snapshots/SnapshotsService.java | 40 ----------- .../rollover/RolloverRequestTests.java | 19 ----- .../NoClusterManagerBlockServiceTests.java | 7 -- .../DecommissionServiceTests.java | 6 -- .../MetadataCreateIndexServiceTests.java | 10 --- .../joda/JavaJodaTimeDuellingTests.java | 13 ---- .../index/engine/InternalEngineTests.java | 11 --- .../index/mapper/DateFieldTypeTests.java | 5 -- .../index/translog/TranslogTests.java | 4 -- .../test/OpenSearchIntegTestCase.java | 38 ---------- 27 files changed, 1 insertion(+), 428 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c60d79679a9b0..3f948e0f3d8d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -119,6 +119,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) - Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) - Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) +- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java index 166d8e3269d70..62e743d513193 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java @@ -51,7 +51,6 @@ import java.io.File; import java.io.FileInputStream; import java.io.IOException; -import java.io.UncheckedIOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -220,14 +219,6 @@ private List getAvailableJavaVersions(JavaVersion minimumCompilerVersi return javaVersions; } - private static boolean isCurrentJavaHome(File javaHome) { - try { - return Files.isSameFile(javaHome.toPath(), Jvm.current().getJavaHome().toPath()); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - private static String getTestSeed() { String testSeedProperty = System.getProperty("tests.seed"); final String testSeed; diff --git 
a/buildSrc/src/test/java/org/opensearch/gradle/precommit/ForbiddenPatternsTaskTests.java b/buildSrc/src/test/java/org/opensearch/gradle/precommit/ForbiddenPatternsTaskTests.java index ea4db8954bca4..6ce2e70f68381 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/precommit/ForbiddenPatternsTaskTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/precommit/ForbiddenPatternsTaskTests.java @@ -104,10 +104,6 @@ private ForbiddenPatternsTask createTask(Project project) { return project.getTasks().create("forbiddenPatterns", ForbiddenPatternsTask.class); } - private ForbiddenPatternsTask createTask(Project project, String taskName) { - return project.getTasks().create(taskName, ForbiddenPatternsTask.class); - } - private void writeSourceFile(Project project, String name, String... lines) throws IOException { File file = new File(project.getProjectDir(), name); file.getParentFile().mkdirs(); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java index 719a69a9977e7..c03b4199ce8d9 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java @@ -248,10 +248,6 @@ private Location location(ParserRuleContext ctx) { return new Location(sourceName, ctx.getStart().getStartIndex()); } - private Location location(TerminalNode tn) { - return new Location(sourceName, tn.getSymbol().getStartIndex()); - } - @Override public ANode visitSource(SourceContext ctx) { List functions = new ArrayList<>(); diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java index 6239946852cf8..edd301603250a 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java @@ -152,25 +152,6 @@ private void randomRequest(AbstractBulkIndexByScrollRequest request) { request.setScript(random().nextBoolean() ? 
null : randomScript()); } - private void assertRequestEquals(Version version, ReindexRequest request, ReindexRequest tripped) { - assertRequestEquals((AbstractBulkIndexByScrollRequest) request, (AbstractBulkIndexByScrollRequest) tripped); - assertEquals(request.getDestination().version(), tripped.getDestination().version()); - assertEquals(request.getDestination().index(), tripped.getDestination().index()); - if (request.getRemoteInfo() == null) { - assertNull(tripped.getRemoteInfo()); - } else { - assertNotNull(tripped.getRemoteInfo()); - assertEquals(request.getRemoteInfo().getScheme(), tripped.getRemoteInfo().getScheme()); - assertEquals(request.getRemoteInfo().getHost(), tripped.getRemoteInfo().getHost()); - assertEquals(request.getRemoteInfo().getQuery(), tripped.getRemoteInfo().getQuery()); - assertEquals(request.getRemoteInfo().getUsername(), tripped.getRemoteInfo().getUsername()); - assertEquals(request.getRemoteInfo().getPassword(), tripped.getRemoteInfo().getPassword()); - assertEquals(request.getRemoteInfo().getHeaders(), tripped.getRemoteInfo().getHeaders()); - assertEquals(request.getRemoteInfo().getSocketTimeout(), tripped.getRemoteInfo().getSocketTimeout()); - assertEquals(request.getRemoteInfo().getConnectTimeout(), tripped.getRemoteInfo().getConnectTimeout()); - } - } - private void assertRequestEquals(AbstractBulkIndexByScrollRequest request, AbstractBulkIndexByScrollRequest tripped) { assertRequestEquals((AbstractBulkByScrollRequest) request, (AbstractBulkByScrollRequest) tripped); assertEquals(request.getScript(), tripped.getScript()); diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index 1478f48f16650..9ebebc5b25a3e 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -61,7 +61,6 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.apache.hc.core5.http.HttpStatus; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -484,28 +483,4 @@ private static Optional getRangeEnd(HttpExchange exchange) { return Optional.of(Math.toIntExact(rangeEnd)); } - private static void sendIncompleteContent(HttpExchange exchange, byte[] bytes) throws IOException { - final int rangeStart = getRangeStart(exchange); - assertThat(rangeStart, lessThan(bytes.length)); - final Optional rangeEnd = getRangeEnd(exchange); - final int length; - if (rangeEnd.isPresent()) { - // adapt range end to be compliant to https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 - final int effectiveRangeEnd = Math.min(rangeEnd.get(), bytes.length - 1); - length = effectiveRangeEnd - rangeStart; - } else { - length = bytes.length - rangeStart - 1; - } - exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); - exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); - exchange.sendResponseHeaders(HttpStatus.SC_OK, length); - final int bytesToSend = randomIntBetween(0, length - 1); - if (bytesToSend > 0) { - exchange.getResponseBody().write(bytes, rangeStart, bytesToSend); - } - if 
(randomBoolean()) { - exchange.getResponseBody().flush(); - } - } } diff --git a/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java index ec891ef8d44ef..aedb05e8dcbd5 100644 --- a/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java @@ -98,15 +98,6 @@ public void testDieWithDignity() throws Exception { } } - private boolean containsAll(String line, String... subStrings) { - for (String subString : subStrings) { - if (line.matches(subString) == false) { - return false; - } - } - return true; - } - private void debugLogs(Path path) throws IOException { try (BufferedReader reader = Files.newBufferedReader(path)) { reader.lines().forEach(line -> logger.info(line)); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index 4fd82af9603a9..870398ddf6fec 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -195,25 +195,6 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { } } - private void assertDocCountOnAllCopies(String index, int expectedCount) throws Exception { - assertBusy(() -> { - Map state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); - String xpath = "routing_table.indices." + index + ".shards.0.node"; - @SuppressWarnings("unchecked") List assignedNodes = (List) XContentMapValues.extractValue(xpath, state); - assertNotNull(state.toString(), assignedNodes); - for (String assignedNode : assignedNodes) { - try { - assertCount(index, "_only_nodes:" + assignedNode, expectedCount); - } catch (ResponseException e) { - if (e.getMessage().contains("no data nodes with criteria [" + assignedNode + "found for shard: [" + index + "][0]")) { - throw new AssertionError(e); // shard is relocating - ask assert busy to retry - } - throw e; - } - } - }); - } - private void assertCount(final String index, final String preference, final int expectedCount) throws IOException { final int actualDocs; try { @@ -530,17 +511,6 @@ public void testClosedIndexNoopRecovery() throws Exception { break; } } - /** - * Returns the version in which the given index has been created - */ - private static Version indexVersionCreated(final String indexName) throws IOException { - final Request request = new Request("GET", "/" + indexName + "/_settings"); - final String versionCreatedSetting = indexName + ".settings.index.version.created"; - request.addParameter("filter_path", versionCreatedSetting); - - final Response response = client().performRequest(request); - return Version.fromId(Integer.parseInt(ObjectPath.createFromResponse(response).evaluate(versionCreatedSetting))); - } /** * Asserts that an index is closed in the cluster state. 
If `checkRoutingTable` is true, it also asserts @@ -587,20 +557,6 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab } } - @SuppressWarnings("unchecked") - private void assertPeerRecoveredFiles(String reason, String index, String targetNode, Matcher sizeMatcher) throws IOException { - Map recoveryStats = entityAsMap(client().performRequest(new Request("GET", index + "/_recovery"))); - List> shards = (List>) XContentMapValues.extractValue(index + "." + "shards", recoveryStats); - for (Map shard : shards) { - if (Objects.equals(XContentMapValues.extractValue("type", shard), "PEER")) { - if (Objects.equals(XContentMapValues.extractValue("target.name", shard), targetNode)) { - Integer recoveredFileSize = (Integer) XContentMapValues.extractValue("index.files.recovered", shard); - assertThat(reason + " target node [" + targetNode + "] stats [" + recoveryStats + "]", recoveredFileSize, sizeMatcher); - } - } - } - } - @SuppressWarnings("unchecked") private void ensureGlobalCheckpointSynced(String index) throws Exception { assertBusy(() -> { diff --git a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index 74cd4754efe44..2768a38cf673d 100644 --- a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -53,11 +53,9 @@ import org.apache.lucene.index.Term; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanTermQuery; -import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; -import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FieldComparator; @@ -76,7 +74,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; @@ -1178,75 +1175,6 @@ public void search(List leaves, Weight weight, Collector coll }; } - // used to check that numeric long or date sort optimization was run - private static ContextIndexSearcher newOptimizedContextSearcher(IndexReader reader, int queryType, ExecutorService executor) - throws IOException { - return new ContextIndexSearcher( - reader, - IndexSearcher.getDefaultSimilarity(), - IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), - true, - executor - ) { - - @Override - public void search( - Query query, - CollectorManager manager, - QuerySearchResult result, - DocValueFormat[] formats, - TotalHits totalHits - ) throws IOException { - assertTrue(query instanceof BooleanQuery); - List clauses = ((BooleanQuery) query).clauses(); - assertTrue(clauses.size() == 2); - assertTrue(clauses.get(0).getOccur() == Occur.FILTER); - assertTrue(clauses.get(1).getOccur() == Occur.SHOULD); - if (queryType == 0) { - assertTrue( - clauses.get(1).getQuery().getClass() == LongPoint.newDistanceFeatureQuery("random_field", 1, 1, 1).getClass() - ); - } - if (queryType == 1) 
assertTrue(clauses.get(1).getQuery() instanceof DocValuesFieldExistsQuery); - super.search(query, manager, result, formats, totalHits); - } - - @Override - public void search( - List leaves, - Weight weight, - @SuppressWarnings("rawtypes") CollectorManager manager, - QuerySearchResult result, - DocValueFormat[] formats, - TotalHits totalHits - ) throws IOException { - final Query query = weight.getQuery(); - assertTrue(query instanceof BooleanQuery); - List clauses = ((BooleanQuery) query).clauses(); - assertTrue(clauses.size() == 2); - assertTrue(clauses.get(0).getOccur() == Occur.FILTER); - assertTrue(clauses.get(1).getOccur() == Occur.SHOULD); - if (queryType == 0) { - assertTrue( - clauses.get(1).getQuery().getClass() == LongPoint.newDistanceFeatureQuery("random_field", 1, 1, 1).getClass() - ); - } - if (queryType == 1) assertTrue(clauses.get(1).getQuery() instanceof DocValuesFieldExistsQuery); - super.search(leaves, weight, manager, result, formats, totalHits); - } - - @Override - public void search(List leaves, Weight weight, Collector collector) throws IOException { - if (getExecutor() == null) { - assert (false); // should not be there, expected to search with CollectorManager - } else { - super.search(leaves, weight, collector); - } - } - }; - } - private static class TestTotalHitCountCollectorManager extends TotalHitCountCollectorManager { private int totalHits; private final TotalHitCountCollector collector; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 955f0f0465d88..4664648c03ccc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -56,7 +56,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.core.internal.io.IOUtils; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; @@ -486,12 +485,5 @@ TestFileStore getTestFileStore(Path path) { assertThat(path + " not contained in a unique tracked path", containingPaths, hasSize(1)); return trackedPaths.get(containingPaths.iterator().next()); } - - void clearTrackedPaths() throws IOException { - for (Path path : trackedPaths.keySet()) { - IOUtils.rm(path); - } - trackedPaths.clear(); - } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java index 7d7f80c8ac758..d4273aee925f7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java @@ -161,7 +161,4 @@ public void testMultiValuedFieldWithValueScript() throws Exception { assertThat(bucket.getDocCount(), equalTo(2L)); } - private MultiTermsValuesSourceConfig field(String name) { - return new MultiTermsValuesSourceConfig.Builder().setFieldName(name).build(); - } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java 
b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java index c3992f8d42967..e21616657502a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -36,18 +36,15 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; -import org.apache.lucene.util.Accountable; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.index.engine.Segment; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -197,21 +194,6 @@ private static void toXContent(XContentBuilder builder, Sort sort) throws IOExce builder.endArray(); } - private static void toXContent(XContentBuilder builder, Accountable tree) throws IOException { - builder.startObject(); - builder.field(Fields.DESCRIPTION, tree.toString()); - builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(tree.ramBytesUsed())); - Collection children = tree.getChildResources(); - if (children.isEmpty() == false) { - builder.startArray(Fields.CHILDREN); - for (Accountable child : children) { - toXContent(builder, child); - } - builder.endArray(); - } - builder.endObject(); - } - /** * Fields for parsing and toXContent * diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index ec72ea8b49829..e6c943331f323 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -605,25 +605,4 @@ public static Set getPossibleRoleNames() { return roleMap.keySet(); } - /** - * Enum that holds all the possible roles that a node can fulfill in a cluster. - * Each role has its name and a corresponding abbreviation used by cat apis. 
- */ - private enum LegacyRole { - MASTER("master"), - DATA("data"), - INGEST("ingest"); - - private final String roleName; - - LegacyRole(final String roleName) { - this.roleName = roleName; - } - - public String roleName() { - return roleName; - } - - } - } diff --git a/server/src/main/java/org/opensearch/index/engine/Segment.java b/server/src/main/java/org/opensearch/index/engine/Segment.java index a341e73364356..a80256cb685c1 100644 --- a/server/src/main/java/org/opensearch/index/engine/Segment.java +++ b/server/src/main/java/org/opensearch/index/engine/Segment.java @@ -38,7 +38,6 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedNumericSelector; -import org.apache.lucene.util.Accountable; import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.StreamInput; @@ -48,7 +47,6 @@ import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; -import java.util.Collection; import java.util.Map; import java.util.Objects; @@ -317,17 +315,6 @@ private static void readRamTree(StreamInput in) throws IOException { } } - // the ram tree is written recursively since the depth is fairly low (5 or 6) - private void writeRamTree(StreamOutput out, Accountable tree) throws IOException { - out.writeString(tree.toString()); - out.writeVLong(tree.ramBytesUsed()); - Collection children = tree.getChildResources(); - out.writeVInt(children.size()); - for (Accountable child : children) { - writeRamTree(out, child); - } - } - @Override public String toString() { return "Segment{" diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java index 237d69e3ad244..0d332942644f4 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java @@ -37,10 +37,8 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryShardContext; @@ -197,18 +195,6 @@ private static String getRemainingFields(Map map) { return remainingFields.toString(); } - private Tuple> extractMapping(String type, String source) throws MapperParsingException { - Map root; - try ( - XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, source) - ) { - root = parser.mapOrdered(); - } catch (Exception e) { - throw new MapperParsingException("failed to parse mapping definition", e); - } - return extractMapping(type, root); - } - /** * Given an optional type name and mapping definition, returns the type and a normalized form of the mappings. 
* diff --git a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java index 135a9788ab039..6b90a338ee606 100644 --- a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java @@ -32,14 +32,12 @@ package org.opensearch.index.query; -import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; import org.opensearch.common.Strings; @@ -185,12 +183,6 @@ public static Query newFilter(QueryShardContext context, String fieldPattern, bo return new ConstantScoreQuery(boolFilterBuilder.build()); } - private static Query newLegacyExistsQuery(QueryShardContext context, String field) { - MappedFieldType fieldType = context.fieldMapper(field); - String fieldName = fieldType != null ? fieldType.name() : field; - return new TermQuery(new Term(FieldNamesFieldMapper.NAME, fieldName)); - } - private static Query newFieldExistsQuery(QueryShardContext context, String field) { MappedFieldType fieldType = context.getMapperService().fieldType(field); if (fieldType == null) { diff --git a/server/src/main/java/org/opensearch/ingest/IngestMetadata.java b/server/src/main/java/org/opensearch/ingest/IngestMetadata.java index f55ba28e9c304..bf605833db6cf 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestMetadata.java +++ b/server/src/main/java/org/opensearch/ingest/IngestMetadata.java @@ -74,10 +74,6 @@ public final class IngestMetadata implements Metadata.Custom { // IngestMetadata is registered as custom metadata. private final Map pipelines; - private IngestMetadata() { - this.pipelines = Collections.emptyMap(); - } - public IngestMetadata(Map pipelines) { this.pipelines = Collections.unmodifiableMap(pipelines); } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 0123c839e4ad9..a4cd2fbe97bd5 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -935,21 +935,6 @@ public void applyClusterState(ClusterChangedEvent event) { assert assertNoDanglingSnapshots(event.state()); } - /** - * Cleanup all snapshots found in the given cluster state that have no more work left: - * 1. Completed snapshots - * 2. Snapshots in state INIT that a previous cluster-manager of an older version failed to start - * 3. 
Snapshots in any other state that have all their shard tasks completed - */ - private void endCompletedSnapshots(ClusterState state) { - SnapshotsInProgress snapshotsInProgress = state.custom(SnapshotsInProgress.TYPE); - assert snapshotsInProgress != null; - snapshotsInProgress.entries() - .stream() - .filter(entry -> entry.state().completed() || entry.state() == State.INIT || completed(entry.shards().values())) - .forEach(entry -> endSnapshot(entry, state.metadata(), null)); - } - private boolean assertConsistentWithClusterState(ClusterState state) { final SnapshotsInProgress snapshotsInProgress = state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); if (snapshotsInProgress.entries().isEmpty() == false) { @@ -1295,31 +1280,6 @@ private static boolean removedNodesCleanupNeeded(SnapshotsInProgress snapshotsIn }); } - /** - * Returns list of indices with missing shards, and list of indices that are closed - * - * @param shards list of shard statuses - * @return list of failed and closed indices - */ - private static Tuple, Set> indicesWithMissingShards( - ImmutableOpenMap shards, - Metadata metadata - ) { - Set missing = new HashSet<>(); - Set closed = new HashSet<>(); - for (ObjectObjectCursor entry : shards) { - if (entry.value.state() == ShardState.MISSING) { - if (metadata.hasIndex(entry.key.getIndex().getName()) - && metadata.getIndexSafe(entry.key.getIndex()).getState() == IndexMetadata.State.CLOSE) { - closed.add(entry.key.getIndex().getName()); - } else { - missing.add(entry.key.getIndex().getName()); - } - } - } - return new Tuple<>(missing, closed); - } - /** * Finalizes the shard in repository and then removes it from cluster state *

    diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverRequestTests.java index 079e5d388bbf4..f2ea34a9f187d 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -48,7 +48,6 @@ import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentParseException; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.RandomCreateIndexGenerator; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.IndicesModule; import org.opensearch.test.OpenSearchTestCase; @@ -215,22 +214,4 @@ public void testValidation() { conditionsGenerator.add((request) -> request.addMaxIndexAgeCondition(new TimeValue(randomNonNegativeLong()))); } - private static RolloverRequest createTestItem() throws IOException { - RolloverRequest rolloverRequest = new RolloverRequest(); - if (randomBoolean()) { - rolloverRequest.getCreateIndexRequest().mapping(RandomCreateIndexGenerator.randomMapping()); - } - if (randomBoolean()) { - RandomCreateIndexGenerator.randomAliases(rolloverRequest.getCreateIndexRequest()); - } - if (randomBoolean()) { - rolloverRequest.getCreateIndexRequest().settings(RandomCreateIndexGenerator.randomIndexSettings()); - } - int numConditions = randomIntBetween(0, 3); - List> conditions = randomSubsetOf(numConditions, conditionsGenerator); - for (Consumer consumer : conditions) { - consumer.accept(rolloverRequest); - } - return rolloverRequest; - } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java index d9ff21204387d..7bd2fefe50c42 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java @@ -52,13 +52,6 @@ private void createService(Settings settings) { noClusterManagerBlockService = new NoClusterManagerBlockService(settings, clusterSettings); } - private void assertDeprecatedWarningEmitted() { - assertWarnings( - "[discovery.zen.no_master_block] setting was deprecated in OpenSearch and will be removed in a future release! " - + "See the breaking changes documentation for the next major version." - ); - } - public void testBlocksWritesByDefault() { createService(Settings.EMPTY); assertThat(noClusterManagerBlockService.getNoClusterManagerBlock(), sameInstance(NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES)); diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 3f39d67dee765..7fe58d75932a1 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -456,10 +456,4 @@ private static DiscoveryNode newClusterManagerNode(String nodeId, Map DATA_ROLE = Collections.unmodifiableSet( new HashSet<>(Collections.singletonList(DiscoveryNodeRole.DATA_ROLE)) ); - - private ClusterState removeNodes(ClusterState clusterState, String... 
nodeIds) { - DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.getNodes()); - org.opensearch.common.collect.List.of(nodeIds).forEach(nodeBuilder::remove); - return allocationService.disassociateDeadNodes(ClusterState.builder(clusterState).nodes(nodeBuilder).build(), false, "test"); - } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 5348fd0a778f7..389cd19e4953e 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.hamcrest.Matchers; import org.junit.Before; import org.opensearch.ExceptionsHelper; @@ -94,7 +93,6 @@ import java.util.Collection; import java.util.Collections; import java.util.Comparator; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -1302,14 +1300,6 @@ private CompressedXContent createMapping(String fieldName, String fieldType) { } } - private static Map convertMappings(ImmutableOpenMap mappings) { - Map converted = new HashMap<>(mappings.size()); - for (ObjectObjectCursor cursor : mappings) { - converted.put(cursor.key, cursor.value); - } - return converted; - } - private ShardLimitValidator randomShardLimitService() { return createTestShardLimitService(randomIntBetween(10, 10000), false); } diff --git a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java index 94ddfd7e7f100..02bc6c58ad233 100644 --- a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java @@ -874,13 +874,6 @@ private void assertSamePrinterOutput(String format, ZonedDateTime javaDate, Date assertSamePrinterOutput(format, javaDate, jodaDate, dateFormatter, jodaDateFormatter); } - private void assertSamePrinterOutput(String format, ZonedDateTime javaDate, DateTime jodaDate, Locale locale) { - DateFormatter dateFormatter = DateFormatter.forPattern(format).withLocale(locale); - DateFormatter jodaDateFormatter = Joda.forPattern(format).withLocale(locale); - - assertSamePrinterOutput(format, javaDate, jodaDate, dateFormatter, jodaDateFormatter); - } - private void assertSamePrinterOutput( String format, ZonedDateTime javaDate, @@ -908,12 +901,6 @@ private void assertSameDate(String input, String format) { assertSameDate(input, format, jodaFormatter, javaFormatter); } - private void assertSameDate(String input, String format, Locale locale) { - DateFormatter jodaFormatter = Joda.forPattern(format).withLocale(locale); - DateFormatter javaFormatter = DateFormatter.forPattern(format).withLocale(locale); - assertSameDate(input, format, jodaFormatter, javaFormatter); - } - private void assertSameDate(String input, String format, DateFormatter jodaFormatter, DateFormatter javaFormatter) { DateTime jodaDateTime = jodaFormatter.parseJoda(input); diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index 28a420403bcc1..e4b2bbcacbdcd 100644 --- 
a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -3525,17 +3525,6 @@ public void testSkipTranslogReplay() throws IOException { } } - private Path[] filterExtraFSFiles(Path[] files) { - List paths = new ArrayList<>(); - for (Path p : files) { - if (p.getFileName().toString().startsWith("extra")) { - continue; - } - paths.add(p); - } - return paths.toArray(new Path[0]); - } - public void testTranslogReplay() throws IOException { final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getProcessedLocalCheckpoint(); final int numDocs = randomIntBetween(1, 10); diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java index 085343f4ff2f7..490966c5c48e5 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java @@ -67,7 +67,6 @@ import org.joda.time.DateTimeZone; import java.io.IOException; -import java.time.Instant; import java.time.ZoneOffset; import java.util.Collections; @@ -365,10 +364,6 @@ public void testDateNanoDocValues() throws IOException { dir.close(); } - private Instant instant(String str) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(str)).toInstant(); - } - private static DateFieldType fieldType(Resolution resolution, String format, String nullValue) { DateFormatter formatter = DateFormatter.forPattern(format); return new DateFieldType("field", true, false, true, formatter, resolution, nullValue, Collections.emptyMap()); diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogTests.java b/server/src/test/java/org/opensearch/index/translog/TranslogTests.java index 153677e00c22b..964fc8f21c388 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogTests.java @@ -1018,10 +1018,6 @@ private Term newUid(ParsedDocument doc) { return new Term("_id", Uid.encodeId(doc.id())); } - private Term newUid(String id) { - return new Term("_id", Uid.encodeId(id)); - } - public void testVerifyTranslogIsNotDeleted() throws IOException { assertFileIsPresent(translog, 1); translog.add(new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index b20154fff9256..fe8a7cb2b786b 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -80,15 +80,10 @@ import org.opensearch.client.RestClientBuilder; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.RestoreInProgress; -import org.opensearch.cluster.SnapshotDeletionsInProgress; -import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.coordination.OpenSearchNodeCommand; import org.opensearch.cluster.health.ClusterHealthStatus; -import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.routing.IndexRoutingTable; 
import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; @@ -141,7 +136,6 @@ import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.indices.store.IndicesStore; -import org.opensearch.ingest.IngestMetadata; import org.opensearch.monitor.os.OsInfo; import org.opensearch.node.NodeMocksPlugin; import org.opensearch.plugins.NetworkPlugin; @@ -149,7 +143,6 @@ import org.opensearch.rest.RestStatus; import org.opensearch.rest.action.RestCancellableNodeClient; import org.opensearch.script.MockScriptService; -import org.opensearch.script.ScriptMetadata; import org.opensearch.search.MockSearchService; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchService; @@ -1248,37 +1241,6 @@ protected void ensureClusterStateCanBeReadByNodeTool() throws IOException { } } - private static final Set SAFE_METADATA_CUSTOMS = Collections.unmodifiableSet( - new HashSet<>(Arrays.asList(IndexGraveyard.TYPE, IngestMetadata.TYPE, RepositoriesMetadata.TYPE, ScriptMetadata.TYPE)) - ); - - private static final Set SAFE_CUSTOMS = Collections.unmodifiableSet( - new HashSet<>(Arrays.asList(RestoreInProgress.TYPE, SnapshotDeletionsInProgress.TYPE, SnapshotsInProgress.TYPE)) - ); - - /** - * Remove any customs except for customs that we know all clients understand. - * - * @param clusterState the cluster state to remove possibly-unknown customs from - * @return the cluster state with possibly-unknown customs removed - */ - private ClusterState removePluginCustoms(final ClusterState clusterState) { - final ClusterState.Builder builder = ClusterState.builder(clusterState); - clusterState.customs().keysIt().forEachRemaining(key -> { - if (SAFE_CUSTOMS.contains(key) == false) { - builder.removeCustom(key); - } - }); - final Metadata.Builder mdBuilder = Metadata.builder(clusterState.metadata()); - clusterState.metadata().customs().keysIt().forEachRemaining(key -> { - if (SAFE_METADATA_CUSTOMS.contains(key) == false) { - mdBuilder.removeCustom(key); - } - }); - builder.metadata(mdBuilder); - return builder.build(); - } - /** * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each * shard is available on the cluster. 
From 84aba05b179180b148453a5d131bd52cdf061792 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 27 Oct 2022 11:40:33 -0400 Subject: [PATCH 100/122] Fixed compression support for h2c protocol (#4944) Signed-off-by: Andriy Redko --- CHANGELOG.md | 2 + .../HighLevelRestClientCompressionIT.java | 37 +++++++++++++++++++ .../netty4/Netty4HttpServerTransport.java | 19 +++++----- .../bootstrap/test-framework.policy | 5 +++ 4 files changed, 54 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f948e0f3d8d8..b17b996a5a284 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -174,6 +174,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix a bug on handling an invalid array value for point type field #4900([#4900](https://github.com/opensearch-project/OpenSearch/pull/4900)) - [BUG]: Allow decommission to support delay timeout ([#4930](https://github.com/opensearch-project/OpenSearch/pull/4930)) - Fix failing test: VerifyVersionConstantsIT ([#4946](https://github.com/opensearch-project/OpenSearch/pull/4946)) +- Fixed compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) + ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java index 054d0ae8670b5..6985353806a01 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java @@ -31,13 +31,22 @@ package org.opensearch.client; +import org.apache.hc.client5.http.classic.methods.HttpGet; import org.apache.hc.client5.http.classic.methods.HttpPost; import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.entity.GzipCompressingEntity; +import org.apache.hc.client5.http.impl.classic.CloseableHttpClient; +import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse; +import org.apache.hc.client5.http.impl.classic.HttpClients; +import org.apache.hc.core5.http.ContentType; import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import static org.hamcrest.Matchers.equalTo; @@ -62,4 +71,32 @@ public void testCompressesResponseIfRequested() throws IOException { assertEquals(SAMPLE_DOCUMENT, searchResponse.getHits().getHits()[0].getSourceAsString()); } + /** + * The default CloseableHttpAsyncClient does not support compression out of the box (so that applies to RestClient + * and RestHighLevelClient). To check the compression works on both sides, crafting the request using CloseableHttpClient + * instead which uses compression by default. 
+ */ + public void testCompressesRequest() throws IOException, URISyntaxException { + try (CloseableHttpClient client = HttpClients.custom().build()) { + final Node node = client().getNodes().iterator().next(); + final URI baseUri = new URI(node.getHost().toURI()); + + final HttpPut index = new HttpPut(baseUri.resolve("/company/_doc/1")); + index.setEntity(new GzipCompressingEntity(new StringEntity(SAMPLE_DOCUMENT, ContentType.APPLICATION_JSON))); + try (CloseableHttpResponse response = client.execute(index)) { + assertThat(response.getCode(), equalTo(201)); + } + + final HttpGet refresh = new HttpGet(baseUri.resolve("/_refresh")); + try (CloseableHttpResponse response = client.execute(refresh)) { + assertThat(response.getCode(), equalTo(200)); + } + + final HttpPost search = new HttpPost(baseUri.resolve("/_search")); + search.setEntity(new GzipCompressingEntity(new StringEntity("{}", ContentType.APPLICATION_JSON))); + try (CloseableHttpResponse response = client.execute(search)) { + assertThat(response.getCode(), equalTo(200)); + } + } + } } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index e3fde75e5b551..fcc9ab295c6c7 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -413,18 +413,19 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws E // If this handler is hit then no upgrade has been attempted and the client is just talking HTTP final ChannelPipeline pipeline = ctx.pipeline(); pipeline.addAfter(ctx.name(), "handler", getRequestHandler()); - pipeline.replace(this, "aggregator", aggregator); + pipeline.replace(this, "decoder_compress", new HttpContentDecompressor()); - ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); - ch.pipeline().addLast("encoder", new HttpResponseEncoder()); + pipeline.addAfter("decoder_compress", "aggregator", aggregator); if (handlingSettings.isCompression()) { - ch.pipeline() - .addAfter("aggregator", "encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + pipeline.addAfter( + "aggregator", + "encoder_compress", + new HttpContentCompressor(handlingSettings.getCompressionLevel()) + ); } - ch.pipeline().addBefore("handler", "request_creator", requestCreator); - ch.pipeline().addBefore("handler", "response_creator", responseCreator); - ch.pipeline() - .addBefore("handler", "pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); + pipeline.addBefore("handler", "request_creator", requestCreator); + pipeline.addBefore("handler", "response_creator", responseCreator); + pipeline.addBefore("handler", "pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); ctx.fireChannelRead(ReferenceCountUtil.retain(msg)); } diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index 60b704dc12f0c..b6b95a0ac98c8 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -111,6 +111,11 @@ grant codeBase "${codebase.httpcore5}" { permission java.net.SocketPermission "*", "connect"; }; +grant codeBase
"${codebase.httpclient5}" { + // httpclient5 makes socket connections for rest tests + permission java.net.SocketPermission "*", "connect"; +}; + grant codeBase "${codebase.httpcore-nio}" { // httpcore makes socket connections for rest tests permission java.net.SocketPermission "*", "connect"; From fe0b9177c3201941701efa3ff67ac35fc2b1a1f4 Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Thu, 27 Oct 2022 09:32:30 -0700 Subject: [PATCH 101/122] Revert PR 4656 to unblock Windows CI (#4949) * Revert PR 4656 to unblock Windows CI Signed-off-by: Poojita Raj * changelog added Signed-off-by: Poojita Raj Signed-off-by: Poojita Raj --- CHANGELOG.md | 3 +- buildSrc/build.gradle | 2 +- buildSrc/version.properties | 2 +- server/licenses/jna-5.12.1.jar.sha1 | 1 - server/licenses/jna-5.5.0.jar.sha1 | 1 + .../bootstrap/JNAAdvapi32Library.java | 124 ------------------ .../org/opensearch/bootstrap/JNANatives.java | 44 +------ 7 files changed, 8 insertions(+), 169 deletions(-) delete mode 100644 server/licenses/jna-5.12.1.jar.sha1 create mode 100644 server/licenses/jna-5.5.0.jar.sha1 delete mode 100644 server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java diff --git a/CHANGELOG.md b/CHANGELOG.md index b17b996a5a284..1998b6d75c6be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,7 +68,6 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `reactor-core` from 3.4.18 to 3.4.23 ([#4548](https://github.com/opensearch-project/OpenSearch/pull/4548)) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) -- Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) - Bumps `tika` from 2.4.0 to 2.5.0 ([#4791](https://github.com/opensearch-project/OpenSearch/pull/4791)) - Exclude jettison version brought in with hadoop-minicluster. 
([#4787](https://github.com/opensearch-project/OpenSearch/pull/4787)) @@ -97,7 +96,6 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Further simplification of the ZIP publication implementation ([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() ([#4582](https://github.com/opensearch-project/OpenSearch/pull/4582)) -- Include Windows OS in Bootstrap initializeNatives() check for definitelyRunningAsRoot() ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) - Improve Gradle pre-commit checks to pre-empt Jenkins build ([#4660](https://github.com/opensearch-project/OpenSearch/pull/4660)) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) @@ -120,6 +118,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) - Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) - Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) +- Revert PR 4656 to unblock Windows CI ([#4949](https://github.com/opensearch-project/OpenSearch/pull/4949)) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 0b23631816fe9..3ef3c6f9faf49 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -110,7 +110,7 @@ dependencies { api 'com.netflix.nebula:gradle-info-plugin:11.3.3' api 'org.apache.rat:apache-rat:0.13' api 'commons-io:commons-io:2.7' - api "net.java.dev.jna:jna:5.12.1" + api "net.java.dev.jna:jna:5.11.0" api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2' api 'org.jdom:jdom2:2.0.6.1' api 'org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.7.10' diff --git a/buildSrc/version.properties b/buildSrc/version.properties index f224c7d0ecc38..60148a0c1123f 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -22,7 +22,7 @@ jettison = 1.5.1 woodstox = 6.4.0 # when updating the JNA version, also update the version in buildSrc/build.gradle -jna = 5.12.1 +jna = 5.5.0 netty = 4.1.84.Final joda = 2.10.13 diff --git a/server/licenses/jna-5.12.1.jar.sha1 b/server/licenses/jna-5.12.1.jar.sha1 deleted file mode 100644 index 0d42f248c1afd..0000000000000 --- a/server/licenses/jna-5.12.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1e93a735caea94f503e95e6fe79bf9cdc1e985d \ No newline at end of file diff --git a/server/licenses/jna-5.5.0.jar.sha1 b/server/licenses/jna-5.5.0.jar.sha1 new file mode 100644 index 0000000000000..5621dfc743dd0 --- /dev/null +++ b/server/licenses/jna-5.5.0.jar.sha1 @@ -0,0 +1 @@ +0e0845217c4907822403912ad6828d8e0b256208 diff --git a/server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java b/server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java 
deleted file mode 100644 index 09b92c00684f4..0000000000000 --- a/server/src/main/java/org/opensearch/bootstrap/JNAAdvapi32Library.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.bootstrap; - -import com.sun.jna.Native; -import com.sun.jna.Pointer; -import com.sun.jna.ptr.IntByReference; -import com.sun.jna.ptr.PointerByReference; -import com.sun.jna.Structure; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; - -import java.util.List; - -/** - * Library for Windows/Advapi32 - * - * @opensearch.internal - */ -final class JNAAdvapi32Library { - - private static final Logger logger = LogManager.getLogger(JNAAdvapi32Library.class); - - private static final class Holder { - private static final JNAAdvapi32Library instance = new JNAAdvapi32Library(); - } - - private JNAAdvapi32Library() { - if (Constants.WINDOWS) { - try { - Native.register("advapi32"); - logger.debug("windows/Advapi32 library loaded"); - } catch (NoClassDefFoundError e) { - logger.warn("JNA not found. native methods and handlers will be disabled."); - } catch (UnsatisfiedLinkError e) { - logger.warn("unable to link Windows/Advapi32 library. native methods and handlers will be disabled."); - } - } - } - - static JNAAdvapi32Library getInstance() { - return Holder.instance; - } - - /** - * Access right required to query an access token. - * Used by {@link #OpenProcessToken(Pointer, int, PointerByReference)}. - * - * https://learn.microsoft.com/en-us/windows/win32/secauthz/access-rights-for-access-token-objects - */ - public static final int TOKEN_QUERY = 0x0008; - - /** - * TOKEN_INFORMATION_CLASS enumeration value that specifies the type of information being assigned to or retrieved from an access token. - * Used by {@link #GetTokenInformation(Pointer, int, Structure, int, IntByReference)}. - * - * https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-token_information_class - */ - public static final int TOKEN_ELEVATION = 0x14; - - /** - * Native call to the Advapi32 API to open the access token associated with a process. - * - * @param processHandle Handle to the process whose access token is opened. - * The process must have the PROCESS_QUERY_INFORMATION access permission. - * @param desiredAccess Specifies an access mask that specifies the requested types of access to the access token. - * These requested access types are compared with the discretionary access control list (DACL) of the token to determine which accesses are granted or denied. - * @param tokenHandle Pointer to a handle that identifies the newly opened access token when the function returns. - * @return If the function succeeds, the return value is true. - * If the function fails, the return value is false. - * To get extended error information, call GetLastError. - */ - native boolean OpenProcessToken(Pointer processHandle, int desiredAccess, PointerByReference tokenHandle); - - /** - * Retrieves a specified type of information about an access token. - * The calling process must have appropriate access rights to obtain the information. - * - * @param tokenHandle Handle to an access token from which information is retrieved. - * If TokenInformationClass specifies TokenSource, the handle must have TOKEN_QUERY_SOURCE access. 
- * For all other TokenInformationClass values, the handle must have TOKEN_QUERY access. - * @param tokenInformationClass Specifies a value from the TOKEN_INFORMATION_CLASS enumerated type to identify the type of information the function retrieves. - * @param tokenInformation Pointer to a buffer the function fills with the requested information. - * The structure put into this buffer depends upon the type of information specified by the TokenInformationClass parameter. - * @param tokenInformationLength Specifies the size, in bytes, of the buffer pointed to by the TokenInformation parameter. - * If TokenInformation is NULL, this parameter must be zero. - * @param returnLength Pointer to a variable that receives the number of bytes needed for the buffer pointed to by the TokenInformation parameter. - * If this value is larger than the value specified in the TokenInformationLength parameter, the function fails and stores no data in the buffer. - * @return If the function succeeds, the return value is true. - * If the function fails, the return value is zero. - * To get extended error information, call GetLastError. - */ - native boolean GetTokenInformation( - Pointer tokenHandle, - int tokenInformationClass, - Structure tokenInformation, - int tokenInformationLength, - IntByReference returnLength - ); - - /** - * The TOKEN_ELEVATION structure indicates whether a token has elevated privileges. - * - * https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-token_elevation - */ - public static class TokenElevation extends Structure { - /** - * A nonzero value if the token has elevated privileges; otherwise, a zero value. - */ - public int TokenIsElevated; - - @Override - protected List getFieldOrder() { - return List.of("TokenIsElevated"); - } - } -} diff --git a/server/src/main/java/org/opensearch/bootstrap/JNANatives.java b/server/src/main/java/org/opensearch/bootstrap/JNANatives.java index 12f65f8e4a216..033596033b0fd 100644 --- a/server/src/main/java/org/opensearch/bootstrap/JNANatives.java +++ b/server/src/main/java/org/opensearch/bootstrap/JNANatives.java @@ -35,19 +35,14 @@ import com.sun.jna.Native; import com.sun.jna.Pointer; import com.sun.jna.WString; -import com.sun.jna.ptr.IntByReference; -import com.sun.jna.ptr.PointerByReference; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.opensearch.bootstrap.JNAAdvapi32Library.TokenElevation; import org.opensearch.monitor.jvm.JvmInfo; import java.nio.file.Path; -import static org.opensearch.bootstrap.JNAAdvapi32Library.TOKEN_ELEVATION; -import static org.opensearch.bootstrap.JNAAdvapi32Library.TOKEN_QUERY; import static org.opensearch.bootstrap.JNAKernel32Library.SizeT; /** @@ -190,44 +185,13 @@ static String rlimitToString(long value) { /** Returns true if user is root, false if not, or if we don't know */ static boolean definitelyRunningAsRoot() { + if (Constants.WINDOWS) { + return false; // don't know + } try { - if (Constants.WINDOWS) { - JNAKernel32Library kernel32 = JNAKernel32Library.getInstance(); - JNAAdvapi32Library advapi32 = JNAAdvapi32Library.getInstance(); - - // Fetch a pseudo handle for the current process. - // The pseudo handle need not be closed when it is no longer needed (calling CloseHandle is a no-op). 
- Pointer process = kernel32.GetCurrentProcess(); - PointerByReference hToken = new PointerByReference(); - // Fetch the process token for the current process, for which we know we have the access rights - if (!advapi32.OpenProcessToken(process, TOKEN_QUERY, hToken)) { - logger.warn( - "Unable to open the Process Token for the current process [" + JNACLibrary.strerror(Native.getLastError()) + "]" - ); - return false; - } - // We have successfully opened the token. Ensure it gets closed after we use it. - try { - TokenElevation elevation = new TokenElevation(); - IntByReference returnLength = new IntByReference(); - if (!advapi32.GetTokenInformation(hToken.getValue(), TOKEN_ELEVATION, elevation, elevation.size(), returnLength)) { - logger.warn( - "Unable to get TokenElevation information for the current process [" - + JNACLibrary.strerror(Native.getLastError()) - + "]" - ); - return false; - } - // Nonzero value means elevated privileges - return elevation.TokenIsElevated > 0; - } finally { - kernel32.CloseHandle(hToken.getValue()); - } - } - // For unix-based systems, check effective user ID of process return JNACLibrary.geteuid() == 0; } catch (UnsatisfiedLinkError e) { - // this will have already been logged by Native Library, no need to repeat it + // this will have already been logged by Kernel32Library, no need to repeat it return false; } } From c4fc1bc0f29bc7a005e4755be5d2e1b22b7de72a Mon Sep 17 00:00:00 2001 From: Vishal Sarda Date: Thu, 27 Oct 2022 23:44:46 +0530 Subject: [PATCH 102/122] Make searchable snapshot indexes read-only but allow deletion (#4764) This commit adds a new cluster block that is added by default to all searchable snapshot indexes to block writes and metadata changes. Signed-off-by: Vishal Sarda --- CHANGELOG.md | 1 + .../snapshots/SearchableSnapshotIT.java | 84 +++++++++++++++++-- .../cluster/block/ClusterBlocks.java | 5 ++ .../cluster/metadata/IndexMetadata.java | 10 +++ 4 files changed, 95 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1998b6d75c6be..1f1c20d23b409 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add a new node role 'search' which is dedicated to provide search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) - Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680)) - Recommissioning of zone. REST layer support. 
([#4624](https://github.com/opensearch-project/OpenSearch/pull/4604)) +- Make searchable snapshot indexes read-only but allow deletion ([#4764](https://github.com/opensearch-project/OpenSearch/pull/4764)) - Added in-flight cancellation of SearchShardTask based on resource consumption ([#4565](https://github.com/opensearch-project/OpenSearch/pull/4565)) - Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) - Add icon for IntelliJidea toolbox ([#4882](https://github.com/opensearch-project/OpenSearch/pull/4882)) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 0ab025bb575cc..c528a311e0761 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -4,32 +4,37 @@ */ package org.opensearch.snapshots; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.List; - +import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.hamcrest.MatcherAssert; import org.junit.BeforeClass; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; +import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.collect.Map; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.Index; +import org.opensearch.index.IndexNotFoundException; import org.opensearch.monitor.fs.FsInfo; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.contains; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; @@ -104,6 +109,75 @@ public void testCreateSearchableSnapshot() throws Exception { assertIndexDirectoryDoesNotExist("test-idx-1-copy", "test-idx-2-copy"); } + public void testSearchableSnapshotIndexIsReadOnly() throws Exception { + final String indexName = "test-index"; + final Client client = client(); + createRepository("test-repo", "fs"); + createIndex( + indexName, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + ); + ensureGreen(); + + logger.info("--> snapshot"); + final CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot("test-repo", "test-snap") + 
.setWaitForCompletion(true) + .setIndices(indexName) + .get(); + MatcherAssert.assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + MatcherAssert.assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + + assertTrue(client.admin().indices().prepareDelete(indexName).get().isAcknowledged()); + + logger.info("--> restore indices as 'remote_snapshot'"); + client.admin() + .cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setRenamePattern("(.+)") + .setRenameReplacement("$1") + .setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT) + .setWaitForCompletion(true) + .execute() + .actionGet(); + ensureGreen(); + + assertIndexingBlocked(indexName); + assertIndexSettingChangeBlocked(indexName); + assertTrue(client.admin().indices().prepareDelete(indexName).get().isAcknowledged()); + assertThrows( + "Expect index to not exist", + IndexNotFoundException.class, + () -> client.admin().indices().prepareGetIndex().setIndices(indexName).execute().actionGet() + ); + } + + private void assertIndexingBlocked(String index) { + try { + final IndexRequestBuilder builder = client().prepareIndex(index); + builder.setSource("foo", "bar"); + builder.execute().actionGet(); + fail("Expected operation to throw an exception"); + } catch (ClusterBlockException e) { + MatcherAssert.assertThat(e.blocks(), contains(IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE)); + } + } + + private void assertIndexSettingChangeBlocked(String index) { + try { + final UpdateSettingsRequestBuilder builder = client().admin().indices().prepareUpdateSettings(index); + builder.setSettings(Map.of("index.refresh_interval", 10)); + builder.execute().actionGet(); + fail("Expected operation to throw an exception"); + } catch (ClusterBlockException e) { + MatcherAssert.assertThat(e.blocks(), contains(IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE)); + } + } + /** * Picks a shard out of the cluster state for each given index and asserts * that the 'index' directory does not exist in the node's file system. 
diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index f5dc0eb8fdb5e..13d8909b41ff5 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -42,6 +42,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.set.Sets; +import org.opensearch.index.IndexModule; import org.opensearch.rest.RestStatus; import java.io.IOException; @@ -393,6 +394,10 @@ public Builder addBlocks(IndexMetadata indexMetadata) { if (IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.get(indexMetadata.getSettings())) { addIndexBlock(indexName, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); } + if (IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey() + .equals(indexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()))) { + addIndexBlock(indexName, IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE); + } return this; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index b6ca8c52cd818..5a9b3e516ea9a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -149,6 +149,16 @@ public class IndexMetadata implements Diffable, ToXContentFragmen EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.WRITE) ); + public static final ClusterBlock REMOTE_READ_ONLY_ALLOW_DELETE = new ClusterBlock( + 13, + "remote index is read-only", + false, + false, + true, + RestStatus.FORBIDDEN, + EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.WRITE) + ); + /** * The state of the index. 
* From 6e90080a330278bec3c109825240dd8c327e961c Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Thu, 27 Oct 2022 14:10:25 -0700 Subject: [PATCH 103/122] Fix for failing checkExtraction, checkLicense and checkNotice tasks for windows gradle check (#4941) * Fix for failing checkExtraction and checkLicense tasks Signed-off-by: Poojita Raj * changelog added Signed-off-by: Poojita Raj Signed-off-by: Poojita Raj --- CHANGELOG.md | 2 +- .../internal/InternalDistributionArchiveCheckPlugin.java | 7 ++++--- .../internal/InternalDistributionArchiveSetupPlugin.java | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f1c20d23b409..30b281da91949 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -175,7 +175,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [BUG]: Allow decommission to support delay timeout ([#4930](https://github.com/opensearch-project/OpenSearch/pull/4930)) - Fix failing test: VerifyVersionConstantsIT ([#4946](https://github.com/opensearch-project/OpenSearch/pull/4946)) - Fixed compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - +- Fix for failing checkExtraction, checkLicense and checkNotice tasks for windows gradle check ([#4941](https://github.com/opensearch-project/OpenSearch/pull/4941)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java index 2a162e5f12d7b..96a2928b6e71e 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java @@ -73,12 +73,14 @@ public void apply(Project project) { .create("distributionArchiveCheck", DistributionArchiveCheckExtension.class); File archiveExtractionDir = calculateArchiveExtractionDir(project); - // sanity checks if archives can be extracted TaskProvider checkExtraction = registerCheckExtractionTask(project, buildDistTask, archiveExtractionDir); + checkExtraction.configure(InternalDistributionArchiveSetupPlugin.configure(buildTaskName)); TaskProvider checkLicense = registerCheckLicenseTask(project, checkExtraction); + checkLicense.configure(InternalDistributionArchiveSetupPlugin.configure(buildTaskName)); TaskProvider checkNotice = registerCheckNoticeTask(project, checkExtraction); + checkNotice.configure(InternalDistributionArchiveSetupPlugin.configure(buildTaskName)); TaskProvider checkTask = project.getTasks().named("check"); checkTask.configure(task -> { task.dependsOn(checkExtraction); @@ -118,7 +120,7 @@ public void execute(Task task) { } private TaskProvider registerCheckLicenseTask(Project project, TaskProvider checkExtraction) { - TaskProvider checkLicense = project.getTasks().register("checkLicense", task -> { + return project.getTasks().register("checkLicense", task -> { task.dependsOn(checkExtraction); task.doLast(new Action() { @Override @@ -138,7 +140,6 @@ public void execute(Task task) { } }); }); - return checkLicense; } private TaskProvider registerCheckExtractionTask(Project project, TaskProvider buildDistTask, File archiveExtractionDir) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java 
b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java index 8adfbff424278..0944f3960467b 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java @@ -87,7 +87,7 @@ public void apply(Project project) { configureTarDefaults(project); } - private Action configure(String name) { + static Action configure(String name) { return (Task task) -> task.onlyIf(s -> { if (OperatingSystem.current().isWindows()) { // On Windows, include only Windows distributions and integTestZip From e34de9fcb0b14532d35a59cb6b51e181909ce0f2 Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Fri, 28 Oct 2022 07:55:19 -0700 Subject: [PATCH 104/122] [Segment Replication] Fix bug of replica shard's translog not purging on index flush. (#4928) * Fix bug of replica shard's translog not purging on index flush and fixing related unit tests. Signed-off-by: Rishikesh1159 * Update change log Signed-off-by: Rishikesh1159 * Update change log message Signed-off-by: Rishikesh1159 Signed-off-by: Rishikesh1159 --- CHANGELOG.md | 3 ++ .../index/engine/NRTReplicationEngine.java | 1 + .../SegmentReplicationIndexShardTests.java | 32 ++++++++++++++++--- 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 30b281da91949..1771db70c0ff4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -171,11 +171,14 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - [BUG]: flaky test index/80_geo_point/Single point test([#4860](https://github.com/opensearch-project/OpenSearch/pull/4860)) - Fix bug in SlicedInputStream with zero length ([#4863](https://github.com/opensearch-project/OpenSearch/pull/4863)) +- Fix a bug on handling an invalid array value for point type field ([#4900](https://github.com/opensearch-project/OpenSearch/pull/4900)) +- [Segment Replication] Fix bug of replica shard's translog not purging on index flush when segment replication is enabled ([4928](https://github.com/opensearch-project/OpenSearch/pull/4928)) - Fix a bug on handling an invalid array value for point type field #4900([#4900](https://github.com/opensearch-project/OpenSearch/pull/4900)) - [BUG]: Allow decommission to support delay timeout ([#4930](https://github.com/opensearch-project/OpenSearch/pull/4930)) - Fix failing test: VerifyVersionConstantsIT ([#4946](https://github.com/opensearch-project/OpenSearch/pull/4946)) - Fixed compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - Fix for failing checkExtraction, checkLicense and checkNotice tasks for windows gradle check ([#4941](https://github.com/opensearch-project/OpenSearch/pull/4941)) + ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 12d420aa245fa..b7bb4c0b40011 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -129,6 
+129,7 @@ public synchronized void updateSegments(final SegmentInfos infos, long seqNo) th // lower/higher gens are possible from a new primary that was just elected. if (incomingGeneration != lastReceivedGen) { commitSegmentInfos(); + translogManager.getDeletionPolicy().setLocalCheckpointOfSafeCommit(seqNo); translogManager.rollTranslogGeneration(); } lastReceivedGen = incomingGeneration; diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index b3cbd9b00f0b1..aaaf95af85191 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -246,16 +246,36 @@ public void testReplicaReceivesGenIncrease() throws Exception { final IndexShard replica = shards.getReplicas().get(0); final int numDocs = randomIntBetween(10, 100); shards.indexDocs(numDocs); + assertEquals(numDocs, primary.translogStats().estimatedNumberOfOperations()); + assertEquals(numDocs, replica.translogStats().estimatedNumberOfOperations()); + assertEquals(numDocs, primary.translogStats().getUncommittedOperations()); + assertEquals(numDocs, replica.translogStats().getUncommittedOperations()); flushShard(primary, true); replicateSegments(primary, shards.getReplicas()); + assertEquals(0, primary.translogStats().estimatedNumberOfOperations()); + assertEquals(0, replica.translogStats().estimatedNumberOfOperations()); + assertEquals(0, primary.translogStats().getUncommittedOperations()); + assertEquals(0, replica.translogStats().getUncommittedOperations()); - final int totalDocs = numDocs + shards.indexDocs(randomIntBetween(numDocs + 1, numDocs + 10)); - flushShard(primary); + final int additionalDocs = shards.indexDocs(randomIntBetween(numDocs + 1, numDocs + 10)); + + final int totalDocs = numDocs + additionalDocs; + primary.refresh("test"); + replicateSegments(primary, shards.getReplicas()); + assertEquals(additionalDocs, primary.translogStats().estimatedNumberOfOperations()); + assertEquals(additionalDocs, replica.translogStats().estimatedNumberOfOperations()); + assertEquals(additionalDocs, primary.translogStats().getUncommittedOperations()); + assertEquals(additionalDocs, replica.translogStats().getUncommittedOperations()); + flushShard(primary, true); replicateSegments(primary, shards.getReplicas()); assertEqualCommittedSegments(primary, replica); assertDocCount(primary, totalDocs); assertDocCount(replica, totalDocs); + assertEquals(0, primary.translogStats().estimatedNumberOfOperations()); + assertEquals(0, replica.translogStats().estimatedNumberOfOperations()); + assertEquals(0, primary.translogStats().getUncommittedOperations()); + assertEquals(0, replica.translogStats().getUncommittedOperations()); } } @@ -388,16 +408,20 @@ public void testNRTReplicaPromotedAsPrimary() throws Exception { // 2. Create ops that are in the replica's xlog, not in the index. // index some more into both but don't replicate. replica will have only numDocs searchable, but should have totalDocs // persisted. 
- final int totalDocs = numDocs + shards.indexDocs(randomInt(10)); + final int additonalDocs = shards.indexDocs(randomInt(10)); + final int totalDocs = numDocs + additonalDocs; assertDocCounts(oldPrimary, totalDocs, totalDocs); for (IndexShard shard : shards.getReplicas()) { assertDocCounts(shard, totalDocs, numDocs); } + assertEquals(additonalDocs, nextPrimary.translogStats().estimatedNumberOfOperations()); + assertEquals(additonalDocs, replica.translogStats().estimatedNumberOfOperations()); + assertEquals(additonalDocs, nextPrimary.translogStats().getUncommittedOperations()); + assertEquals(additonalDocs, replica.translogStats().getUncommittedOperations()); // promote the replica shards.syncGlobalCheckpoint(); - assertEquals(totalDocs, nextPrimary.translogStats().estimatedNumberOfOperations()); shards.promoteReplicaToPrimary(nextPrimary); // close and start the oldPrimary as a replica. From 171d14153c8542f25b52428863f1ec9372efd671 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Fri, 28 Oct 2022 13:03:05 -0700 Subject: [PATCH 105/122] Fix backport issues for CHANGELOG.md file (#4977) Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- .github/workflows/backport.yml | 5 +++-- CHANGELOG.md | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index e47d8d88c0243..c1c2505a62245 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -22,7 +22,8 @@ jobs: installation_id: 22958780 - name: Backport - uses: VachaShah/backport@v1.1.4 + uses: VachaShah/backport@v2.1.0 with: github_token: ${{ steps.github_app_token.outputs.token }} - branch_name: backport/backport-${{ github.event.number }} + head_template: backport/backport-<%= number %>-to-<%= base %> + files_to_skip: 'CHANGELOG.md' diff --git a/CHANGELOG.md b/CHANGELOG.md index 1771db70c0ff4..3307b9ccb99f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -178,6 +178,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix failing test: VerifyVersionConstantsIT ([#4946](https://github.com/opensearch-project/OpenSearch/pull/4946)) - Fixed compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - Fix for failing checkExtraction, checkLicense and checkNotice tasks for windows gradle check ([#4941](https://github.com/opensearch-project/OpenSearch/pull/4941)) +- Backport failures for merge conflicts on CHANGELOG.md file ([#4977](https://github.com/opensearch-project/OpenSearch/pull/4977)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) From d2c2adee127b0c6c6f6bcd8cf8457172e6cfdc19 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Sat, 29 Oct 2022 01:09:01 -0500 Subject: [PATCH 106/122] [Upgrade] to lucene-9.5.0-snapshot-a4ef70f (#4979) * [Upgrade] to lucene-9.5.0-snapshot-a4ef70f Upgrades to latest snapshot of lucene 9.5. 
Signed-off-by: Nicholas Walter Knize * update changelog Signed-off-by: Nicholas Walter Knize Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + buildSrc/version.properties | 4 ++-- .../licenses/lucene-expressions-9.4.1.jar.sha1 | 1 - .../lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + .../analysis-icu/licenses/lucene-analysis-icu-9.4.1.jar.sha1 | 1 - .../lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + .../licenses/lucene-analysis-kuromoji-9.4.1.jar.sha1 | 1 - .../lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + .../licenses/lucene-analysis-nori-9.4.1.jar.sha1 | 1 - .../lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + .../licenses/lucene-analysis-phonetic-9.4.1.jar.sha1 | 1 - .../lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + .../licenses/lucene-analysis-smartcn-9.4.1.jar.sha1 | 1 - .../lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + .../licenses/lucene-analysis-stempel-9.4.1.jar.sha1 | 1 - .../lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + .../licenses/lucene-analysis-morfologik-9.4.1.jar.sha1 | 1 - ...lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-analysis-common-9.4.1.jar.sha1 | 1 - .../lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-backward-codecs-9.4.1.jar.sha1 | 1 - .../lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-core-9.4.1.jar.sha1 | 1 - server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-grouping-9.4.1.jar.sha1 | 1 - .../licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-highlighter-9.4.1.jar.sha1 | 1 - .../lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-join-9.4.1.jar.sha1 | 1 - server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-memory-9.4.1.jar.sha1 | 1 - server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-misc-9.4.1.jar.sha1 | 1 - server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-queries-9.4.1.jar.sha1 | 1 - .../licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-queryparser-9.4.1.jar.sha1 | 1 - .../lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.4.1.jar.sha1 | 1 - .../licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-spatial-extras-9.4.1.jar.sha1 | 1 - .../lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-spatial3d-9.4.1.jar.sha1 | 1 - .../licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/licenses/lucene-suggest-9.4.1.jar.sha1 | 1 - .../licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 + server/src/main/java/org/opensearch/Version.java | 2 +- 47 files changed, 26 insertions(+), 25 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.4.1.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.1.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.1.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 
plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.1.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.1.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.1.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.1.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.1.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.4.1.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 3307b9ccb99f1..de006288527d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -79,6 +79,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Dependency updates: asm 9.3 -> 9.4, bytebuddy 1.12.12 -> 1.12.18 ([#4889](https://github.com/opensearch-project/OpenSearch/pull/4889)) - Update Apache Lucene to 9.4.1 ([#4922](https://github.com/opensearch-project/OpenSearch/pull/4922)) - Bump 
`woodstox-core` to 6.4.0 ([#4947](https://github.com/opensearch-project/OpenSearch/pull/4947)) +- Update Apache Lucene to 9.5.0-snapshot-a4ef70f ([#4979](https://github.com/opensearch-project/OpenSearch/pull/4979)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 60148a0c1123f..daea380c4b30d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.4.1 +lucene = 9.5.0-snapshot-a4ef70f bundled_jdk_vendor = adoptium bundled_jdk = 17.0.4+8 @@ -14,7 +14,7 @@ jackson_databind = 2.13.4.2 snakeyaml = 1.32 icu4j = 70.1 supercsv = 2.4.0 -# Update to 2.17.2+ is breaking OpenSearchJsonLayout (see https://issues.apache.org/jira/browse/LOG4J2-3562) +# Update to 2.17.2+ is breaking OpenSearchJsonLayout (see https://issues.apache.org/jira/browse/LOG4J2-3562) log4j = 2.17.1 slf4j = 1.7.36 asm = 9.4 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.4.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.4.1.jar.sha1 deleted file mode 100644 index f8eb0a7bdb033..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -574ac602f98475c1d061adee71ed90dce2eee679 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..0e1f3e37f508a --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +c92a0928724b04224157ce2d3e105953f57f94db \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.1.jar.sha1 deleted file mode 100644 index 77c20d4ed8073..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b217251a8c13669ec4066871d026e4cb84abf702 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..a49a0749a9e4a --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +a7c38619d8f2cc48f792e007aa25b430f4f25698 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.1.jar.sha1 deleted file mode 100644 index b2090016c9f5d..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d80cf8993498784bcb6e4dcefdcebd3ddc4ca022 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..709bcf84faf06 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +6243383e5fbcf87551ded4c1b48b69a4276bb748 \ No newline at end of file diff --git 
a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.1.jar.sha1 deleted file mode 100644 index 2f5d27452f2f3..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -082f4870f58b17bcc60dedb399fe82964ef0ad6f \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..0c4d7b7a2755c --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +91d1560bc927f1a431bb92e47fda9395d3b3e551 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.1.jar.sha1 deleted file mode 100644 index 146d5eebbe800..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -733411a30c1b1b473b93d4b28f59f8914d7ed83f \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..82524cbdb4ada --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +26bbfd1a796d62006dff9c7e32d31a0397a8025e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.1.jar.sha1 deleted file mode 100644 index 61f11cfb32aee..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19d011e47bab7ccd1389095834d18ad7bf98f050 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..af6b600d22090 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +a1a26c04e24d9a8573e6bd9a0bacad184821dd33 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.1.jar.sha1 deleted file mode 100644 index 9259308e80d4b..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e48a8d411fffb331484c9485a6bdbf1d856ba06 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..ea5680869c187 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +19aa9eff0e0671fd91eb435a2e2fa29dec52cf5c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.1.jar.sha1 deleted file mode 100644 index 
77f7bdd37cd8f..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec94415a249a7814fa76d96667173fae875112a2 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..4f81941a1746e --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +05ff979dfe3ded901ccd72d5a5d66349286c44bf \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.4.1.jar.sha1 b/server/licenses/lucene-analysis-common-9.4.1.jar.sha1 deleted file mode 100644 index dcee87ac5273a..0000000000000 --- a/server/licenses/lucene-analysis-common-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -692cbd2e220222adff279e922d1c14bb10dae8b0 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..e12c20e2a64b8 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +84d717ed509f8ce484c57fea720d8de2a6afdaa6 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.4.1.jar.sha1 b/server/licenses/lucene-backward-codecs-9.4.1.jar.sha1 deleted file mode 100644 index 927cdf30dce6f..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2826b0fd18033b6d1625b8313ab28c7f43fac692 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..e78e165acddb3 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +087bcc11526f8dcc56742dd8188bd05ad0329161 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.4.1.jar.sha1 b/server/licenses/lucene-core-9.4.1.jar.sha1 deleted file mode 100644 index 8a12d131b158e..0000000000000 --- a/server/licenses/lucene-core-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48ff655927016066d4746b485d63f0586d859c5f \ No newline at end of file diff --git a/server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..bd5fc52fb86c3 --- /dev/null +++ b/server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +e949897fa24e14d2701a3c41fe27a4f094681b81 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.4.1.jar.sha1 b/server/licenses/lucene-grouping-9.4.1.jar.sha1 deleted file mode 100644 index c6507cada010f..0000000000000 --- a/server/licenses/lucene-grouping-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -690585f53f4ea00fae94c72fbf711f693a3981c5 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..17aa27ceac3bf --- /dev/null +++ b/server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +6cb53ca55f7e313ed19852ae37fca4ad2e4caa0c \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.4.1.jar.sha1 
b/server/licenses/lucene-highlighter-9.4.1.jar.sha1 deleted file mode 100644 index 58019cc9702d0..0000000000000 --- a/server/licenses/lucene-highlighter-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e97e3003a80c67ddf33a456ab54413b12764d23d \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..7f248580a6a49 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +c7f650e33ac11e01bb5c2e35e4eb080a9ce245b8 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.4.1.jar.sha1 b/server/licenses/lucene-join-9.4.1.jar.sha1 deleted file mode 100644 index 2f0765f2b0a3c..0000000000000 --- a/server/licenses/lucene-join-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8749a1b71eacb85c47ed90265ea98c1c8b4e5abd \ No newline at end of file diff --git a/server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..88fef91bee929 --- /dev/null +++ b/server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +914ea03f71043a9291623628396a97a4c1901f8c \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.4.1.jar.sha1 b/server/licenses/lucene-memory-9.4.1.jar.sha1 deleted file mode 100644 index 8a8674b776623..0000000000000 --- a/server/licenses/lucene-memory-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89797042e65179325f40c173fc20e227d3df4f29 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..f6422c2e72fda --- /dev/null +++ b/server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +e83ecf8c4f5991f8e4ea319fc9194c933e02f66d \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.4.1.jar.sha1 b/server/licenses/lucene-misc-9.4.1.jar.sha1 deleted file mode 100644 index 3c4e6f89400bc..0000000000000 --- a/server/licenses/lucene-misc-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ae8465d5499ab39012c3c81d82014667c5c03dd2 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..262190789814d --- /dev/null +++ b/server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +5adc5753c741847cd84cb11ebfcd613bedc11beb \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.4.1.jar.sha1 b/server/licenses/lucene-queries-9.4.1.jar.sha1 deleted file mode 100644 index f897fda0074bf..0000000000000 --- a/server/licenses/lucene-queries-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c78e99a46fabd2a7a175fe170a1537de2b9bca95 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..f8bba3d90a0f1 --- /dev/null +++ b/server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +824272a064aa2fff1f952b5ae383e80aef4e45f8 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.4.1.jar.sha1 b/server/licenses/lucene-queryparser-9.4.1.jar.sha1 deleted file mode 100644 index f82637ff48a42..0000000000000 --- a/server/licenses/lucene-queryparser-9.4.1.jar.sha1 
+++ /dev/null @@ -1 +0,0 @@ -98051f363cd92fd324aca47b662c4a71b9419e49 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..652ccd298c9d9 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +d5bf983dfb6183b390bdc9d3b41b88b6ee6f780e \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.4.1.jar.sha1 b/server/licenses/lucene-sandbox-9.4.1.jar.sha1 deleted file mode 100644 index 210fc28c4d45a..0000000000000 --- a/server/licenses/lucene-sandbox-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c82a38b0fe1042ef11751cdbb1be689b7a00428 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..b51328d19065a --- /dev/null +++ b/server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +fff58cc6b79887348b45c9d06bff39d055540738 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.4.1.jar.sha1 b/server/licenses/lucene-spatial-extras-9.4.1.jar.sha1 deleted file mode 100644 index b1430cf47a867..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7171406be9b864a4929edff8601b3d69645c64ae \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..37a22d637a051 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +c391f1df56d63dff3c6543da15c87105f2106c86 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.4.1.jar.sha1 b/server/licenses/lucene-spatial3d-9.4.1.jar.sha1 deleted file mode 100644 index 482526b6032b6..0000000000000 --- a/server/licenses/lucene-spatial3d-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ede97d83f0c62ba84ae1e2279f411ae87ecaca1 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..b0c9924752852 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +794109c75534b1c3a19a29bcb66692f0e0708744 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.4.1.jar.sha1 b/server/licenses/lucene-suggest-9.4.1.jar.sha1 deleted file mode 100644 index ed4e8c4e3ca66..0000000000000 --- a/server/licenses/lucene-suggest-9.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -60e7740d2894ac7a0e97c3eabaee4823a02b0f9c \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..63f5d8123c2cf --- /dev/null +++ b/server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +dc5fdd92541f4e78256152d3efc11bdb67ffdc91 \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 99b72a5332bcf..9143a84676330 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -100,7 +100,7 @@ public class 
Version implements Comparable, ToXContentFragment { public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_1); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_1); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version CURRENT = V_3_0_0; public static Version readVersion(StreamInput in) throws IOException { From 28ba311ae34480cd158cbf7fc986ce4954e01204 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Sun, 30 Oct 2022 15:12:00 -0700 Subject: [PATCH 107/122] Fix dependencies (#4963) * Upgrading kotlin-stdlib and excluding jetty-http Signed-off-by: Vacha Shah * Upgrading indirect snakeyaml dependency Signed-off-by: Vacha Shah * Update CHANGELOG Signed-off-by: Vacha Shah * Using snakeyaml version from version file Signed-off-by: Vacha Shah * Upgrading jetty-http instead of excluding since it is used Signed-off-by: Vacha Shah * Extracting kotlin version Signed-off-by: Vacha Shah Signed-off-by: Vacha Shah --- CHANGELOG.md | 1 + buildSrc/build.gradle | 3 ++- buildSrc/version.properties | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 ++ 4 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index de006288527d3..27626ca61c3d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -80,6 +80,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update Apache Lucene to 9.4.1 ([#4922](https://github.com/opensearch-project/OpenSearch/pull/4922)) - Bump `woodstox-core` to 6.4.0 ([#4947](https://github.com/opensearch-project/OpenSearch/pull/4947)) - Update Apache Lucene to 9.5.0-snapshot-a4ef70f ([#4979](https://github.com/opensearch-project/OpenSearch/pull/4979)) +- Upgrade jetty-http, kotlin-stdlib and snakeyaml ([#4963](https://github.com/opensearch-project/OpenSearch/pull/4963)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 3ef3c6f9faf49..498edaf057a34 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -113,9 +113,10 @@ dependencies { api "net.java.dev.jna:jna:5.11.0" api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2' api 'org.jdom:jdom2:2.0.6.1' - api 'org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.7.10' + api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" api 'de.thetaphi:forbiddenapis:3.3' api 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2' + api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.6.2' api 'com.networknt:json-schema-validator:1.0.69' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" diff --git a/buildSrc/version.properties b/buildSrc/version.properties index daea380c4b30d..8dd54da07f3d2 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -20,6 +20,7 @@ slf4j = 1.7.36 asm = 9.4 jettison = 1.5.1 woodstox = 6.4.0 +kotlin = 1.7.10 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 
6c00465b6c842..cd60f1e7d8a8f 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -50,4 +50,6 @@ dependencies { api 'net.minidev:json-smart:2.4.8' api "org.mockito:mockito-core:${versions.mockito}" api "com.google.protobuf:protobuf-java:3.21.8" + api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" + api 'org.eclipse.jetty:jetty-server:9.4.49.v20220914' } From 6a56b39646737aa31558142b41a577426eb89126 Mon Sep 17 00:00:00 2001 From: Ketan Verma Date: Mon, 31 Oct 2022 20:06:59 +0530 Subject: [PATCH 108/122] Added resource usage trackers for in-flight cancellation of SearchShardTask (#4805) * Added resource usage trackers for in-flight cancellation of SearchShardTask 1. CpuUsageTracker: cancels tasks if they consume too much CPU 2. ElapsedTimeTracker: cancels tasks if they consume too much time 3. HeapUsageTracker: cancels tasks if they consume too much heap Signed-off-by: Ketan Verma --- CHANGELOG.md | 1 + .../common/settings/ClusterSettings.java | 14 +- .../opensearch/common/util/TokenBucket.java | 6 +- .../SearchBackpressureService.java | 18 ++- .../settings/SearchBackpressureSettings.java | 57 +++++-- .../settings/SearchShardTaskSettings.java | 128 ++------------- .../trackers/CpuUsageTracker.java | 82 ++++++++++ .../trackers/ElapsedTimeTracker.java | 86 ++++++++++ .../trackers/HeapUsageTracker.java | 148 ++++++++++++++++++ .../trackers/TaskResourceUsageTracker.java | 4 +- .../TaskResourceUsageTrackerType.java | 28 ++++ .../opensearch/tasks/TaskCancellation.java | 5 +- .../common/{ => util}/StreakTests.java | 3 +- .../SearchBackpressureServiceTests.java | 6 +- .../trackers/CpuUsageTrackerTests.java | 48 ++++++ .../trackers/ElapsedTimeTrackerTests.java | 49 ++++++ .../trackers/HeapUsageTrackerTests.java | 83 ++++++++++ .../tasks/TaskCancellationTests.java | 2 +- 18 files changed, 611 insertions(+), 157 deletions(-) create mode 100644 server/src/main/java/org/opensearch/search/backpressure/trackers/CpuUsageTracker.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTracker.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/trackers/HeapUsageTracker.java create mode 100644 server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTrackerType.java rename server/src/test/java/org/opensearch/common/{ => util}/StreakTests.java (92%) create mode 100644 server/src/test/java/org/opensearch/search/backpressure/trackers/CpuUsageTrackerTests.java create mode 100644 server/src/test/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTrackerTests.java create mode 100644 server/src/test/java/org/opensearch/search/backpressure/trackers/HeapUsageTrackerTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 27626ca61c3d7..1a0ff44256a46 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add project health badges to the README.md ([#4843](https://github.com/opensearch-project/OpenSearch/pull/4843)) - Added changes for graceful node decommission ([#4586](https://github.com/opensearch-project/OpenSearch/pull/4586)) - Build no-jdk distributions as part of release build ([#4902](https://github.com/opensearch-project/OpenSearch/pull/4902)) +- Added resource usage trackers for in-flight cancellation of SearchShardTask ([#4805](https://github.com/opensearch-project/OpenSearch/pull/4805)) - Renamed flaky tests 
([#4912](https://github.com/opensearch-project/OpenSearch/pull/4912)) ### Dependencies diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 12a3a883af347..dc9b9303b7b40 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -44,6 +44,9 @@ import org.opensearch.search.backpressure.settings.NodeDuressSettings; import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; import org.opensearch.search.backpressure.settings.SearchShardTaskSettings; +import org.opensearch.search.backpressure.trackers.CpuUsageTracker; +import org.opensearch.search.backpressure.trackers.ElapsedTimeTracker; +import org.opensearch.search.backpressure.trackers.HeapUsageTracker; import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.watcher.ResourceWatcherService; @@ -596,11 +599,12 @@ public void apply(Settings value, Settings current, Settings previous) { NodeDuressSettings.SETTING_NUM_SUCCESSIVE_BREACHES, NodeDuressSettings.SETTING_CPU_THRESHOLD, NodeDuressSettings.SETTING_HEAP_THRESHOLD, - SearchShardTaskSettings.SETTING_TOTAL_HEAP_THRESHOLD, - SearchShardTaskSettings.SETTING_HEAP_THRESHOLD, - SearchShardTaskSettings.SETTING_HEAP_VARIANCE_THRESHOLD, - SearchShardTaskSettings.SETTING_CPU_TIME_THRESHOLD, - SearchShardTaskSettings.SETTING_ELAPSED_TIME_THRESHOLD + SearchShardTaskSettings.SETTING_TOTAL_HEAP_PERCENT_THRESHOLD, + HeapUsageTracker.SETTING_HEAP_PERCENT_THRESHOLD, + HeapUsageTracker.SETTING_HEAP_VARIANCE_THRESHOLD, + HeapUsageTracker.SETTING_HEAP_MOVING_AVERAGE_WINDOW_SIZE, + CpuUsageTracker.SETTING_CPU_TIME_MILLIS_THRESHOLD, + ElapsedTimeTracker.SETTING_ELAPSED_TIME_MILLIS_THRESHOLD ) ) ); diff --git a/server/src/main/java/org/opensearch/common/util/TokenBucket.java b/server/src/main/java/org/opensearch/common/util/TokenBucket.java index e47f152d71363..d2e7e836bf07f 100644 --- a/server/src/main/java/org/opensearch/common/util/TokenBucket.java +++ b/server/src/main/java/org/opensearch/common/util/TokenBucket.java @@ -101,9 +101,9 @@ public boolean request() { */ private static class State { final double tokens; - final double lastRefilledAt; + final long lastRefilledAt; - public State(double tokens, double lastRefilledAt) { + public State(double tokens, long lastRefilledAt) { this.tokens = tokens; this.lastRefilledAt = lastRefilledAt; } @@ -113,7 +113,7 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; State state = (State) o; - return Double.compare(state.tokens, tokens) == 0 && Double.compare(state.lastRefilledAt, lastRefilledAt) == 0; + return Double.compare(state.tokens, tokens) == 0 && lastRefilledAt == state.lastRefilledAt; } @Override diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java index 885846a177d60..66efa832dec37 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java +++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java @@ -17,6 +17,9 @@ import org.opensearch.monitor.jvm.JvmStats; import org.opensearch.monitor.process.ProcessProbe; import 
org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.search.backpressure.trackers.CpuUsageTracker; +import org.opensearch.search.backpressure.trackers.ElapsedTimeTracker; +import org.opensearch.search.backpressure.trackers.HeapUsageTracker; import org.opensearch.search.backpressure.trackers.NodeDuressTracker; import org.opensearch.search.backpressure.trackers.TaskResourceUsageTracker; import org.opensearch.tasks.CancellableTask; @@ -29,7 +32,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Optional; @@ -84,7 +86,7 @@ public SearchBackpressureService( () -> JvmStats.jvmStats().getMem().getHeapUsedPercent() / 100.0 >= settings.getNodeDuressSettings().getHeapThreshold() ) ), - Collections.emptyList() + List.of(new CpuUsageTracker(settings), new HeapUsageTracker(settings), new ElapsedTimeTracker(settings, System::nanoTime)) ); } @@ -97,7 +99,7 @@ public SearchBackpressureService( List taskResourceUsageTrackers ) { this.settings = settings; - this.settings.setListener(this); + this.settings.addListener(this); this.taskResourceTrackingService = taskResourceTrackingService; this.taskResourceTrackingService.addTaskCompletionListener(this); this.threadPool = threadPool; @@ -181,10 +183,10 @@ boolean isNodeInDuress() { * Returns true if the increase in heap usage is due to search requests. */ boolean isHeapUsageDominatedBySearch(List searchShardTasks) { - long runningTasksHeapUsage = searchShardTasks.stream().mapToLong(task -> task.getTotalResourceStats().getMemoryInBytes()).sum(); - long searchTasksHeapThreshold = getSettings().getSearchShardTaskSettings().getTotalHeapThresholdBytes(); - if (runningTasksHeapUsage < searchTasksHeapThreshold) { - logger.debug("heap usage not dominated by search requests [{}/{}]", runningTasksHeapUsage, searchTasksHeapThreshold); + long usage = searchShardTasks.stream().mapToLong(task -> task.getTotalResourceStats().getMemoryInBytes()).sum(); + long threshold = getSettings().getSearchShardTaskSettings().getTotalHeapBytesThreshold(); + if (usage < threshold) { + logger.debug("heap usage not dominated by search requests [{}/{}]", usage, threshold); return false; } @@ -213,7 +215,7 @@ TaskCancellation getTaskCancellation(CancellableTask task) { List callbacks = new ArrayList<>(); for (TaskResourceUsageTracker tracker : taskResourceUsageTrackers) { - Optional reason = tracker.cancellationReason(task); + Optional reason = tracker.checkAndMaybeGetCancellationReason(task); if (reason.isPresent()) { reasons.add(reason.get()); callbacks.add(tracker::incrementCancellations); diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java index 4834808d768f1..5ceb01666757f 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java @@ -8,13 +8,16 @@ package org.opensearch.search.backpressure.settings; -import org.apache.lucene.util.SetOnce; +import org.opensearch.ExceptionsHelper; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import java.util.ArrayList; +import java.util.List; import 
java.util.concurrent.TimeUnit; +import java.util.function.Consumer; /** * Settings related to search backpressure and cancellation of in-flight requests. @@ -23,7 +26,7 @@ */ public class SearchBackpressureSettings { private static class Defaults { - private static final long INTERVAL = 1000; + private static final long INTERVAL_MILLIS = 1000; private static final boolean ENABLED = true; private static final boolean ENFORCED = false; @@ -37,9 +40,9 @@ private static class Defaults { * Defines the interval (in millis) at which the SearchBackpressureService monitors and cancels tasks. */ private final TimeValue interval; - public static final Setting SETTING_INTERVAL = Setting.longSetting( - "search_backpressure.interval", - Defaults.INTERVAL, + public static final Setting SETTING_INTERVAL_MILLIS = Setting.longSetting( + "search_backpressure.interval_millis", + Defaults.INTERVAL_MILLIS, 1, Setting.Property.NodeScope ); @@ -116,15 +119,19 @@ public interface Listener { void onCancellationBurstChanged(); } - private final SetOnce listener = new SetOnce<>(); + private final List listeners = new ArrayList<>(); + private final Settings settings; + private final ClusterSettings clusterSettings; private final NodeDuressSettings nodeDuressSettings; private final SearchShardTaskSettings searchShardTaskSettings; public SearchBackpressureSettings(Settings settings, ClusterSettings clusterSettings) { + this.settings = settings; + this.clusterSettings = clusterSettings; this.nodeDuressSettings = new NodeDuressSettings(settings, clusterSettings); this.searchShardTaskSettings = new SearchShardTaskSettings(settings, clusterSettings); - interval = new TimeValue(SETTING_INTERVAL.get(settings)); + interval = new TimeValue(SETTING_INTERVAL_MILLIS.get(settings)); enabled = SETTING_ENABLED.get(settings); clusterSettings.addSettingsUpdateConsumer(SETTING_ENABLED, this::setEnabled); @@ -142,8 +149,16 @@ public SearchBackpressureSettings(Settings settings, ClusterSettings clusterSett clusterSettings.addSettingsUpdateConsumer(SETTING_CANCELLATION_BURST, this::setCancellationBurst); } - public void setListener(Listener listener) { - this.listener.set(listener); + public void addListener(Listener listener) { + listeners.add(listener); + } + + public Settings getSettings() { + return settings; + } + + public ClusterSettings getClusterSettings() { + return clusterSettings; } public NodeDuressSettings getNodeDuressSettings() { @@ -180,9 +195,7 @@ public double getCancellationRatio() { private void setCancellationRatio(double cancellationRatio) { this.cancellationRatio = cancellationRatio; - if (listener.get() != null) { - listener.get().onCancellationRatioChanged(); - } + notifyListeners(Listener::onCancellationRatioChanged); } public double getCancellationRate() { @@ -195,9 +208,7 @@ public double getCancellationRateNanos() { private void setCancellationRate(double cancellationRate) { this.cancellationRate = cancellationRate; - if (listener.get() != null) { - listener.get().onCancellationRateChanged(); - } + notifyListeners(Listener::onCancellationRateChanged); } public double getCancellationBurst() { @@ -206,8 +217,20 @@ public double getCancellationBurst() { private void setCancellationBurst(double cancellationBurst) { this.cancellationBurst = cancellationBurst; - if (listener.get() != null) { - listener.get().onCancellationBurstChanged(); + notifyListeners(Listener::onCancellationBurstChanged); + } + + private void notifyListeners(Consumer consumer) { + List exceptions = new ArrayList<>(); + + for (Listener listener : 
listeners) { + try { + consumer.accept(listener); + } catch (Exception e) { + exceptions.add(e); + } } + + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); } } diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java index 1126dad78f554..7e40f1c0eab53 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchShardTaskSettings.java @@ -22,139 +22,37 @@ public class SearchShardTaskSettings { private static final long HEAP_SIZE_BYTES = JvmStats.jvmStats().getMem().getHeapMax().getBytes(); private static class Defaults { - private static final double TOTAL_HEAP_THRESHOLD = 0.05; - private static final double HEAP_THRESHOLD = 0.005; - private static final double HEAP_VARIANCE_THRESHOLD = 2.0; - private static final long CPU_TIME_THRESHOLD = 15; - private static final long ELAPSED_TIME_THRESHOLD = 30000; + private static final double TOTAL_HEAP_PERCENT_THRESHOLD = 0.05; } /** * Defines the heap usage threshold (in percentage) for the sum of heap usages across all search shard tasks * before in-flight cancellation is applied. */ - private volatile double totalHeapThreshold; - public static final Setting SETTING_TOTAL_HEAP_THRESHOLD = Setting.doubleSetting( - "search_backpressure.search_shard_task.total_heap_threshold", - Defaults.TOTAL_HEAP_THRESHOLD, + private volatile double totalHeapPercentThreshold; + public static final Setting SETTING_TOTAL_HEAP_PERCENT_THRESHOLD = Setting.doubleSetting( + "search_backpressure.search_shard_task.total_heap_percent_threshold", + Defaults.TOTAL_HEAP_PERCENT_THRESHOLD, 0.0, 1.0, Setting.Property.Dynamic, Setting.Property.NodeScope ); - /** - * Defines the heap usage threshold (in percentage) for an individual task before it is considered for cancellation. - */ - private volatile double heapThreshold; - public static final Setting SETTING_HEAP_THRESHOLD = Setting.doubleSetting( - "search_backpressure.search_shard_task.heap_threshold", - Defaults.HEAP_THRESHOLD, - 0.0, - 1.0, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Defines the heap usage variance for an individual task before it is considered for cancellation. - * A task is considered for cancellation when taskHeapUsage is greater than or equal to heapUsageMovingAverage * variance. - */ - private volatile double heapVarianceThreshold; - public static final Setting SETTING_HEAP_VARIANCE_THRESHOLD = Setting.doubleSetting( - "search_backpressure.search_shard_task.heap_variance", - Defaults.HEAP_VARIANCE_THRESHOLD, - 0.0, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Defines the CPU usage threshold (in millis) for an individual task before it is considered for cancellation. - */ - private volatile long cpuTimeThreshold; - public static final Setting SETTING_CPU_TIME_THRESHOLD = Setting.longSetting( - "search_backpressure.search_shard_task.cpu_time_threshold", - Defaults.CPU_TIME_THRESHOLD, - 0, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - /** - * Defines the elapsed time threshold (in millis) for an individual task before it is considered for cancellation. 
- */ - private volatile long elapsedTimeThreshold; - public static final Setting SETTING_ELAPSED_TIME_THRESHOLD = Setting.longSetting( - "search_backpressure.search_shard_task.elapsed_time_threshold", - Defaults.ELAPSED_TIME_THRESHOLD, - 0, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - public SearchShardTaskSettings(Settings settings, ClusterSettings clusterSettings) { - totalHeapThreshold = SETTING_TOTAL_HEAP_THRESHOLD.get(settings); - clusterSettings.addSettingsUpdateConsumer(SETTING_TOTAL_HEAP_THRESHOLD, this::setTotalHeapThreshold); - - heapThreshold = SETTING_HEAP_THRESHOLD.get(settings); - clusterSettings.addSettingsUpdateConsumer(SETTING_HEAP_THRESHOLD, this::setHeapThreshold); - - heapVarianceThreshold = SETTING_HEAP_VARIANCE_THRESHOLD.get(settings); - clusterSettings.addSettingsUpdateConsumer(SETTING_HEAP_VARIANCE_THRESHOLD, this::setHeapVarianceThreshold); - - cpuTimeThreshold = SETTING_CPU_TIME_THRESHOLD.get(settings); - clusterSettings.addSettingsUpdateConsumer(SETTING_CPU_TIME_THRESHOLD, this::setCpuTimeThreshold); - - elapsedTimeThreshold = SETTING_ELAPSED_TIME_THRESHOLD.get(settings); - clusterSettings.addSettingsUpdateConsumer(SETTING_ELAPSED_TIME_THRESHOLD, this::setElapsedTimeThreshold); - } - - public double getTotalHeapThreshold() { - return totalHeapThreshold; - } - - public long getTotalHeapThresholdBytes() { - return (long) (HEAP_SIZE_BYTES * getTotalHeapThreshold()); - } - - private void setTotalHeapThreshold(double totalHeapThreshold) { - this.totalHeapThreshold = totalHeapThreshold; - } - - public double getHeapThreshold() { - return heapThreshold; - } - - public long getHeapThresholdBytes() { - return (long) (HEAP_SIZE_BYTES * getHeapThreshold()); - } - - private void setHeapThreshold(double heapThreshold) { - this.heapThreshold = heapThreshold; - } - - public double getHeapVarianceThreshold() { - return heapVarianceThreshold; - } - - private void setHeapVarianceThreshold(double heapVarianceThreshold) { - this.heapVarianceThreshold = heapVarianceThreshold; - } - - public long getCpuTimeThreshold() { - return cpuTimeThreshold; + totalHeapPercentThreshold = SETTING_TOTAL_HEAP_PERCENT_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(SETTING_TOTAL_HEAP_PERCENT_THRESHOLD, this::setTotalHeapPercentThreshold); } - private void setCpuTimeThreshold(long cpuTimeThreshold) { - this.cpuTimeThreshold = cpuTimeThreshold; + public double getTotalHeapPercentThreshold() { + return totalHeapPercentThreshold; } - public long getElapsedTimeThreshold() { - return elapsedTimeThreshold; + public long getTotalHeapBytesThreshold() { + return (long) (HEAP_SIZE_BYTES * getTotalHeapPercentThreshold()); } - private void setElapsedTimeThreshold(long elapsedTimeThreshold) { - this.elapsedTimeThreshold = elapsedTimeThreshold; + private void setTotalHeapPercentThreshold(double totalHeapPercentThreshold) { + this.totalHeapPercentThreshold = totalHeapPercentThreshold; } } diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/CpuUsageTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/CpuUsageTracker.java new file mode 100644 index 0000000000000..fa4f88d8a54e6 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/CpuUsageTracker.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellation; + +import java.util.Optional; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.search.backpressure.trackers.TaskResourceUsageTrackerType.CPU_USAGE_TRACKER; + +/** + * CpuUsageTracker evaluates if the task has consumed too many CPU cycles than allowed. + * + * @opensearch.internal + */ +public class CpuUsageTracker extends TaskResourceUsageTracker { + private static class Defaults { + private static final long CPU_TIME_MILLIS_THRESHOLD = 15000; + } + + /** + * Defines the CPU usage threshold (in millis) for an individual task before it is considered for cancellation. + */ + private volatile long cpuTimeMillisThreshold; + public static final Setting SETTING_CPU_TIME_MILLIS_THRESHOLD = Setting.longSetting( + "search_backpressure.search_shard_task.cpu_time_millis_threshold", + Defaults.CPU_TIME_MILLIS_THRESHOLD, + 0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public CpuUsageTracker(SearchBackpressureSettings settings) { + this.cpuTimeMillisThreshold = SETTING_CPU_TIME_MILLIS_THRESHOLD.get(settings.getSettings()); + settings.getClusterSettings().addSettingsUpdateConsumer(SETTING_CPU_TIME_MILLIS_THRESHOLD, this::setCpuTimeMillisThreshold); + } + + @Override + public String name() { + return CPU_USAGE_TRACKER.getName(); + } + + @Override + public Optional checkAndMaybeGetCancellationReason(Task task) { + long usage = task.getTotalResourceStats().getCpuTimeInNanos(); + long threshold = getCpuTimeNanosThreshold(); + + if (usage < threshold) { + return Optional.empty(); + } + + return Optional.of( + new TaskCancellation.Reason( + "cpu usage exceeded [" + + new TimeValue(usage, TimeUnit.NANOSECONDS) + + " >= " + + new TimeValue(threshold, TimeUnit.NANOSECONDS) + + "]", + 1 // TODO: fine-tune the cancellation score/weight + ) + ); + } + + public long getCpuTimeNanosThreshold() { + return TimeUnit.MILLISECONDS.toNanos(cpuTimeMillisThreshold); + } + + public void setCpuTimeMillisThreshold(long cpuTimeMillisThreshold) { + this.cpuTimeMillisThreshold = cpuTimeMillisThreshold; + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTracker.java new file mode 100644 index 0000000000000..d76ccdb76db36 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTracker.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellation; + +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.function.LongSupplier; + +import static org.opensearch.search.backpressure.trackers.TaskResourceUsageTrackerType.ELAPSED_TIME_TRACKER; + +/** + * ElapsedTimeTracker evaluates if the task has been running for more time than allowed. + * + * @opensearch.internal + */ +public class ElapsedTimeTracker extends TaskResourceUsageTracker { + private static class Defaults { + private static final long ELAPSED_TIME_MILLIS_THRESHOLD = 30000; + } + + /** + * Defines the elapsed time threshold (in millis) for an individual task before it is considered for cancellation. + */ + private volatile long elapsedTimeMillisThreshold; + public static final Setting SETTING_ELAPSED_TIME_MILLIS_THRESHOLD = Setting.longSetting( + "search_backpressure.search_shard_task.elapsed_time_millis_threshold", + Defaults.ELAPSED_TIME_MILLIS_THRESHOLD, + 0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private final LongSupplier timeNanosSupplier; + + public ElapsedTimeTracker(SearchBackpressureSettings settings, LongSupplier timeNanosSupplier) { + this.timeNanosSupplier = timeNanosSupplier; + this.elapsedTimeMillisThreshold = SETTING_ELAPSED_TIME_MILLIS_THRESHOLD.get(settings.getSettings()); + settings.getClusterSettings().addSettingsUpdateConsumer(SETTING_ELAPSED_TIME_MILLIS_THRESHOLD, this::setElapsedTimeMillisThreshold); + } + + @Override + public String name() { + return ELAPSED_TIME_TRACKER.getName(); + } + + @Override + public Optional checkAndMaybeGetCancellationReason(Task task) { + long usage = timeNanosSupplier.getAsLong() - task.getStartTimeNanos(); + long threshold = getElapsedTimeNanosThreshold(); + + if (usage < threshold) { + return Optional.empty(); + } + + return Optional.of( + new TaskCancellation.Reason( + "elapsed time exceeded [" + + new TimeValue(usage, TimeUnit.NANOSECONDS) + + " >= " + + new TimeValue(threshold, TimeUnit.NANOSECONDS) + + "]", + 1 // TODO: fine-tune the cancellation score/weight + ) + ); + } + + public long getElapsedTimeNanosThreshold() { + return TimeUnit.MILLISECONDS.toNanos(elapsedTimeMillisThreshold); + } + + public void setElapsedTimeMillisThreshold(long elapsedTimeMillisThreshold) { + this.elapsedTimeMillisThreshold = elapsedTimeMillisThreshold; + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/HeapUsageTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/HeapUsageTracker.java new file mode 100644 index 0000000000000..983a2e1152511 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/HeapUsageTracker.java @@ -0,0 +1,148 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.util.MovingAverage; +import org.opensearch.monitor.jvm.JvmStats; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellation; + +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; + +import static org.opensearch.search.backpressure.trackers.TaskResourceUsageTrackerType.HEAP_USAGE_TRACKER; + +/** + * HeapUsageTracker evaluates if the task has consumed too much heap than allowed. + * It also compares the task's heap usage against a historical moving average of previously completed tasks. + * + * @opensearch.internal + */ +public class HeapUsageTracker extends TaskResourceUsageTracker { + private static final long HEAP_SIZE_BYTES = JvmStats.jvmStats().getMem().getHeapMax().getBytes(); + + private static class Defaults { + private static final double HEAP_PERCENT_THRESHOLD = 0.005; + private static final double HEAP_VARIANCE_THRESHOLD = 2.0; + private static final int HEAP_MOVING_AVERAGE_WINDOW_SIZE = 100; + } + + /** + * Defines the heap usage threshold (in percentage) for an individual task before it is considered for cancellation. + */ + private volatile double heapPercentThreshold; + public static final Setting SETTING_HEAP_PERCENT_THRESHOLD = Setting.doubleSetting( + "search_backpressure.search_shard_task.heap_percent_threshold", + Defaults.HEAP_PERCENT_THRESHOLD, + 0.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the heap usage variance for an individual task before it is considered for cancellation. + * A task is considered for cancellation when taskHeapUsage is greater than or equal to heapUsageMovingAverage * variance. + */ + private volatile double heapVarianceThreshold; + public static final Setting SETTING_HEAP_VARIANCE_THRESHOLD = Setting.doubleSetting( + "search_backpressure.search_shard_task.heap_variance", + Defaults.HEAP_VARIANCE_THRESHOLD, + 0.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Defines the window size to calculate the moving average of heap usage of completed tasks. 
+ */ + private volatile int heapMovingAverageWindowSize; + public static final Setting SETTING_HEAP_MOVING_AVERAGE_WINDOW_SIZE = Setting.intSetting( + "search_backpressure.search_shard_task.heap_moving_average_window_size", + Defaults.HEAP_MOVING_AVERAGE_WINDOW_SIZE, + 0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private final AtomicReference movingAverageReference; + + public HeapUsageTracker(SearchBackpressureSettings settings) { + heapPercentThreshold = SETTING_HEAP_PERCENT_THRESHOLD.get(settings.getSettings()); + settings.getClusterSettings().addSettingsUpdateConsumer(SETTING_HEAP_PERCENT_THRESHOLD, this::setHeapPercentThreshold); + + heapVarianceThreshold = SETTING_HEAP_VARIANCE_THRESHOLD.get(settings.getSettings()); + settings.getClusterSettings().addSettingsUpdateConsumer(SETTING_HEAP_VARIANCE_THRESHOLD, this::setHeapVarianceThreshold); + + heapMovingAverageWindowSize = SETTING_HEAP_MOVING_AVERAGE_WINDOW_SIZE.get(settings.getSettings()); + settings.getClusterSettings() + .addSettingsUpdateConsumer(SETTING_HEAP_MOVING_AVERAGE_WINDOW_SIZE, this::setHeapMovingAverageWindowSize); + + this.movingAverageReference = new AtomicReference<>(new MovingAverage(heapMovingAverageWindowSize)); + } + + @Override + public String name() { + return HEAP_USAGE_TRACKER.getName(); + } + + @Override + public void update(Task task) { + movingAverageReference.get().record(task.getTotalResourceStats().getMemoryInBytes()); + } + + @Override + public Optional checkAndMaybeGetCancellationReason(Task task) { + MovingAverage movingAverage = movingAverageReference.get(); + + // There haven't been enough measurements. + if (movingAverage.isReady() == false) { + return Optional.empty(); + } + + double currentUsage = task.getTotalResourceStats().getMemoryInBytes(); + double averageUsage = movingAverage.getAverage(); + double allowedUsage = averageUsage * getHeapVarianceThreshold(); + + if (currentUsage < getHeapBytesThreshold() || currentUsage < allowedUsage) { + return Optional.empty(); + } + + return Optional.of( + new TaskCancellation.Reason( + "heap usage exceeded [" + new ByteSizeValue((long) currentUsage) + " >= " + new ByteSizeValue((long) allowedUsage) + "]", + (int) (currentUsage / averageUsage) // TODO: fine-tune the cancellation score/weight + ) + ); + } + + public long getHeapBytesThreshold() { + return (long) (HEAP_SIZE_BYTES * heapPercentThreshold); + } + + public void setHeapPercentThreshold(double heapPercentThreshold) { + this.heapPercentThreshold = heapPercentThreshold; + } + + public double getHeapVarianceThreshold() { + return heapVarianceThreshold; + } + + public void setHeapVarianceThreshold(double heapVarianceThreshold) { + this.heapVarianceThreshold = heapVarianceThreshold; + } + + public void setHeapMovingAverageWindowSize(int heapMovingAverageWindowSize) { + this.heapMovingAverageWindowSize = heapMovingAverageWindowSize; + this.movingAverageReference.set(new MovingAverage(heapMovingAverageWindowSize)); + } +} diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java index 8f1842efa5771..1765dee42ae15 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java @@ -41,10 +41,10 @@ public long getCancellations() { /** * Notifies the tracker to update its state when a task 
execution completes. */ - public abstract void update(Task task); + public void update(Task task) {} /** * Returns the cancellation reason for the given task, if it's eligible for cancellation. */ - public abstract Optional cancellationReason(Task task); + public abstract Optional checkAndMaybeGetCancellationReason(Task task); } diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTrackerType.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTrackerType.java new file mode 100644 index 0000000000000..7a74321241534 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTrackerType.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.backpressure.trackers; + +/** + * Defines the type of TaskResourceUsageTracker. + */ +public enum TaskResourceUsageTrackerType { + CPU_USAGE_TRACKER("cpu_usage_tracker"), + HEAP_USAGE_TRACKER("heap_usage_tracker"), + ELAPSED_TIME_TRACKER("elapsed_time_tracker"); + + private final String name; + + TaskResourceUsageTrackerType(String name) { + this.name = name; + } + + public String getName() { + return name; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java index 2c302172cf6bc..d09312f38e3eb 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java @@ -15,7 +15,10 @@ import java.util.stream.Collectors; /** - * TaskCancellation is a wrapper for a task and its cancellation reasons. + * TaskCancellation represents a task eligible for cancellation. + * It doesn't guarantee that the task will actually get cancelled or not; that decision is left to the caller. + * + * It contains a list of cancellation reasons along with callbacks that are invoked when cancel() is called. * * @opensearch.internal */ diff --git a/server/src/test/java/org/opensearch/common/StreakTests.java b/server/src/test/java/org/opensearch/common/util/StreakTests.java similarity index 92% rename from server/src/test/java/org/opensearch/common/StreakTests.java rename to server/src/test/java/org/opensearch/common/util/StreakTests.java index 80080ad3e4027..682a28d3a3a8b 100644 --- a/server/src/test/java/org/opensearch/common/StreakTests.java +++ b/server/src/test/java/org/opensearch/common/util/StreakTests.java @@ -6,9 +6,8 @@ * compatible open source license. 
*/ -package org.opensearch.common; +package org.opensearch.common.util; -import org.opensearch.common.util.Streak; import org.opensearch.test.OpenSearchTestCase; public class StreakTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java index ac5a8229718ba..7e16e55e16e59 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java @@ -132,7 +132,7 @@ public String name() { public void update(Task task) {} @Override - public Optional cancellationReason(Task task) { + public Optional checkAndMaybeGetCancellationReason(Task task) { if (task.getTotalResourceStats().getCpuTimeInNanos() < 300) { return Optional.empty(); } @@ -167,10 +167,10 @@ public Optional cancellationReason(Task task) { service.doRun(); service.doRun(); - // Mocking 'settings' with predictable searchHeapThresholdBytes so that cancellation logic doesn't get skipped. + // Mocking 'settings' with predictable totalHeapBytesThreshold so that cancellation logic doesn't get skipped. long taskHeapUsageBytes = 500; SearchShardTaskSettings shardTaskSettings = mock(SearchShardTaskSettings.class); - when(shardTaskSettings.getTotalHeapThresholdBytes()).thenReturn(taskHeapUsageBytes); + when(shardTaskSettings.getTotalHeapBytesThreshold()).thenReturn(taskHeapUsageBytes); when(settings.getSearchShardTaskSettings()).thenReturn(shardTaskSettings); // Create a mix of low and high resource usage tasks (60 low + 15 high resource usage tasks). diff --git a/server/src/test/java/org/opensearch/search/backpressure/trackers/CpuUsageTrackerTests.java b/server/src/test/java/org/opensearch/search/backpressure/trackers/CpuUsageTrackerTests.java new file mode 100644 index 0000000000000..c790fb2e60eea --- /dev/null +++ b/server/src/test/java/org/opensearch/search/backpressure/trackers/CpuUsageTrackerTests.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Optional; + +import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats; + +public class CpuUsageTrackerTests extends OpenSearchTestCase { + private static final SearchBackpressureSettings mockSettings = new SearchBackpressureSettings( + Settings.builder() + .put(CpuUsageTracker.SETTING_CPU_TIME_MILLIS_THRESHOLD.getKey(), 15) // 15 ms + .build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + public void testEligibleForCancellation() { + Task task = createMockTaskWithResourceStats(SearchShardTask.class, 200000000, 200); + CpuUsageTracker tracker = new CpuUsageTracker(mockSettings); + + Optional reason = tracker.checkAndMaybeGetCancellationReason(task); + assertTrue(reason.isPresent()); + assertEquals(1, reason.get().getCancellationScore()); + assertEquals("cpu usage exceeded [200ms >= 15ms]", reason.get().getMessage()); + } + + public void testNotEligibleForCancellation() { + Task task = createMockTaskWithResourceStats(SearchShardTask.class, 5000000, 200); + CpuUsageTracker tracker = new CpuUsageTracker(mockSettings); + + Optional reason = tracker.checkAndMaybeGetCancellationReason(task); + assertFalse(reason.isPresent()); + } +} diff --git a/server/src/test/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTrackerTests.java b/server/src/test/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTrackerTests.java new file mode 100644 index 0000000000000..67ed6059a1914 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTrackerTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Optional; + +import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats; + +public class ElapsedTimeTrackerTests extends OpenSearchTestCase { + + private static final SearchBackpressureSettings mockSettings = new SearchBackpressureSettings( + Settings.builder() + .put(ElapsedTimeTracker.SETTING_ELAPSED_TIME_MILLIS_THRESHOLD.getKey(), 100) // 100 ms + .build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + public void testEligibleForCancellation() { + Task task = createMockTaskWithResourceStats(SearchShardTask.class, 1, 1, 0); + ElapsedTimeTracker tracker = new ElapsedTimeTracker(mockSettings, () -> 200000000); + + Optional reason = tracker.checkAndMaybeGetCancellationReason(task); + assertTrue(reason.isPresent()); + assertEquals(1, reason.get().getCancellationScore()); + assertEquals("elapsed time exceeded [200ms >= 100ms]", reason.get().getMessage()); + } + + public void testNotEligibleForCancellation() { + Task task = createMockTaskWithResourceStats(SearchShardTask.class, 1, 1, 150000000); + ElapsedTimeTracker tracker = new ElapsedTimeTracker(mockSettings, () -> 200000000); + + Optional reason = tracker.checkAndMaybeGetCancellationReason(task); + assertFalse(reason.isPresent()); + } +} diff --git a/server/src/test/java/org/opensearch/search/backpressure/trackers/HeapUsageTrackerTests.java b/server/src/test/java/org/opensearch/search/backpressure/trackers/HeapUsageTrackerTests.java new file mode 100644 index 0000000000000..b9967da22fbf1 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/backpressure/trackers/HeapUsageTrackerTests.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.backpressure.trackers; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Optional; + +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats; + +public class HeapUsageTrackerTests extends OpenSearchTestCase { + private static final long HEAP_BYTES_THRESHOLD = 100; + private static final int HEAP_MOVING_AVERAGE_WINDOW_SIZE = 100; + + private static final SearchBackpressureSettings mockSettings = new SearchBackpressureSettings( + Settings.builder() + .put(HeapUsageTracker.SETTING_HEAP_VARIANCE_THRESHOLD.getKey(), 2.0) + .put(HeapUsageTracker.SETTING_HEAP_MOVING_AVERAGE_WINDOW_SIZE.getKey(), HEAP_MOVING_AVERAGE_WINDOW_SIZE) + .build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + public void testEligibleForCancellation() { + HeapUsageTracker tracker = spy(new HeapUsageTracker(mockSettings)); + when(tracker.getHeapBytesThreshold()).thenReturn(HEAP_BYTES_THRESHOLD); + Task task = createMockTaskWithResourceStats(SearchShardTask.class, 1, 50); + + // Record enough observations to make the moving average 'ready'. + for (int i = 0; i < HEAP_MOVING_AVERAGE_WINDOW_SIZE; i++) { + tracker.update(task); + } + + // Task that has heap usage >= heapBytesThreshold and (movingAverage * heapVariance). + task = createMockTaskWithResourceStats(SearchShardTask.class, 1, 200); + Optional reason = tracker.checkAndMaybeGetCancellationReason(task); + assertTrue(reason.isPresent()); + assertEquals(4, reason.get().getCancellationScore()); + assertEquals("heap usage exceeded [200b >= 100b]", reason.get().getMessage()); + } + + public void testNotEligibleForCancellation() { + Task task; + Optional reason; + HeapUsageTracker tracker = spy(new HeapUsageTracker(mockSettings)); + when(tracker.getHeapBytesThreshold()).thenReturn(HEAP_BYTES_THRESHOLD); + + // Task with heap usage < heapBytesThreshold. + task = createMockTaskWithResourceStats(SearchShardTask.class, 1, 99); + + // Not enough observations. + reason = tracker.checkAndMaybeGetCancellationReason(task); + assertFalse(reason.isPresent()); + + // Record enough observations to make the moving average 'ready'. + for (int i = 0; i < HEAP_MOVING_AVERAGE_WINDOW_SIZE; i++) { + tracker.update(task); + } + + // Task with heap usage < heapBytesThreshold should not be cancelled. + reason = tracker.checkAndMaybeGetCancellationReason(task); + assertFalse(reason.isPresent()); + + // Task with heap usage between heapBytesThreshold and (movingAverage * heapVariance) should not be cancelled. 
+ double allowedHeapUsage = 99.0 * 2.0; + task = createMockTaskWithResourceStats(SearchShardTask.class, 1, randomLongBetween(99, (long) allowedHeapUsage - 1)); + reason = tracker.checkAndMaybeGetCancellationReason(task); + assertFalse(reason.isPresent()); + } +} diff --git a/server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java b/server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java index d6a82702e074d..50a510f954677 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskCancellationTests.java @@ -64,7 +64,7 @@ public String name() { public void update(Task task) {} @Override - public Optional cancellationReason(Task task) { + public Optional checkAndMaybeGetCancellationReason(Task task) { return Optional.empty(); } }; From 140e8d3e6c91519edc47be07b4cd053fdfac1769 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Mon, 31 Oct 2022 17:16:41 -0700 Subject: [PATCH 109/122] Add bwcVersion 2.5.0 (#5003) * Add bwcVersion 2.5.0 Signed-off-by: Vacha Shah * Fixing version id Signed-off-by: Vacha Shah Signed-off-by: Vacha Shah --- .ci/bwcVersions | 1 + CHANGELOG.md | 1 + server/src/main/java/org/opensearch/Version.java | 1 + 3 files changed, 3 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index e82101896818e..b6acb886dc327 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -53,3 +53,4 @@ BWC_VERSION: - "2.3.0" - "2.3.1" - "2.4.0" + - "2.5.0" diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a0ff44256a46..37fa60835dff0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Build no-jdk distributions as part of release build ([#4902](https://github.com/opensearch-project/OpenSearch/pull/4902)) - Added resource usage trackers for in-flight cancellation of SearchShardTask ([#4805](https://github.com/opensearch-project/OpenSearch/pull/4805)) - Renamed flaky tests ([#4912](https://github.com/opensearch-project/OpenSearch/pull/4912)) +- Update previous release bwc version to 2.5.0 ([#5003](https://github.com/opensearch-project/OpenSearch/pull/5003)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 9143a84676330..2dffc46db1e5b 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -100,6 +100,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_1); + public static final Version V_2_5_0 = new Version(2050099, org.apache.lucene.util.Version.LUCENE_9_4_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version CURRENT = V_3_0_0; From 34b6b09474ab83be856c3afc20fb101c63244ee6 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 1 Nov 2022 08:10:00 -0400 Subject: [PATCH 110/122] OpenJDK Update (October 2022 Patch releases) (#4997) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../java/org/opensearch/gradle/test/DistroTestPlugin.java | 4 ++-- buildSrc/version.properties | 2 +- 3 files 
changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 37fa60835dff0..dfa74fc9396a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -83,6 +83,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bump `woodstox-core` to 6.4.0 ([#4947](https://github.com/opensearch-project/OpenSearch/pull/4947)) - Update Apache Lucene to 9.5.0-snapshot-a4ef70f ([#4979](https://github.com/opensearch-project/OpenSearch/pull/4979)) - Upgrade jetty-http, kotlin-stdlib and snakeyaml ([#4963](https://github.com/opensearch-project/OpenSearch/pull/4963)) +- OpenJDK Update (October 2022 Patch releases) ([#4997](https://github.com/opensearch-project/OpenSearch/pull/4997)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index b14e93ecfd22d..e7c907dfdf000 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -75,9 +75,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin { - private static final String SYSTEM_JDK_VERSION = "11.0.16+8"; + private static final String SYSTEM_JDK_VERSION = "11.0.17+8"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "17.0.4+8"; + private static final String GRADLE_JDK_VERSION = "17.0.5+8"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 8dd54da07f3d2..a42faa4a62080 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -2,7 +2,7 @@ opensearch = 3.0.0 lucene = 9.5.0-snapshot-a4ef70f bundled_jdk_vendor = adoptium -bundled_jdk = 17.0.4+8 +bundled_jdk = 17.0.5+8 From db418fb8acc18aef95af7c4c4253d87660fb9e93 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 1 Nov 2022 07:18:45 -0500 Subject: [PATCH 111/122] [Remove] LegacyESVersion.V_7_8_* and V_7_9_* constants (#4855) * [Remove] LegacyESVersion.V_7_8_* and V_7_9_* constants Removes all usages of LegacyESVersion.V_7_8_ and LegacyESVersion.V_7_9 version constants along with ancient API logic. 
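As a rough illustration of what is being deleted (hypothetical names, not code from this change): most of the removed branches follow the standard wire-compatibility pattern, where a field is read or written only when the remote node is on a new enough version. Once wire compatibility with those legacy versions is no longer supported, the guard is dead code and the field can be exchanged unconditionally. A minimal sketch of that pattern, assuming a made-up ExampleRequest with a made-up extraField:

    import java.io.IOException;

    import org.opensearch.LegacyESVersion;
    import org.opensearch.common.io.stream.StreamInput;
    import org.opensearch.common.io.stream.StreamOutput;
    import org.opensearch.transport.TransportRequest;

    public class ExampleRequest extends TransportRequest {
        private final Long extraField; // hypothetical field guarded by a version check

        public ExampleRequest(StreamInput in) throws IOException {
            super(in);
            // Before: the field is only read from nodes on 7.9.0 or later, because older nodes never sent it.
            if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) {
                extraField = in.readOptionalLong();
            } else {
                extraField = null;
            }
            // After this change the guard disappears and the field is read unconditionally:
            //     extraField = in.readOptionalLong();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            // The matching write-side guard is removed in the same way.
            if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) {
                out.writeOptionalLong(extraField);
            }
        }
    }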
Signed-off-by: Nicholas Walter Knize * update changelog Signed-off-by: Nicholas Walter Knize * revert masterNodeTimeout method call Signed-off-by: Nicholas Walter Knize * cleanup MappingUpdatedAction to remove unexecuted code Signed-off-by: Nicholas Walter Knize * revert timeout deletion Signed-off-by: Nicholas Walter Knize Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../java/org/opensearch/LegacyESVersion.java | 6 ---- .../org/opensearch/OpenSearchException.java | 4 +-- .../opensearch/action/DocWriteRequest.java | 3 +- .../AddVotingConfigExclusionsRequest.java | 16 +++------ .../admin/cluster/node/stats/NodeStats.java | 22 +++--------- .../node/tasks/cancel/CancelTasksRequest.java | 9 ++--- .../admin/cluster/node/usage/NodeUsage.java | 11 ++---- .../cluster/node/usage/NodesUsageRequest.java | 9 ++--- .../indices/alias/IndicesAliasesRequest.java | 10 ++---- .../admin/indices/get/GetIndexResponse.java | 23 +++++------- .../mapping/put/PutMappingRequest.java | 9 ++--- .../action/bulk/BulkItemRequest.java | 3 +- .../action/bulk/BulkShardRequest.java | 9 ++--- .../action/bulk/BulkShardResponse.java | 14 ++------ .../action/bulk/TransportBulkAction.java | 7 +--- .../FieldCapabilitiesIndexRequest.java | 11 +++--- .../FieldCapabilitiesIndexResponse.java | 7 ++-- .../fieldcaps/FieldCapabilitiesRequest.java | 11 +++--- .../ingest/SimulateProcessorResult.java | 12 +++---- .../org/opensearch/cluster/ClusterInfo.java | 11 ++---- .../action/index/MappingUpdatedAction.java | 18 +++------- .../metadata/ComposableIndexTemplate.java | 11 ++---- .../MetadataCreateDataStreamService.java | 4 --- .../opensearch/index/store/StoreStats.java | 15 ++------ .../recovery/PeerRecoveryTargetService.java | 7 +--- .../recovery/RecoveryTransportRequest.java | 12 ++----- .../recovery/RetryableTransportClient.java | 3 +- .../common/ReplicationLuceneIndex.java | 15 ++------ .../org/opensearch/script/ScriptStats.java | 7 ++-- .../java/org/opensearch/search/SearchHit.java | 25 +++---------- ...AbstractPercentilesAggregationBuilder.java | 28 ++------------- .../metrics/InternalScriptedMetric.java | 24 ++----------- .../support/MultiValuesSourceFieldConfig.java | 11 ++---- .../search/profile/ProfileResult.java | 11 ++---- .../opensearch/snapshots/RestoreService.java | 10 ++---- .../tasks/TaskCancellationService.java | 11 ++---- .../index/MappingUpdatedActionTests.java | 35 ++----------------- .../cluster/node/DiscoveryNodeTests.java | 5 ++- .../index/mapper/ParametrizedMapperTests.java | 3 +- 40 files changed, 92 insertions(+), 371 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dfa74fc9396a8..218c291e1417c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -125,6 +125,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) - Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) - Revert PR 4656 to unblock Windows CI ([#4949](https://github.com/opensearch-project/OpenSearch/pull/4949)) +- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index 
6e2aeaa5f7b4f..c020089afd1b9 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -53,12 +53,6 @@ public class LegacyESVersion extends Version { public static final LegacyESVersion V_7_6_2 = new LegacyESVersion(7060299, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final LegacyESVersion V_7_7_0 = new LegacyESVersion(7070099, org.apache.lucene.util.Version.LUCENE_8_5_1); public static final LegacyESVersion V_7_7_1 = new LegacyESVersion(7070199, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final LegacyESVersion V_7_8_0 = new LegacyESVersion(7080099, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final LegacyESVersion V_7_8_1 = new LegacyESVersion(7080199, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final LegacyESVersion V_7_9_0 = new LegacyESVersion(7090099, org.apache.lucene.util.Version.LUCENE_8_6_0); - public static final LegacyESVersion V_7_9_1 = new LegacyESVersion(7090199, org.apache.lucene.util.Version.LUCENE_8_6_2); - public static final LegacyESVersion V_7_9_2 = new LegacyESVersion(7090299, org.apache.lucene.util.Version.LUCENE_8_6_2); - public static final LegacyESVersion V_7_9_3 = new LegacyESVersion(7090399, org.apache.lucene.util.Version.LUCENE_8_6_2); public static final LegacyESVersion V_7_10_0 = new LegacyESVersion(7100099, org.apache.lucene.util.Version.LUCENE_8_7_0); public static final LegacyESVersion V_7_10_1 = new LegacyESVersion(7100199, org.apache.lucene.util.Version.LUCENE_8_7_0); public static final LegacyESVersion V_7_10_2 = new LegacyESVersion(7100299, org.apache.lucene.util.Version.LUCENE_8_7_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 78bda1cf088cd..4cbbb1ca4bc3f 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -1576,13 +1576,13 @@ private enum OpenSearchExceptionHandle { org.opensearch.indices.recovery.PeerRecoveryNotFound.class, org.opensearch.indices.recovery.PeerRecoveryNotFound::new, 158, - LegacyESVersion.V_7_9_0 + UNKNOWN_VERSION_ADDED ), NODE_HEALTH_CHECK_FAILURE_EXCEPTION( org.opensearch.cluster.coordination.NodeHealthCheckFailureException.class, org.opensearch.cluster.coordination.NodeHealthCheckFailureException::new, 159, - LegacyESVersion.V_7_9_0 + UNKNOWN_VERSION_ADDED ), NO_SEED_NODE_LEFT_EXCEPTION( org.opensearch.transport.NoSeedNodeLeftException.class, diff --git a/server/src/main/java/org/opensearch/action/DocWriteRequest.java b/server/src/main/java/org/opensearch/action/DocWriteRequest.java index a789189f9227e..ed59b5e95a01f 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/opensearch/action/DocWriteRequest.java @@ -231,8 +231,7 @@ public static OpType fromString(String sOpType) { * Read a document write (index/delete/update) request * * @param shardId shard id of the request. 
{@code null} when reading as part of a {@link org.opensearch.action.bulk.BulkRequest} - * that does not have a unique shard id or when reading from a stream of version older than - * {@link org.opensearch.action.bulk.BulkShardRequest#COMPACT_SHARD_ID_VERSION} + * that does not have a unique shard id */ static DocWriteRequest readDocumentRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { byte type = in.readByte(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index a2a77a1316898..4b52f553a88e3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.cluster.configuration; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.ClusterState; @@ -110,13 +109,8 @@ public AddVotingConfigExclusionsRequest(String[] nodeDescriptions, String[] node public AddVotingConfigExclusionsRequest(StreamInput in) throws IOException { super(in); nodeDescriptions = in.readStringArray(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - nodeIds = in.readStringArray(); - nodeNames = in.readStringArray(); - } else { - nodeIds = Strings.EMPTY_ARRAY; - nodeNames = Strings.EMPTY_ARRAY; - } + nodeIds = in.readStringArray(); + nodeNames = in.readStringArray(); timeout = in.readTimeValue(); if (nodeDescriptions.length > 0) { @@ -249,10 +243,8 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(nodeDescriptions); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeStringArray(nodeIds); - out.writeStringArray(nodeNames); - } + out.writeStringArray(nodeIds); + out.writeStringArray(nodeNames); out.writeTimeValue(timeout); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 7f0ac615cc449..aa8849934f6ee 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.stats; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.node.DiscoveryNode; @@ -138,18 +137,10 @@ public NodeStats(StreamInput in) throws IOException { ingestStats = in.readOptionalWriteable(IngestStats::new); adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new); scriptCacheStats = null; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - if (in.getVersion().before(LegacyESVersion.V_7_9_0)) { - scriptCacheStats = in.readOptionalWriteable(ScriptCacheStats::new); - } else if (scriptStats != null) { - scriptCacheStats = scriptStats.toScriptCacheStats(); - } - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - indexingPressureStats = 
in.readOptionalWriteable(IndexingPressureStats::new); - } else { - indexingPressureStats = null; + if (scriptStats != null) { + scriptCacheStats = scriptStats.toScriptCacheStats(); } + indexingPressureStats = in.readOptionalWriteable(IndexingPressureStats::new); if (in.getVersion().onOrAfter(Version.V_1_2_0)) { shardIndexingPressureStats = in.readOptionalWriteable(ShardIndexingPressureStats::new); } else { @@ -327,12 +318,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(discoveryStats); out.writeOptionalWriteable(ingestStats); out.writeOptionalWriteable(adaptiveSelectionStats); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0) && out.getVersion().before(LegacyESVersion.V_7_9_0)) { - out.writeOptionalWriteable(scriptCacheStats); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeOptionalWriteable(indexingPressureStats); - } + out.writeOptionalWriteable(indexingPressureStats); if (out.getVersion().onOrAfter(Version.V_1_2_0)) { out.writeOptionalWriteable(shardIndexingPressureStats); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java index a4b24a7a91f1f..794c942a4e7d2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.tasks.cancel; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.tasks.BaseTasksRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -60,18 +59,14 @@ public CancelTasksRequest() {} public CancelTasksRequest(StreamInput in) throws IOException { super(in); this.reason = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - waitForCompletion = in.readBoolean(); - } + waitForCompletion = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(reason); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeBoolean(waitForCompletion); - } + out.writeBoolean(waitForCompletion); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java index 58e43ca9f3568..be2a1141b8a02 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.usage; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.StreamInput; @@ -61,11 +60,7 @@ public NodeUsage(StreamInput in) throws IOException { timestamp = in.readLong(); sinceTime = in.readLong(); restUsage = (Map) in.readGenericValue(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - aggregationUsage = (Map) in.readGenericValue(); - } else { - aggregationUsage = null; - } + aggregationUsage = (Map) in.readGenericValue(); } /** @@ -144,9 +139,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(timestamp); 
out.writeLong(sinceTime); out.writeGenericValue(restUsage); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeGenericValue(aggregationUsage); - } + out.writeGenericValue(aggregationUsage); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java index 01f66bd843642..1badfa6d02f15 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.usage; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -52,9 +51,7 @@ public class NodesUsageRequest extends BaseNodesRequest { public NodesUsageRequest(StreamInput in) throws IOException { super(in); this.restActions = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - this.aggregations = in.readBoolean(); - } + this.aggregations = in.readBoolean(); } /** @@ -116,8 +113,6 @@ public NodesUsageRequest aggregations(boolean aggregations) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(restActions); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeBoolean(aggregations); - } + out.writeBoolean(aggregations); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index bb28623430f2d..e210d54a922b1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -281,11 +281,7 @@ public AliasActions(StreamInput in) throws IOException { isHidden = in.readOptionalBoolean(); } originalAliases = in.readStringArray(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - mustExist = in.readOptionalBoolean(); - } else { - mustExist = null; - } + mustExist = in.readOptionalBoolean(); } @Override @@ -303,9 +299,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(isHidden); } out.writeStringArray(originalAliases); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeOptionalBoolean(mustExist); - } + out.writeOptionalBoolean(mustExist); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java index e93fbe86e4ece..9de7dbadaa7df 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.AliasMetadata; @@ -152,14 +151,12 @@ public GetIndexResponse( } defaultSettings = defaultSettingsMapBuilder.build(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - ImmutableOpenMap.Builder 
dataStreamsMapBuilder = ImmutableOpenMap.builder(); - int dataStreamsSize = in.readVInt(); - for (int i = 0; i < dataStreamsSize; i++) { - dataStreamsMapBuilder.put(in.readString(), in.readOptionalString()); - } - dataStreams = dataStreamsMapBuilder.build(); + ImmutableOpenMap.Builder dataStreamsMapBuilder = ImmutableOpenMap.builder(); + int dataStreamsSize = in.readVInt(); + for (int i = 0; i < dataStreamsSize; i++) { + dataStreamsMapBuilder.put(in.readString(), in.readOptionalString()); } + dataStreams = dataStreamsMapBuilder.build(); } public String[] indices() { @@ -272,12 +269,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(indexEntry.key); Settings.writeSettingsToStream(indexEntry.value, out); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeVInt(dataStreams.size()); - for (ObjectObjectCursor indexEntry : dataStreams) { - out.writeString(indexEntry.key); - out.writeOptionalString(indexEntry.value); - } + out.writeVInt(dataStreams.size()); + for (ObjectObjectCursor indexEntry : dataStreams) { + out.writeString(indexEntry.key); + out.writeOptionalString(indexEntry.value); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index a8eeedd4a3e4c..df660e027ea3d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.mapping.put; import com.carrotsearch.hppc.ObjectHashSet; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; @@ -119,9 +118,7 @@ public PutMappingRequest(StreamInput in) throws IOException { source = in.readString(); concreteIndex = in.readOptionalWriteable(Index::new); origin = in.readOptionalString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - writeIndexOnly = in.readBoolean(); - } + writeIndexOnly = in.readBoolean(); } public PutMappingRequest() {} @@ -348,9 +345,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(source); out.writeOptionalWriteable(concreteIndex); out.writeOptionalString(origin); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeBoolean(writeIndexOnly); - } + out.writeBoolean(writeIndexOnly); } @Override diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java index f39ba589a3019..9845413fe8af8 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java @@ -59,8 +59,7 @@ public class BulkItemRequest implements Writeable, Accountable { private volatile BulkItemResponse primaryResponse; /** - * @param shardId {@code null} if reading from a stream before {@link BulkShardRequest#COMPACT_SHARD_ID_VERSION} to force BwC read - * that includes shard id + * @param shardId the shard id */ BulkItemRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { id = in.readVInt(); diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkShardRequest.java index 
a7d0de98981ba..484f57abb8b07 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkShardRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkShardRequest.java @@ -34,8 +34,6 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.common.io.stream.StreamInput; @@ -54,14 +52,13 @@ */ public class BulkShardRequest extends ReplicatedWriteRequest implements Accountable { - public static final Version COMPACT_SHARD_ID_VERSION = LegacyESVersion.V_7_9_0; private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class); private final BulkItemRequest[] items; public BulkShardRequest(StreamInput in) throws IOException { super(in); - final ShardId itemShardId = in.getVersion().onOrAfter(COMPACT_SHARD_ID_VERSION) ? shardId : null; + final ShardId itemShardId = shardId; items = in.readArray(i -> i.readOptionalWriteable(inpt -> new BulkItemRequest(itemShardId, inpt)), BulkItemRequest[]::new); } @@ -95,14 +92,14 @@ public String[] indices() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeArray(out.getVersion().onOrAfter(COMPACT_SHARD_ID_VERSION) ? (o, item) -> { + out.writeArray((o, item) -> { if (item != null) { o.writeBoolean(true); item.writeThin(o); } else { o.writeBoolean(false); } - } : StreamOutput::writeOptionalWriteable, items); + }, items); } @Override diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkShardResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkShardResponse.java index b96ebcb3a3c0a..cfedcde92194c 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkShardResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkShardResponse.java @@ -32,8 +32,6 @@ package org.opensearch.action.bulk; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.support.WriteResponse; import org.opensearch.action.support.replication.ReplicationResponse; @@ -50,18 +48,13 @@ */ public class BulkShardResponse extends ReplicationResponse implements WriteResponse { - private static final Version COMPACT_SHARD_ID_VERSION = LegacyESVersion.V_7_9_0; - private final ShardId shardId; private final BulkItemResponse[] responses; BulkShardResponse(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - responses = in.readArray( - in.getVersion().onOrAfter(COMPACT_SHARD_ID_VERSION) ? i -> new BulkItemResponse(shardId, i) : BulkItemResponse::new, - BulkItemResponse[]::new - ); + responses = in.readArray(i -> new BulkItemResponse(shardId, i), BulkItemResponse[]::new); } // NOTE: public for testing only @@ -96,9 +89,6 @@ public void setForcedRefresh(boolean forcedRefresh) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - out.writeArray( - out.getVersion().onOrAfter(COMPACT_SHARD_ID_VERSION) ? 
(o, item) -> item.writeThin(out) : (o, item) -> item.writeTo(o), - responses - ); + out.writeArray((o, item) -> item.writeThin(out), responses); } } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index de285983b846b..9e23213c02ab6 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SparseFixedBitSet; import org.opensearch.Assertions; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceAlreadyExistsException; @@ -455,11 +454,7 @@ void createIndex(String index, TimeValue timeout, Version minNodeVersion, Action createIndexRequest.index(index); createIndexRequest.cause("auto(bulk api)"); createIndexRequest.clusterManagerNodeTimeout(timeout); - if (minNodeVersion.onOrAfter(LegacyESVersion.V_7_8_0)) { - client.execute(AutoCreateAction.INSTANCE, createIndexRequest, listener); - } else { - client.admin().indices().create(createIndexRequest, listener); - } + client.execute(AutoCreateAction.INSTANCE, createIndexRequest, listener); } private boolean setResponseFailureIfIndexMatches( diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java index 3cfd21087582d..2b6a89c66ef4b 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; @@ -70,8 +69,8 @@ public class FieldCapabilitiesIndexRequest extends ActionRequest implements Indi index = in.readOptionalString(); fields = in.readStringArray(); originalIndices = OriginalIndices.readOriginalIndices(in); - indexFilter = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readOptionalNamedWriteable(QueryBuilder.class) : null; - nowInMillis = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? 
in.readLong() : 0L; + indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); + nowInMillis = in.readLong(); } FieldCapabilitiesIndexRequest( @@ -133,10 +132,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(index); out.writeStringArray(fields); OriginalIndices.writeOriginalIndices(originalIndices, out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeOptionalNamedWriteable(indexFilter); - out.writeLong(nowInMillis); - } + out.writeOptionalNamedWriteable(indexFilter); + out.writeLong(nowInMillis); } @Override diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index 9225e0cdc6571..4da5d04fe9d7a 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -62,7 +61,7 @@ public class FieldCapabilitiesIndexResponse extends ActionResponse implements Wr super(in); this.indexName = in.readString(); this.responseMap = in.readMap(StreamInput::readString, IndexFieldCapabilities::new); - this.canMatch = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readBoolean() : true; + this.canMatch = in.readBoolean(); } /** @@ -95,9 +94,7 @@ public IndexFieldCapabilities getField(String field) { public void writeTo(StreamOutput out) throws IOException { out.writeString(indexName); out.writeMap(responseMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeBoolean(canMatch); - } + out.writeBoolean(canMatch); } @Override diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java index 50d60560bdfe4..2e00c55c31dc6 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; @@ -73,8 +72,8 @@ public FieldCapabilitiesRequest(StreamInput in) throws IOException { indicesOptions = IndicesOptions.readIndicesOptions(in); mergeResults = in.readBoolean(); includeUnmapped = in.readBoolean(); - indexFilter = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readOptionalNamedWriteable(QueryBuilder.class) : null; - nowInMillis = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? 
in.readOptionalLong() : null; + indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); + nowInMillis = in.readOptionalLong(); } public FieldCapabilitiesRequest() {} @@ -106,10 +105,8 @@ public void writeTo(StreamOutput out) throws IOException { indicesOptions.writeIndicesOptions(out); out.writeBoolean(mergeResults); out.writeBoolean(includeUnmapped); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeOptionalNamedWriteable(indexFilter); - out.writeOptionalLong(nowInMillis); - } + out.writeOptionalNamedWriteable(indexFilter); + out.writeOptionalLong(nowInMillis); } @Override diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateProcessorResult.java b/server/src/main/java/org/opensearch/action/ingest/SimulateProcessorResult.java index e971aca855c30..ffae6ee90479c 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateProcessorResult.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateProcessorResult.java @@ -192,11 +192,8 @@ public SimulateProcessorResult(String type, String processorTag, String descript this.processorTag = in.readString(); this.ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); this.failure = in.readException(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - this.description = in.readOptionalString(); - } else { - this.description = null; - } + this.description = in.readOptionalString(); + if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { this.type = in.readString(); boolean hasConditional = in.readBoolean(); @@ -216,9 +213,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(processorTag); out.writeOptionalWriteable(ingestDocument); out.writeException(failure); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeOptionalString(description); - } + out.writeOptionalString(description); + if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { out.writeString(type); out.writeBoolean(conditionalWithResult != null); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 6c7f4ba487568..39c0cf1ba3485 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -44,7 +44,6 @@ import org.opensearch.common.xcontent.ToXContentFragment; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.index.shard.ShardId; -import org.opensearch.index.store.StoreStats; import java.io.IOException; import java.util.Map; @@ -100,11 +99,7 @@ public ClusterInfo(StreamInput in) throws IOException { Map sizeMap = in.readMap(StreamInput::readString, StreamInput::readLong); Map routingMap = in.readMap(ShardRouting::new, StreamInput::readString); Map reservedSpaceMap; - if (in.getVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { - reservedSpaceMap = in.readMap(NodeAndPath::new, ReservedSpace::new); - } else { - reservedSpaceMap = org.opensearch.common.collect.Map.of(); - } + reservedSpaceMap = in.readMap(NodeAndPath::new, ReservedSpace::new); ImmutableOpenMap.Builder leastBuilder = ImmutableOpenMap.builder(); this.leastAvailableSpaceUsage = leastBuilder.putAll(leastMap).build(); @@ -128,9 +123,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(this.mostAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o)); out.writeMap(this.shardSizes, StreamOutput::writeString, (o, v) -> out.writeLong(v == 
null ? -1 : v)); out.writeMap(this.routingToDataPath, (o, k) -> k.writeTo(o), StreamOutput::writeString); - if (out.getVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { - out.writeMap(this.reservedSpace); - } + out.writeMap(this.reservedSpace); } public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java index c1c49e1018197..4bb4137f9d7a2 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.action.index; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.mapping.put.AutoPutMappingAction; @@ -157,18 +156,11 @@ protected void sendUpdateMapping(Index index, Mapping mappingUpdate, ActionListe putMappingRequest.source(mappingUpdate.toString(), XContentType.JSON); putMappingRequest.clusterManagerNodeTimeout(dynamicMappingUpdateTimeout); putMappingRequest.timeout(TimeValue.ZERO); - if (clusterService.state().nodes().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - client.execute( - AutoPutMappingAction.INSTANCE, - putMappingRequest, - ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure) - ); - } else { - client.putMapping( - putMappingRequest, - ActionListener.wrap(r -> listener.onResponse(null), e -> listener.onFailure(unwrapException(e))) - ); - } + client.execute( + AutoPutMappingAction.INSTANCE, + putMappingRequest, + ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure) + ); } // todo: this explicit unwrap should not be necessary, but is until guessRootCause is fixed to allow wrapped non-es exception. 
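Restated as a minimal sketch (imports omitted, method and parameter names are illustrative; the individual calls mirror the hunk above), the dynamic mapping update path now always dispatches through AutoPutMappingAction rather than choosing between it and the plain put-mapping API based on the minimum node version:

    // Illustrative sketch only; in the real class these values are fields and arguments of MappingUpdatedAction.
    private static void sendUpdateMappingSketch(
        Client client,
        PutMappingRequest putMappingRequest, // already targeted at the index being updated
        TimeValue dynamicMappingUpdateTimeout,
        ActionListener<Void> listener
    ) {
        putMappingRequest.clusterManagerNodeTimeout(dynamicMappingUpdateTimeout);
        putMappingRequest.timeout(TimeValue.ZERO);
        client.execute(
            AutoPutMappingAction.INSTANCE,
            putMappingRequest,
            ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure)
        );
    }

The pre-7.9 fallback to client.putMapping (and its extra exception unwrapping) was unexecuted code, which is why it can be dropped without a behaviour change.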
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index 33c50c938c3db..96ae94f807a04 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; @@ -163,11 +162,7 @@ public ComposableIndexTemplate(StreamInput in) throws IOException { this.priority = in.readOptionalVLong(); this.version = in.readOptionalVLong(); this.metadata = in.readMap(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new); - } else { - this.dataStreamTemplate = null; - } + this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new); } public List indexPatterns() { @@ -222,9 +217,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVLong(this.priority); out.writeOptionalVLong(this.version); out.writeMap(this.metadata); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeOptionalWriteable(dataStreamTemplate); - } + out.writeOptionalWriteable(dataStreamTemplate); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java index 412d4dba628cb..97771fb6673b8 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchStatusException; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.action.ActionListener; @@ -146,9 +145,6 @@ static ClusterState createDataStream( ClusterState currentState, CreateDataStreamClusterStateUpdateRequest request ) throws Exception { - if (currentState.nodes().getMinNodeVersion().before(LegacyESVersion.V_7_9_0)) { - throw new IllegalStateException("data streams require minimum node version of " + LegacyESVersion.V_7_9_0); - } if (currentState.metadata().dataStreams().containsKey(request.name)) { throw new ResourceAlreadyExistsException("data_stream [" + request.name + "] already exists"); diff --git a/server/src/main/java/org/opensearch/index/store/StoreStats.java b/server/src/main/java/org/opensearch/index/store/StoreStats.java index 8a8af673713e9..f114b9093367d 100644 --- a/server/src/main/java/org/opensearch/index/store/StoreStats.java +++ b/server/src/main/java/org/opensearch/index/store/StoreStats.java @@ -32,8 +32,6 @@ package org.opensearch.index.store; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -55,9 +53,6 @@ public class StoreStats implements Writeable, ToXContentFragment { * prior to receiving the list of files in a peer recovery. 
*/ public static final long UNKNOWN_RESERVED_BYTES = -1L; - - public static final Version RESERVED_BYTES_VERSION = LegacyESVersion.V_7_9_0; - private long sizeInBytes; private long reservedSize; @@ -67,11 +62,7 @@ public StoreStats() { public StoreStats(StreamInput in) throws IOException { sizeInBytes = in.readVLong(); - if (in.getVersion().onOrAfter(RESERVED_BYTES_VERSION)) { - reservedSize = in.readZLong(); - } else { - reservedSize = UNKNOWN_RESERVED_BYTES; - } + reservedSize = in.readZLong(); } /** @@ -124,9 +115,7 @@ public ByteSizeValue getReservedSize() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(sizeInBytes); - if (out.getVersion().onOrAfter(RESERVED_BYTES_VERSION)) { - out.writeZLong(reservedSize); - } + out.writeZLong(reservedSize); } @Override diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 556e4db3400e1..4cc599d968624 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; @@ -724,11 +723,7 @@ private void onException(Exception e) { recoverySettings.retryDelayNetwork(), cause.getMessage() ); - if (request.sourceNode().getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - reestablishRecovery(request, cause.getMessage(), recoverySettings.retryDelayNetwork()); - } else { - retryRecovery(recoveryId, cause.getMessage(), recoverySettings.retryDelayNetwork(), recoverySettings.activityTimeout()); - } + reestablishRecovery(request, cause.getMessage(), recoverySettings.retryDelayNetwork()); return; } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTransportRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTransportRequest.java index b48fe4003e18a..dc5beec87c2b7 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTransportRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTransportRequest.java @@ -32,10 +32,8 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.transport.TransportRequest; import java.io.IOException; @@ -51,11 +49,7 @@ public abstract class RecoveryTransportRequest extends TransportRequest { RecoveryTransportRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - requestSeqNo = in.readLong(); - } else { - requestSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + requestSeqNo = in.readLong(); } RecoveryTransportRequest(long requestSeqNo) { @@ -69,8 +63,6 @@ public long requestSeqNo() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeLong(requestSeqNo); - } + out.writeLong(requestSeqNo); } } diff --git 
a/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java b/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java index a7113fb3fee5c..9637b4a791e5d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.support.RetryableAction; @@ -99,7 +98,7 @@ public void tryAction(ActionListener listener) { @Override public boolean shouldRetry(Exception e) { - return targetNode.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) && retryableException(e); + return retryableException(e); } }; onGoingRetryableActions.put(key, retryableAction); diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java index d1066b867f982..31816157b4515 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java @@ -18,7 +18,6 @@ import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.index.store.StoreStats; import org.opensearch.indices.recovery.RecoveryState; import java.io.IOException; @@ -324,15 +323,7 @@ public FilesDetails() {} FileMetadata file = new FileMetadata(in); fileMetadataMap.put(file.name, file); } - if (in.getVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { - complete = in.readBoolean(); - } else { - // This flag is used by disk-based allocation to decide whether the remaining bytes measurement is accurate or not; if not - // then it falls back on an estimate. There's only a very short window in which the file details are present but incomplete - // so this is a reasonable approximation, and the stats reported to the disk-based allocator don't hit this code path - // anyway since they always use IndexShard#getRecoveryState which is never transported over the wire. 
- complete = fileMetadataMap.isEmpty() == false; - } + complete = in.readBoolean(); } @Override @@ -342,9 +333,7 @@ public void writeTo(StreamOutput out) throws IOException { for (FileMetadata file : files) { file.writeTo(out); } - if (out.getVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { - out.writeBoolean(complete); - } + out.writeBoolean(complete); } @Override diff --git a/server/src/main/java/org/opensearch/script/ScriptStats.java b/server/src/main/java/org/opensearch/script/ScriptStats.java index 9c8f1157cb718..2a4c69a622b18 100644 --- a/server/src/main/java/org/opensearch/script/ScriptStats.java +++ b/server/src/main/java/org/opensearch/script/ScriptStats.java @@ -32,7 +32,6 @@ package org.opensearch.script; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -90,7 +89,7 @@ public ScriptStats(StreamInput in) throws IOException { compilations = in.readVLong(); cacheEvictions = in.readVLong(); compilationLimitTriggered = in.readVLong(); - contextStats = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readList(ScriptContextStats::new) : Collections.emptyList(); + contextStats = in.readList(ScriptContextStats::new); } @Override @@ -98,9 +97,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(compilations); out.writeVLong(cacheEvictions); out.writeVLong(compilationLimitTriggered); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeList(contextStats); - } + out.writeList(contextStats); } public List getContextStats() { diff --git a/server/src/main/java/org/opensearch/search/SearchHit.java b/server/src/main/java/org/opensearch/search/SearchHit.java index 89eeddb07ce5d..4f14e5bf274e9 100644 --- a/server/src/main/java/org/opensearch/search/SearchHit.java +++ b/server/src/main/java/org/opensearch/search/SearchHit.java @@ -33,7 +33,6 @@ package org.opensearch.search; import org.apache.lucene.search.Explanation; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; @@ -179,20 +178,8 @@ public SearchHit(StreamInput in) throws IOException { if (in.readBoolean()) { explanation = readExplanation(in); } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - documentFields = in.readMap(StreamInput::readString, DocumentField::new); - metaFields = in.readMap(StreamInput::readString, DocumentField::new); - } else { - Map fields = readFields(in); - documentFields = new HashMap<>(); - metaFields = new HashMap<>(); - fields.forEach( - (fieldName, docField) -> (MapperService.META_FIELDS_BEFORE_7DOT8.contains(fieldName) ? 
metaFields : documentFields).put( - fieldName, - docField - ) - ); - } + documentFields = in.readMap(StreamInput::readString, DocumentField::new); + metaFields = in.readMap(StreamInput::readString, DocumentField::new); int size = in.readVInt(); if (size == 0) { @@ -283,12 +270,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); writeExplanation(out, explanation); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeMap(documentFields, StreamOutput::writeString, (stream, documentField) -> documentField.writeTo(stream)); - out.writeMap(metaFields, StreamOutput::writeString, (stream, documentField) -> documentField.writeTo(stream)); - } else { - writeFields(out, this.getFields()); - } + out.writeMap(documentFields, StreamOutput::writeString, (stream, documentField) -> documentField.writeTo(stream)); + out.writeMap(metaFields, StreamOutput::writeString, (stream, documentField) -> documentField.writeTo(stream)); if (highlightFields == null) { out.writeVInt(0); } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java index a580d17099ec2..3b6e467a8a732 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java @@ -31,7 +31,6 @@ package org.opensearch.search.aggregations.metrics; -import org.opensearch.LegacyESVersion; import org.opensearch.common.Nullable; import org.opensearch.common.ParseField; import org.opensearch.common.TriFunction; @@ -169,14 +168,7 @@ public static > ConstructingO super(in); values = in.readDoubleArray(); keyed = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - percentilesConfig = (PercentilesConfig) in.readOptionalWriteable((Reader) PercentilesConfig::fromStream); - } else { - int numberOfSignificantValueDigits = in.readVInt(); - double compression = in.readDouble(); - PercentilesMethod method = PercentilesMethod.readFromStream(in); - percentilesConfig = PercentilesConfig.fromLegacy(method, compression, numberOfSignificantValueDigits); - } + percentilesConfig = (PercentilesConfig) in.readOptionalWriteable((Reader) PercentilesConfig::fromStream); this.valuesField = valuesField; } @@ -184,23 +176,7 @@ public static > ConstructingO protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDoubleArray(values); out.writeBoolean(keyed); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeOptionalWriteable(percentilesConfig); - } else { - // Legacy method serialized both SigFigs and compression, even though we only need one. So we need - // to serialize the default for the unused method - int numberOfSignificantValueDigits = percentilesConfig.getMethod().equals(PercentilesMethod.HDR) - ? ((PercentilesConfig.Hdr) percentilesConfig).getNumberOfSignificantValueDigits() - : PercentilesConfig.Hdr.DEFAULT_NUMBER_SIG_FIGS; - - double compression = percentilesConfig.getMethod().equals(PercentilesMethod.TDIGEST) - ? 
((PercentilesConfig.TDigest) percentilesConfig).getCompression() - : PercentilesConfig.TDigest.DEFAULT_COMPRESSION; - - out.writeVInt(numberOfSignificantValueDigits); - out.writeDouble(compression); - percentilesConfig.getMethod().writeTo(out); - } + out.writeOptionalWriteable(percentilesConfig); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetric.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetric.java index 9244d661a699e..7f5841ec792a0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetric.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetric.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.metrics; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.CollectionUtils; @@ -49,8 +48,6 @@ import java.util.Map; import java.util.Objects; -import static java.util.Collections.singletonList; - /** * Implementation of scripted metric agg * @@ -72,30 +69,13 @@ public class InternalScriptedMetric extends InternalAggregation implements Scrip public InternalScriptedMetric(StreamInput in) throws IOException { super(in); reduceScript = in.readOptionalWriteable(Script::new); - if (in.getVersion().before(LegacyESVersion.V_7_8_0)) { - aggregations = singletonList(in.readGenericValue()); - } else { - aggregations = in.readList(StreamInput::readGenericValue); - } + aggregations = in.readList(StreamInput::readGenericValue); } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(reduceScript); - if (out.getVersion().before(LegacyESVersion.V_7_8_0)) { - if (aggregations.size() > 1) { - /* - * If aggregations has more than one entry we're trying to - * serialize an unreduced aggregation. This *should* only - * happen when we're returning a scripted_metric over cross - * cluster search. 
- */ - throw new IllegalArgumentException("scripted_metric doesn't support cross cluster search until 7.8.0"); - } - out.writeGenericValue(aggregations.get(0)); - } else { - out.writeCollection(aggregations, StreamOutput::writeGenericValue); - } + out.writeCollection(aggregations, StreamOutput::writeGenericValue); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java index 63fce83369c18..f31d17919e7e3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.support; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; import org.opensearch.common.TriFunction; @@ -89,11 +88,7 @@ protected MultiValuesSourceFieldConfig(String fieldName, Object missing, Script public MultiValuesSourceFieldConfig(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - this.filter = in.readOptionalNamedWriteable(QueryBuilder.class); - } else { - this.filter = null; - } + this.filter = in.readOptionalNamedWriteable(QueryBuilder.class); } public QueryBuilder getFilter() { @@ -102,9 +97,7 @@ public QueryBuilder getFilter() { @Override public void doWriteTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeOptionalNamedWriteable(filter); - } + out.writeOptionalNamedWriteable(filter); } @Override diff --git a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java index c05c574b77942..2bfe57a0b840a 100644 --- a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java +++ b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java @@ -32,7 +32,6 @@ package org.opensearch.search.profile; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -104,11 +103,7 @@ public ProfileResult(StreamInput in) throws IOException { this.description = in.readString(); this.nodeTime = in.readLong(); breakdown = in.readMap(StreamInput::readString, StreamInput::readLong); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - debug = in.readMap(StreamInput::readString, StreamInput::readGenericValue); - } else { - debug = org.opensearch.common.collect.Map.of(); - } + debug = in.readMap(StreamInput::readString, StreamInput::readGenericValue); children = in.readList(ProfileResult::new); } @@ -118,9 +113,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(description); out.writeLong(nodeTime); // not Vlong because can be negative out.writeMap(breakdown, StreamOutput::writeString, StreamOutput::writeLong); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeMap(debug, StreamOutput::writeString, StreamOutput::writeGenericValue); - } + out.writeMap(debug, StreamOutput::writeString, StreamOutput::writeGenericValue); out.writeList(children); } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 
60c01d0b04639..6e8c93791cedc 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -38,7 +38,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; @@ -542,13 +541,8 @@ public ClusterState execute(ClusterState currentState) { final Settings.Builder indexSettingsBuilder = Settings.builder() .put(snapshotIndexMetadata.getSettings()) .put(IndexMetadata.SETTING_INDEX_UUID, currentIndexMetadata.getIndexUUID()); - // Only add a restore uuid if either all nodes in the cluster support it (version >= 7.9) or if the - // index itself was created after 7.9 and thus won't be restored to a node that doesn't support the - // setting anyway - if (snapshotIndexMetadata.getCreationVersion().onOrAfter(LegacyESVersion.V_7_9_0) - || currentState.nodes().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - indexSettingsBuilder.put(SETTING_HISTORY_UUID, UUIDs.randomBase64UUID()); - } + // add a restore uuid + indexSettingsBuilder.put(SETTING_HISTORY_UUID, UUIDs.randomBase64UUID()); indexMdBuilder.settings(indexSettingsBuilder); IndexMetadata updatedIndexMetadata = indexMdBuilder.index(renamedIndexName).build(); rtBuilder.addAsRestore(updatedIndexMetadata, recoverySource); diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java b/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java index 4073a48bcde0d..ebce26c4bbfbc 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchSecurityException; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; @@ -214,11 +213,7 @@ private BanParentTaskRequest(StreamInput in) throws IOException { parentTaskId = TaskId.readFromStream(in); ban = in.readBoolean(); reason = ban ? 
in.readString() : null; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - waitForCompletion = in.readBoolean(); - } else { - waitForCompletion = false; - } + waitForCompletion = in.readBoolean(); } @Override @@ -229,9 +224,7 @@ public void writeTo(StreamOutput out) throws IOException { if (ban) { out.writeString(reason); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeBoolean(waitForCompletion); - } + out.writeBoolean(waitForCompletion); } } diff --git a/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java b/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java index 7325024138500..1998dbf9c4c5c 100644 --- a/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java @@ -31,7 +31,6 @@ package org.opensearch.cluster.action.index; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.mapping.put.AutoPutMappingAction; @@ -154,39 +153,9 @@ protected void sendUpdateMapping(Index index, Mapping mappingUpdate, ActionListe assertTrue(fut2.isDone()); } - public void testSendUpdateMappingUsingPutMappingAction() { - DiscoveryNodes nodes = DiscoveryNodes.builder() - .add(new DiscoveryNode("first", buildNewFakeTransportAddress(), LegacyESVersion.V_7_8_0)) - .build(); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).nodes(nodes).build(); - ClusterService clusterService = mock(ClusterService.class); - when(clusterService.state()).thenReturn(clusterState); - - IndicesAdminClient indicesAdminClient = mock(IndicesAdminClient.class); - AdminClient adminClient = mock(AdminClient.class); - when(adminClient.indices()).thenReturn(indicesAdminClient); - Client client = mock(Client.class); - when(client.admin()).thenReturn(adminClient); - - MappingUpdatedAction mua = new MappingUpdatedAction( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - clusterService - ); - mua.setClient(client); - - Settings indexSettings = Settings.builder().put(SETTING_VERSION_CREATED, Version.CURRENT).build(); - final Mapper.BuilderContext context = new Mapper.BuilderContext(indexSettings, new ContentPath()); - RootObjectMapper rootObjectMapper = new RootObjectMapper.Builder("name").build(context); - Mapping update = new Mapping(LegacyESVersion.V_7_8_0, rootObjectMapper, new MetadataFieldMapper[0], Map.of()); - - mua.sendUpdateMapping(new Index("name", "uuid"), update, ActionListener.wrap(() -> {})); - verify(indicesAdminClient).putMapping(any(), any()); - } - public void testSendUpdateMappingUsingAutoPutMappingAction() { DiscoveryNodes nodes = DiscoveryNodes.builder() - .add(new DiscoveryNode("first", buildNewFakeTransportAddress(), LegacyESVersion.V_7_9_0)) + .add(new DiscoveryNode("first", buildNewFakeTransportAddress(), Version.V_3_0_0)) .build(); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).nodes(nodes).build(); ClusterService clusterService = mock(ClusterService.class); @@ -208,7 +177,7 @@ public void testSendUpdateMappingUsingAutoPutMappingAction() { Settings indexSettings = Settings.builder().put(SETTING_VERSION_CREATED, Version.CURRENT).build(); final Mapper.BuilderContext context = new Mapper.BuilderContext(indexSettings, new ContentPath()); RootObjectMapper rootObjectMapper = new 
RootObjectMapper.Builder("name").build(context); - Mapping update = new Mapping(LegacyESVersion.V_7_9_0, rootObjectMapper, new MetadataFieldMapper[0], Map.of()); + Mapping update = new Mapping(Version.V_3_0_0, rootObjectMapper, new MetadataFieldMapper[0], Map.of()); mua.sendUpdateMapping(new Index("name", "uuid"), update, ActionListener.wrap(() -> {})); verify(indicesAdminClient).execute(eq(AutoPutMappingAction.INSTANCE), any(), any()); diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 8c30a8ff19c89..4f16e7526c9ea 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.node; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; @@ -153,11 +152,11 @@ public DiscoveryNodeRole getCompatibilityRole(Version nodeVersion) { { BytesStreamOutput streamOutput = new BytesStreamOutput(); - streamOutput.setVersion(LegacyESVersion.V_7_9_0); + streamOutput.setVersion(Version.V_2_0_0); node.writeTo(streamOutput); StreamInput in = StreamInput.wrap(streamOutput.bytes().toBytesRef().bytes); - in.setVersion(LegacyESVersion.V_7_9_0); + in.setVersion(Version.V_2_0_0); DiscoveryNode serialized = new DiscoveryNode(in); assertThat(serialized.getRoles().stream().map(DiscoveryNodeRole::roleName).collect(Collectors.joining()), equalTo("data")); } diff --git a/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java index f6489d264a6d3..957f3b936a614 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java @@ -33,7 +33,6 @@ package org.opensearch.index.mapper; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; @@ -443,7 +442,7 @@ public void testAnalyzers() { public void testDeprecatedParameters() { // 'index' is declared explicitly, 'store' is not, but is one of the previously always-accepted params String mapping = "{\"type\":\"test_mapper\",\"index\":false,\"store\":true,\"required\":\"value\"}"; - TestMapper mapper = fromMapping(mapping, LegacyESVersion.V_7_8_0); + TestMapper mapper = fromMapping(mapping, Version.V_1_0_0); assertWarnings("Parameter [store] has no effect on type [test_mapper] and will be removed in future"); assertFalse(mapper.index); assertEquals("{\"field\":{\"type\":\"test_mapper\",\"index\":false,\"required\":\"value\"}}", Strings.toString(mapper)); From 763ef13ad42cb912fb406c5d42fa7f76e3708e89 Mon Sep 17 00:00:00 2001 From: Sergey Nuyanzin Date: Tue, 1 Nov 2022 14:03:46 +0100 Subject: [PATCH 112/122] Use getParameterCount instead of getParameterTypes (#4821) * Use getParameterCount instead of getParameterTypes Signed-off-by: Sergey Nuyanzin * Update changelog Signed-off-by: Sergey Nuyanzin * Apply spotless Signed-off-by: Sergey Nuyanzin * Address feedback Signed-off-by: Sergey Nuyanzin * Address feedback Signed-off-by: Sergey Nuyanzin Signed-off-by: Sergey Nuyanzin Co-authored-by: Daniel (dB.) 
Doubrovkine --- CHANGELOG.md | 1 + .../gradle/test/JUnit3MethodProvider.java | 2 +- .../client/RestHighLevelClientTests.java | 49 +++++++++---------- .../org/opensearch/painless/Compiler.java | 5 +- .../painless/PainlessScriptEngine.java | 26 +++++----- .../opensearch/painless/ScriptClassInfo.java | 4 +- .../lookup/PainlessLookupBuilder.java | 5 +- .../OpenSearchLoggerUsageTests.java | 44 +++++++++-------- 8 files changed, 70 insertions(+), 66 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 218c291e1417c..7b6d0238b9345 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added resource usage trackers for in-flight cancellation of SearchShardTask ([#4805](https://github.com/opensearch-project/OpenSearch/pull/4805)) - Renamed flaky tests ([#4912](https://github.com/opensearch-project/OpenSearch/pull/4912)) - Update previous release bwc version to 2.5.0 ([#5003](https://github.com/opensearch-project/OpenSearch/pull/5003)) +- Use getParameterCount instead of getParameterTypes ([#4821](https://github.com/opensearch-project/OpenSearch/pull/4821)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java index 0c01b6d519d62..163a903d31832 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java @@ -59,7 +59,7 @@ public Collection getTestMethods(Class suiteClass, ClassModel classMo if (m.getName().startsWith("test") && Modifier.isPublic(m.getModifiers()) && !Modifier.isStatic(m.getModifiers()) - && m.getParameterTypes().length == 0) { + && m.getParameterCount() == 0) { result.add(m); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index d522bf8c4d005..dc89b605be689 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -1005,37 +1005,34 @@ private static void assertSyncMethod(Method method, String apiName, List } assertEquals("incorrect number of exceptions for method [" + method + "]", 1, method.getExceptionTypes().length); + final Class[] parameterTypes = method.getParameterTypes(); // a few methods don't accept a request object as argument if (APIS_WITHOUT_REQUEST_OBJECT.contains(apiName)) { - assertEquals("incorrect number of arguments for method [" + method + "]", 1, method.getParameterTypes().length); - assertThat( - "the parameter to method [" + method + "] is the wrong type", - method.getParameterTypes()[0], - equalTo(RequestOptions.class) - ); + assertEquals("incorrect number of arguments for method [" + method + "]", 1, method.getParameterCount()); + assertThat("the parameter to method [" + method + "] is the wrong type", parameterTypes[0], equalTo(RequestOptions.class)); } else { - assertEquals("incorrect number of arguments for method [" + method + "]", 2, method.getParameterTypes().length); + assertEquals("incorrect number of arguments for method [" + method + "]", 2, method.getParameterCount()); // This is no longer true for all methods. 
Some methods can contain these 2 args backwards because of deprecation - if (method.getParameterTypes()[0].equals(RequestOptions.class)) { + if (parameterTypes[0].equals(RequestOptions.class)) { assertThat( "the first parameter to method [" + method + "] is the wrong type", - method.getParameterTypes()[0], + parameterTypes[0], equalTo(RequestOptions.class) ); assertThat( "the second parameter to method [" + method + "] is the wrong type", - method.getParameterTypes()[1].getSimpleName(), + parameterTypes[1].getSimpleName(), endsWith("Request") ); } else { assertThat( "the first parameter to method [" + method + "] is the wrong type", - method.getParameterTypes()[0].getSimpleName(), + parameterTypes[0].getSimpleName(), endsWith("Request") ); assertThat( "the second parameter to method [" + method + "] is the wrong type", - method.getParameterTypes()[1], + parameterTypes[1], equalTo(RequestOptions.class) ); } @@ -1049,39 +1046,40 @@ private static void assertAsyncMethod(Map> methods, Method m ); assertThat("async method [" + method + "] should return Cancellable", method.getReturnType(), equalTo(Cancellable.class)); assertEquals("async method [" + method + "] should not throw any exceptions", 0, method.getExceptionTypes().length); + final Class[] parameterTypes = method.getParameterTypes(); if (APIS_WITHOUT_REQUEST_OBJECT.contains(apiName.replaceAll("_async$", ""))) { - assertEquals(2, method.getParameterTypes().length); - assertThat(method.getParameterTypes()[0], equalTo(RequestOptions.class)); - assertThat(method.getParameterTypes()[1], equalTo(ActionListener.class)); + assertEquals(2, parameterTypes.length); + assertThat(parameterTypes[0], equalTo(RequestOptions.class)); + assertThat(parameterTypes[1], equalTo(ActionListener.class)); } else { - assertEquals("async method [" + method + "] has the wrong number of arguments", 3, method.getParameterTypes().length); + assertEquals("async method [" + method + "] has the wrong number of arguments", 3, method.getParameterCount()); // This is no longer true for all methods. 
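Aside: the refactoring in this test (and throughout this patch) swaps method.getParameterTypes().length for method.getParameterCount() when only the arity matters, and hoists getParameterTypes() into a local variable when the element types are actually inspected, since getParameterTypes() returns a fresh defensive copy of the array on every call. A minimal sketch of the idiom, using hypothetical helper names that are not part of this test:

    import java.lang.reflect.Method;

    final class ParameterReflectionSketch {
        // Arity-only check: getParameterCount() does not allocate a copy of the parameter array.
        static boolean isNoArg(Method m) {
            return m.getParameterCount() == 0;
        }

        // Element inspection: call getParameterTypes() once and reuse the local copy.
        static boolean firstParameterIs(Method m, Class<?> expected) {
            final Class<?>[] parameterTypes = m.getParameterTypes();
            return parameterTypes.length > 0 && parameterTypes[0].equals(expected);
        }
    }
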
Some methods can contain these 2 args backwards because of deprecation - if (method.getParameterTypes()[0].equals(RequestOptions.class)) { + if (parameterTypes[0].equals(RequestOptions.class)) { assertThat( "the first parameter to async method [" + method + "] should be a request type", - method.getParameterTypes()[0], + parameterTypes[0], equalTo(RequestOptions.class) ); assertThat( "the second parameter to async method [" + method + "] is the wrong type", - method.getParameterTypes()[1].getSimpleName(), + parameterTypes[1].getSimpleName(), endsWith("Request") ); } else { assertThat( "the first parameter to async method [" + method + "] should be a request type", - method.getParameterTypes()[0].getSimpleName(), + parameterTypes[0].getSimpleName(), endsWith("Request") ); assertThat( "the second parameter to async method [" + method + "] is the wrong type", - method.getParameterTypes()[1], + parameterTypes[1], equalTo(RequestOptions.class) ); } assertThat( "the third parameter to async method [" + method + "] is the wrong type", - method.getParameterTypes()[2], + parameterTypes[2], equalTo(ActionListener.class) ); } @@ -1094,16 +1092,17 @@ private static void assertSubmitTaskMethod( ClientYamlSuiteRestSpec restSpec ) { String methodName = extractMethodName(apiName); + final Class[] parameterTypes = method.getParameterTypes(); assertTrue("submit task method [" + method.getName() + "] doesn't have corresponding sync method", methods.containsKey(methodName)); - assertEquals("submit task method [" + method + "] has the wrong number of arguments", 2, method.getParameterTypes().length); + assertEquals("submit task method [" + method + "] has the wrong number of arguments", 2, method.getParameterCount()); assertThat( "the first parameter to submit task method [" + method + "] is the wrong type", - method.getParameterTypes()[0].getSimpleName(), + parameterTypes[0].getSimpleName(), endsWith("Request") ); assertThat( "the second parameter to submit task method [" + method + "] is the wrong type", - method.getParameterTypes()[1], + parameterTypes[1], equalTo(RequestOptions.class) ); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java index 7b9efa4deb207..35c676653fdc3 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java @@ -212,8 +212,9 @@ private static void addFactoryMethod(Map> additionalClasses, Cl } additionalClasses.put(factoryClass.getName(), factoryClass); - for (int i = 0; i < factoryMethod.getParameterTypes().length; ++i) { - Class parameterClazz = factoryMethod.getParameterTypes()[i]; + final Class[] parameterTypes = factoryMethod.getParameterTypes(); + for (int i = 0; i < parameterTypes.length; ++i) { + Class parameterClazz = parameterTypes[i]; additionalClasses.put(parameterClazz.getName(), parameterClazz); } } diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java index ca6e68706709a..e9edfb73c740c 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java @@ -195,11 +195,12 @@ private Type generateStatefulFactory(Loader loader, ScriptContext context } } - for (int count = 0; count < 
newFactory.getParameterTypes().length; ++count) { + final Class[] parameterTypes = newFactory.getParameterTypes(); + for (int count = 0; count < parameterTypes.length; ++count) { writer.visitField( Opcodes.ACC_PRIVATE | Opcodes.ACC_FINAL, "$arg" + count, - Type.getType(newFactory.getParameterTypes()[count]).getDescriptor(), + Type.getType(parameterTypes[count]).getDescriptor(), null, null ).visitEnd(); @@ -211,7 +212,7 @@ private Type generateStatefulFactory(Loader loader, ScriptContext context ); org.objectweb.asm.commons.Method init = new org.objectweb.asm.commons.Method( "", - MethodType.methodType(void.class, newFactory.getParameterTypes()).toMethodDescriptorString() + MethodType.methodType(void.class, parameterTypes).toMethodDescriptorString() ); GeneratorAdapter constructor = new GeneratorAdapter( @@ -223,10 +224,10 @@ private Type generateStatefulFactory(Loader loader, ScriptContext context constructor.loadThis(); constructor.invokeConstructor(OBJECT_TYPE, base); - for (int count = 0; count < newFactory.getParameterTypes().length; ++count) { + for (int count = 0; count < parameterTypes.length; ++count) { constructor.loadThis(); constructor.loadArg(count); - constructor.putField(Type.getType("L" + className + ";"), "$arg" + count, Type.getType(newFactory.getParameterTypes()[count])); + constructor.putField(Type.getType("L" + className + ";"), "$arg" + count, Type.getType(parameterTypes[count])); } constructor.returnValue(); @@ -247,7 +248,7 @@ private Type generateStatefulFactory(Loader loader, ScriptContext context MethodType.methodType(newInstance.getReturnType(), newInstance.getParameterTypes()).toMethodDescriptorString() ); - List> parameters = new ArrayList<>(Arrays.asList(newFactory.getParameterTypes())); + List> parameters = new ArrayList<>(Arrays.asList(parameterTypes)); parameters.addAll(Arrays.asList(newInstance.getParameterTypes())); org.objectweb.asm.commons.Method constru = new org.objectweb.asm.commons.Method( @@ -264,9 +265,9 @@ private Type generateStatefulFactory(Loader loader, ScriptContext context adapter.newInstance(WriterConstants.CLASS_TYPE); adapter.dup(); - for (int count = 0; count < newFactory.getParameterTypes().length; ++count) { + for (int count = 0; count < parameterTypes.length; ++count) { adapter.loadThis(); - adapter.getField(Type.getType("L" + className + ";"), "$arg" + count, Type.getType(newFactory.getParameterTypes()[count])); + adapter.getField(Type.getType("L" + className + ";"), "$arg" + count, Type.getType(parameterTypes[count])); } adapter.loadArgs(); @@ -334,13 +335,14 @@ private T generateFactory(Loader loader, ScriptContext context, Type clas } } + final Class[] parameterTypes = reflect.getParameterTypes(); org.objectweb.asm.commons.Method instance = new org.objectweb.asm.commons.Method( reflect.getName(), - MethodType.methodType(reflect.getReturnType(), reflect.getParameterTypes()).toMethodDescriptorString() + MethodType.methodType(reflect.getReturnType(), parameterTypes).toMethodDescriptorString() ); org.objectweb.asm.commons.Method constru = new org.objectweb.asm.commons.Method( "", - MethodType.methodType(void.class, reflect.getParameterTypes()).toMethodDescriptorString() + MethodType.methodType(void.class, parameterTypes).toMethodDescriptorString() ); GeneratorAdapter adapter = new GeneratorAdapter( @@ -421,9 +423,7 @@ private T generateFactory(Loader loader, ScriptContext context, Type clas private void writeNeedsMethods(Class clazz, ClassWriter writer, Set extractedVariables) { for (Method method : clazz.getMethods()) { - if 
(method.getName().startsWith("needs") - && method.getReturnType().equals(boolean.class) - && method.getParameterTypes().length == 0) { + if (method.getName().startsWith("needs") && method.getReturnType().equals(boolean.class) && method.getParameterCount() == 0) { String name = method.getName(); name = name.substring(5); name = Character.toLowerCase(name.charAt(0)) + name.substring(1); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/ScriptClassInfo.java b/modules/lang-painless/src/main/java/org/opensearch/painless/ScriptClassInfo.java index e80f92442680a..26dcb4adabea3 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/ScriptClassInfo.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/ScriptClassInfo.java @@ -88,7 +88,7 @@ public ScriptClassInfo(PainlessLookup painlessLookup, Class baseClass) { + "] has more than one." ); } - } else if (m.getName().startsWith("needs") && m.getReturnType() == boolean.class && m.getParameterTypes().length == 0) { + } else if (m.getName().startsWith("needs") && m.getReturnType() == boolean.class && m.getParameterCount() == 0) { needsMethods.add(new org.objectweb.asm.commons.Method(m.getName(), NEEDS_PARAMETER_METHOD_TYPE.toMethodDescriptorString())); } else if (m.getName().startsWith("get") && m.getName().equals("getClass") == false @@ -124,7 +124,7 @@ public ScriptClassInfo(PainlessLookup painlessLookup, Class baseClass) { FunctionTable.LocalFunction defConverter = null; for (java.lang.reflect.Method m : baseClass.getMethods()) { if (m.getName().startsWith("convertFrom") - && m.getParameterTypes().length == 1 + && m.getParameterCount() == 1 && m.getReturnType() == returnType && Modifier.isStatic(m.getModifiers())) { diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java index e43d1beb9b25b..e79eda975f417 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java @@ -2168,9 +2168,10 @@ private void generateBridgeMethod(PainlessClassBuilder painlessClassBuilder, Pai bridgeMethodWriter.loadArg(0); } - for (int typeParameterCount = 0; typeParameterCount < javaMethod.getParameterTypes().length; ++typeParameterCount) { + final Class[] typeParameters = javaMethod.getParameterTypes(); + for (int typeParameterCount = 0; typeParameterCount < typeParameters.length; ++typeParameterCount) { bridgeMethodWriter.loadArg(typeParameterCount + bridgeTypeParameterOffset); - Class typeParameter = javaMethod.getParameterTypes()[typeParameterCount]; + Class typeParameter = typeParameters[typeParameterCount]; if (typeParameter == Byte.class) bridgeMethodWriter.invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_BYTE_IMPLICIT); else if (typeParameter == Short.class) bridgeMethodWriter.invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_SHORT_IMPLICIT); diff --git a/test/logger-usage/src/test/java/org/opensearch/test/loggerusage/OpenSearchLoggerUsageTests.java b/test/logger-usage/src/test/java/org/opensearch/test/loggerusage/OpenSearchLoggerUsageTests.java index e9723c269763c..3bbc8100955f3 100644 --- a/test/logger-usage/src/test/java/org/opensearch/test/loggerusage/OpenSearchLoggerUsageTests.java +++ b/test/logger-usage/src/test/java/org/opensearch/test/loggerusage/OpenSearchLoggerUsageTests.java @@ -79,31 +79,32 @@ public void testLoggerUsageChecks() 
throws IOException { } } - public void testLoggerUsageCheckerCompatibilityWithLog4j2Logger() throws NoSuchMethodException { + public void testLoggerUsageCheckerCompatibilityWithLog4j2Logger() { for (Method method : Logger.class.getMethods()) { if (OpenSearchLoggerUsageChecker.LOGGER_METHODS.contains(method.getName())) { - assertThat(method.getParameterTypes().length, greaterThanOrEqualTo(1)); - int markerOffset = method.getParameterTypes()[0].equals(Marker.class) ? 1 : 0; - int paramLength = method.getParameterTypes().length - markerOffset; + assertThat(method.getParameterCount(), greaterThanOrEqualTo(1)); + final Class[] parameterTypes = method.getParameterTypes(); + int markerOffset = parameterTypes[0].equals(Marker.class) ? 1 : 0; + int paramLength = parameterTypes.length - markerOffset; if (method.isVarArgs()) { assertEquals(2, paramLength); - assertEquals(String.class, method.getParameterTypes()[markerOffset]); - assertThat(method.getParameterTypes()[markerOffset + 1], is(oneOf(Object[].class, Supplier[].class))); + assertEquals(String.class, parameterTypes[markerOffset]); + assertThat(parameterTypes[markerOffset + 1], is(oneOf(Object[].class, Supplier[].class))); } else { - assertThat(method.getParameterTypes()[markerOffset], is(oneOf(Message.class, MessageSupplier.class, + assertThat(parameterTypes[markerOffset], is(oneOf(Message.class, MessageSupplier.class, CharSequence.class, Object.class, String.class, Supplier.class))); if (paramLength == 2) { - assertThat(method.getParameterTypes()[markerOffset + 1], is(oneOf(Throwable.class, Object.class))); - if (method.getParameterTypes()[markerOffset + 1].equals(Object.class)) { - assertEquals(String.class, method.getParameterTypes()[markerOffset]); + assertThat(parameterTypes[markerOffset + 1], is(oneOf(Throwable.class, Object.class))); + if (parameterTypes[markerOffset + 1].equals(Object.class)) { + assertEquals(String.class, parameterTypes[markerOffset]); } } if (paramLength > 2) { - assertEquals(String.class, method.getParameterTypes()[markerOffset]); + assertEquals(String.class, parameterTypes[markerOffset]); assertThat(paramLength, lessThanOrEqualTo(11)); for (int i = 1; i < paramLength; i++) { - assertEquals(Object.class, method.getParameterTypes()[markerOffset + i]); + assertEquals(Object.class, parameterTypes[markerOffset + i]); } } } @@ -115,16 +116,17 @@ public void testLoggerUsageCheckerCompatibilityWithLog4j2Logger() throws NoSuchM } for (Constructor constructor : ParameterizedMessage.class.getConstructors()) { - assertThat(constructor.getParameterTypes().length, greaterThanOrEqualTo(2)); - assertEquals(String.class, constructor.getParameterTypes()[0]); - assertThat(constructor.getParameterTypes()[1], is(oneOf(String[].class, Object[].class, Object.class))); - - if (constructor.getParameterTypes().length > 2) { - assertEquals(3, constructor.getParameterTypes().length); - if (constructor.getParameterTypes()[1].equals(Object.class)) { - assertEquals(Object.class, constructor.getParameterTypes()[2]); + assertThat(constructor.getParameterCount(), greaterThanOrEqualTo(2)); + final Class[] parameterTypes = constructor.getParameterTypes(); + assertEquals(String.class, parameterTypes[0]); + assertThat(parameterTypes[1], is(oneOf(String[].class, Object[].class, Object.class))); + + if (parameterTypes.length > 2) { + assertEquals(3, parameterTypes.length); + if (parameterTypes[1].equals(Object.class)) { + assertEquals(Object.class, parameterTypes[2]); } else { - assertEquals(Throwable.class, constructor.getParameterTypes()[2]); + 
assertEquals(Throwable.class, parameterTypes[2]); } } } From fab43365dabc4e15312570454b4a804be08d6256 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 1 Nov 2022 08:26:31 -0500 Subject: [PATCH 113/122] [Remove] LegacyESVersion.V_7_6_* and V_7_7_* constants (#4837) Removes all usages of LegacyESVersion.V_7_6_ and LegacyESVersion.V_7_7_ version constants along with ancient API logic. Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../common/CommonAnalysisModulePlugin.java | 30 ++++++++++-- .../ConcatenateGraphTokenFilterFactory.java | 20 ++------ .../GeoTileGridValuesSourceBuilder.java | 9 +--- .../geogrid/GeoGridAggregationBuilder.java | 9 +--- .../index/rankeval/RankEvalRequest.java | 9 +--- .../org/opensearch/upgrades/RecoveryIT.java | 8 +--- .../opensearch/index/shard/IndexShardIT.java | 27 ++++++++++- .../java/org/opensearch/LegacyESVersion.java | 5 -- .../cluster/node/info/NodesInfoRequest.java | 35 +------------- .../NodesReloadSecureSettingsRequest.java | 21 ++++----- .../cluster/node/stats/NodesStatsRequest.java | 35 +------------- .../restore/RestoreSnapshotRequest.java | 6 --- .../cluster/stats/ClusterStatsResponse.java | 17 +++---- .../action/admin/indices/alias/Alias.java | 11 +---- .../indices/alias/IndicesAliasesRequest.java | 11 +---- .../indices/forcemerge/ForceMergeRequest.java | 17 +++---- .../action/bulk/BulkItemResponse.java | 11 +---- .../action/fieldcaps/FieldCapabilities.java | 11 +---- .../fieldcaps/IndexFieldCapabilities.java | 46 ++++--------------- .../action/search/SearchRequest.java | 13 +----- .../action/search/TransportSearchHelper.java | 10 +--- .../action/support/IndicesOptions.java | 12 +---- .../cluster/coordination/JoinRequest.java | 13 ++---- .../cluster/metadata/AliasMetadata.java | 13 +----- .../cluster/metadata/AutoExpandReplicas.java | 14 ++---- .../metadata/ComponentTemplateMetadata.java | 5 +- .../ComposableIndexTemplateMetadata.java | 5 +- .../cluster/metadata/DataStreamMetadata.java | 5 +- .../cluster/metadata/RepositoryMetadata.java | 19 ++------ .../cluster/routing/RecoverySource.java | 25 ++-------- .../java/org/opensearch/common/Rounding.java | 4 -- ...ransportNodesListGatewayStartedShards.java | 23 ++-------- .../org/opensearch/index/engine/Engine.java | 2 +- .../index/mapper/RootObjectMapper.java | 4 +- .../index/seqno/ReplicationTracker.java | 4 +- .../indices/analysis/AnalysisModule.java | 16 ++----- .../TransportNodesListShardStoreMetadata.java | 23 ++-------- .../org/opensearch/ingest/IngestStats.java | 9 +--- .../blobstore/BlobStoreRepository.java | 4 -- .../opensearch/script/ScriptException.java | 31 +++++++------ .../org/opensearch/search/SearchService.java | 11 +---- .../DateHistogramValuesSourceBuilder.java | 9 +--- .../bucket/composite/InternalComposite.java | 8 +--- .../BaseMultiValuesSourceFieldConfig.java | 13 +----- .../search/internal/ShardSearchContextId.java | 11 +---- .../search/internal/ShardSearchRequest.java | 13 ++---- .../opensearch/transport/InboundDecoder.java | 10 ++-- .../opensearch/transport/OutboundMessage.java | 7 +-- .../transport/ProxyConnectionStrategy.java | 11 +---- .../transport/RemoteConnectionInfo.java | 44 ++++-------------- .../org/opensearch/transport/TcpHeader.java | 21 ++------- .../opensearch/transport/TransportLogger.java | 8 +--- .../ExceptionSerializationTests.java | 6 +-- .../action/OriginalIndicesTests.java | 10 +--- .../ClusterSearchShardsRequestTests.java | 8 +--- .../indices/close/CloseIndexRequestTests.java | 15 +----- .../search/TransportSearchHelperTests.java | 20 
++------ .../transport/InboundDecoderTests.java | 10 +--- .../test/rest/OpenSearchRestTestCase.java | 42 +++++++---------- 60 files changed, 237 insertions(+), 633 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b6d0238b9345..9af227a6ba710 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -127,6 +127,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) - Revert PR 4656 to unblock Windows CI ([#4949](https://github.com/opensearch-project/OpenSearch/pull/4949)) - Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) +- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants ([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837)) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index b0850d0b9144d..1ee4eff6ba055 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -124,7 +124,7 @@ import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.apache.lucene.analysis.util.ElisionFilter; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; @@ -347,7 +347,12 @@ public Map> getTokenizers() { tokenizers.put("simple_pattern_split", SimplePatternSplitTokenizerFactory::new); tokenizers.put("thai", ThaiTokenizerFactory::new); tokenizers.put("nGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_3_0_0)) { + throw new IllegalArgumentException( + "The [nGram] tokenizer name was deprecated pre 1.0. " + + "Please use the tokenizer name to [ngram] for indices created in versions 3.0 or higher instead." + ); + } else { deprecationLogger.deprecate( "nGram_tokenizer_deprecation", "The [nGram] tokenizer name is deprecated and will be removed in a future version. " @@ -358,7 +363,12 @@ public Map> getTokenizers() { }); tokenizers.put("ngram", NGramTokenizerFactory::new); tokenizers.put("edgeNGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_3_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] tokenizer name was deprecated pre 1.0. " + + "Please use the tokenizer name to [edge_ngram] for indices created in versions 3.0 or higher instead." + ); + } else { deprecationLogger.deprecate( "edgeNGram_tokenizer_deprecation", "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. " @@ -606,7 +616,12 @@ public List getPreConfiguredTokenizers() { // Temporary shim for aliases. 
TODO deprecate after they are moved tokenizers.add(PreConfiguredTokenizer.openSearchVersion("nGram", (version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_6_0)) { + if (version.onOrAfter(Version.V_3_0_0)) { + throw new IllegalArgumentException( + "The [nGram] tokenizer name was deprecated pre 1.0. " + + "Please use the tokenizer name to [ngram] for indices created in versions 3.0 or higher instead." + ); + } else { deprecationLogger.deprecate( "nGram_tokenizer_deprecation", "The [nGram] tokenizer name is deprecated and will be removed in a future version. " @@ -616,7 +631,12 @@ public List getPreConfiguredTokenizers() { return new NGramTokenizer(); })); tokenizers.add(PreConfiguredTokenizer.openSearchVersion("edgeNGram", (version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_6_0)) { + if (version.onOrAfter(Version.V_3_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] tokenizer name was deprecated pre 1.0. " + + "Please use the tokenizer name to [edge_ngram] for indices created in versions 3.0 or higher instead." + ); + } else { deprecationLogger.deprecate( "edgeNGram_tokenizer_deprecation", "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. " diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java index 0d1a2b185d1d3..7c1c15ef74e30 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java @@ -11,7 +11,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; -import org.opensearch.LegacyESVersion; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; @@ -24,11 +23,6 @@ * max_graph_expansions is 100 as the default value of 10_000 seems to be unnecessarily large and preserve_separator is false. * *

      - *
    • preserve_separator: - * For LegacyESVersion lesser than {@link LegacyESVersion#V_7_6_0} i.e. lucene versions lesser - * than {@link org.apache.lucene.util.Version#LUCENE_8_4_0} - * Whether {@link ConcatenateGraphFilter#SEP_LABEL} should separate the input tokens in the concatenated token. - *
    • *
    • token_separator: * Separator to use for concatenation. Must be a String with a single character or empty. * If not present, {@link ConcatenateGraphTokenFilterFactory#DEFAULT_TOKEN_SEPARATOR} will be used. @@ -59,17 +53,11 @@ public class ConcatenateGraphTokenFilterFactory extends AbstractTokenFilterFacto ConcatenateGraphTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0)) { // i.e. Lucene 8.4.0 - String separator = settings.get("token_separator", DEFAULT_TOKEN_SEPARATOR); - if (separator.length() > 1) { - throw new IllegalArgumentException("token_separator must be either empty or a single character"); - } - tokenSeparator = separator.length() == 0 ? null : separator.charAt(0); // null means no separator while concatenating - } else { - boolean preserveSep = settings.getAsBoolean("preserve_separator", ConcatenateGraphFilter.DEFAULT_PRESERVE_SEP); - tokenSeparator = preserveSep ? ConcatenateGraphFilter.DEFAULT_TOKEN_SEPARATOR : null; + String separator = settings.get("token_separator", DEFAULT_TOKEN_SEPARATOR); + if (separator.length() > 1) { + throw new IllegalArgumentException("token_separator must be either empty or a single character"); } - + tokenSeparator = separator.length() == 0 ? null : separator.charAt(0); // null means no separator while concatenating maxGraphExpansions = settings.getAsInt("max_graph_expansions", DEFAULT_MAX_GRAPH_EXPANSIONS); preservePositionIncrements = settings.getAsBoolean("preserve_position_increments", DEFAULT_PRESERVE_POSITION_INCREMENTS); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java index 84d5943da287f..9e671118637b9 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java @@ -33,7 +33,6 @@ package org.opensearch.geo.search.aggregations.bucket.composite; import org.apache.lucene.index.IndexReader; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; @@ -175,9 +174,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { public GeoTileGridValuesSourceBuilder(StreamInput in) throws IOException { super(in); this.precision = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - this.geoBoundingBox = new GeoBoundingBox(in); - } + this.geoBoundingBox = new GeoBoundingBox(in); } public GeoTileGridValuesSourceBuilder precision(int precision) { @@ -198,9 +195,7 @@ public GeoTileGridValuesSourceBuilder format(String format) { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeInt(precision); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - geoBoundingBox.writeTo(out); - } + geoBoundingBox.writeTo(out); } @Override diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 0ca2a28844f99..abc892396fbf7 100644 
--- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -32,7 +32,6 @@ package org.opensearch.geo.search.aggregations.bucket.geogrid; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.geo.GeoBoundingBox; @@ -125,9 +124,7 @@ public GeoGridAggregationBuilder(StreamInput in) throws IOException { precision = in.readVInt(); requiredSize = in.readVInt(); shardSize = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - geoBoundingBox = new GeoBoundingBox(in); - } + geoBoundingBox = new GeoBoundingBox(in); } @Override @@ -140,9 +137,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(precision); out.writeVInt(requiredSize); out.writeVInt(shardSize); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - geoBoundingBox.writeTo(out); - } + geoBoundingBox.writeTo(out); } /** diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java index 66db397865a0b..d38307fc2194a 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java @@ -32,7 +32,6 @@ package org.opensearch.index.rankeval; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; @@ -69,9 +68,7 @@ public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) { rankingEvaluationSpec = new RankEvalSpec(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - searchType = SearchType.fromId(in.readByte()); - } + searchType = SearchType.fromId(in.readByte()); } RankEvalRequest() {} @@ -150,9 +147,7 @@ public void writeTo(StreamOutput out) throws IOException { rankingEvaluationSpec.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - out.writeByte(searchType.id()); - } + out.writeByte(searchType.id()); } @Override diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index 870398ddf6fec..8c303eb5d0b55 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -720,12 +720,8 @@ public void testAutoExpandIndicesDuringRollingUpgrade() throws Exception { final int numberOfReplicas = Integer.parseInt( getIndexSettingsAsMap(indexName).get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS).toString()); - if (minimumNodeVersion.onOrAfter(LegacyESVersion.V_7_6_0)) { - assertEquals(nodes.size() - 2, numberOfReplicas); - ensureGreen(indexName); - } else { - assertEquals(nodes.size() - 1, numberOfReplicas); - } + assertEquals(nodes.size() - 2, numberOfReplicas); + ensureGreen(indexName); } public void testSoftDeletesDisabledWarning() throws Exception { diff --git 
a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 3d8da7eac7690..959b04b7861a3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -288,7 +288,14 @@ public void testIndexCanChangeCustomDataPath() throws Exception { final Path indexDataPath = sharedDataPath.resolve("start-" + randomAsciiLettersOfLength(10)); logger.info("--> creating index [{}] with data_path [{}]", index, indexDataPath); - createIndex(index, Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, indexDataPath.toAbsolutePath().toString()).build()); + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_DATA_PATH, indexDataPath.toAbsolutePath().toString()) + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST) + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false) + .build() + ); client().prepareIndex(index).setId("1").setSource("foo", "bar").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(index); @@ -307,6 +314,16 @@ public void testIndexCanChangeCustomDataPath() throws Exception { logger.info("--> closing the index [{}] before updating data_path", index); assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(ActiveShardCount.DEFAULT)); + // race condition: async flush may cause translog file deletion resulting in an inconsistent stream from + // Files.walk below during copy phase + // temporarily disable refresh to avoid any flushes or syncs that may inadvertently cause the deletion + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(index) + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1").build()) + ); + final Path newIndexDataPath = sharedDataPath.resolve("end-" + randomAlphaOfLength(10)); IOUtils.rm(newIndexDataPath); @@ -326,11 +343,17 @@ public void testIndexCanChangeCustomDataPath() throws Exception { } logger.info("--> updating data_path to [{}] for index [{}]", newIndexDataPath, index); + // update data path and re-enable refresh assertAcked( client().admin() .indices() .prepareUpdateSettings(index) - .setSettings(Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, newIndexDataPath.toAbsolutePath().toString()).build()) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_DATA_PATH, newIndexDataPath.toAbsolutePath().toString()) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), IndexSettings.DEFAULT_REFRESH_INTERVAL.toString()) + .build() + ) .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true)) ); diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index c020089afd1b9..9e9ef0113e17c 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,11 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_6_0 = new LegacyESVersion(7060099, org.apache.lucene.util.Version.LUCENE_8_4_0); - public static final LegacyESVersion V_7_6_1 = new LegacyESVersion(7060199, org.apache.lucene.util.Version.LUCENE_8_4_0); - public static final LegacyESVersion V_7_6_2 = new LegacyESVersion(7060299, org.apache.lucene.util.Version.LUCENE_8_4_0); - public static final LegacyESVersion 
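The refresh toggle added to the test above can be read on its own; roughly, assuming the test's client() helper and the same IndexSettings/IndexMetadata constants used in the hunk:

    // Temporarily disable refresh so no async flush deletes translog files while the
    // shard directory is being walked and copied.
    client().admin().indices().prepareUpdateSettings(index)
        .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1").build())
        .get();

    // ... copy the shard data to newIndexDataPath ...

    // Restore the default refresh interval together with the data_path update.
    client().admin().indices().prepareUpdateSettings(index)
        .setSettings(Settings.builder()
            .put(IndexMetadata.SETTING_DATA_PATH, newIndexDataPath.toAbsolutePath().toString())
            .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), IndexSettings.DEFAULT_REFRESH_INTERVAL.toString())
            .build())
        .get();
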
V_7_7_0 = new LegacyESVersion(7070099, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final LegacyESVersion V_7_7_1 = new LegacyESVersion(7070199, org.apache.lucene.util.Version.LUCENE_8_5_1); public static final LegacyESVersion V_7_10_0 = new LegacyESVersion(7100099, org.apache.lucene.util.Version.LUCENE_8_7_0); public static final LegacyESVersion V_7_10_1 = new LegacyESVersion(7100199, org.apache.lucene.util.Version.LUCENE_8_7_0); public static final LegacyESVersion V_7_10_2 = new LegacyESVersion(7100299, org.apache.lucene.util.Version.LUCENE_8_7_0); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java index d51be9bc27ac9..77ffd98513698 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.info; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -63,22 +62,7 @@ public class NodesInfoRequest extends BaseNodesRequest { public NodesInfoRequest(StreamInput in) throws IOException { super(in); requestedMetrics.clear(); - if (in.getVersion().before(LegacyESVersion.V_7_7_0)) { - // prior to version 8.x, a NodesInfoRequest was serialized as a list - // of booleans in a fixed order - optionallyAddMetric(in.readBoolean(), Metric.SETTINGS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.OS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.PROCESS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.JVM.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.THREAD_POOL.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.TRANSPORT.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.HTTP.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.PLUGINS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.INGEST.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.INDICES.metricName()); - } else { - requestedMetrics.addAll(Arrays.asList(in.readStringArray())); - } + requestedMetrics.addAll(Arrays.asList(in.readStringArray())); } /** @@ -165,22 +149,7 @@ private void optionallyAddMetric(boolean addMetric, String metricName) { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(LegacyESVersion.V_7_7_0)) { - // prior to version 8.x, a NodesInfoRequest was serialized as a list - // of booleans in a fixed order - out.writeBoolean(Metric.SETTINGS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.OS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.PROCESS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.JVM.containedIn(requestedMetrics)); - out.writeBoolean(Metric.THREAD_POOL.containedIn(requestedMetrics)); - out.writeBoolean(Metric.TRANSPORT.containedIn(requestedMetrics)); - out.writeBoolean(Metric.HTTP.containedIn(requestedMetrics)); - out.writeBoolean(Metric.PLUGINS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.INGEST.containedIn(requestedMetrics)); - out.writeBoolean(Metric.INDICES.containedIn(requestedMetrics)); - } else { - out.writeStringArray(requestedMetrics.toArray(new String[0])); - } + 
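The simplified NodesInfoRequest wire format above is just a string-array round trip; condensed from the hunk, both sides must keep this shape:

    // read side: metrics arrive as a single string array
    requestedMetrics.clear();
    requestedMetrics.addAll(Arrays.asList(in.readStringArray()));

    // write side mirrors it exactly
    out.writeStringArray(requestedMetrics.toArray(new String[0]));
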
out.writeStringArray(requestedMetrics.toArray(new String[0])); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index b721c8f005974..b5298b5f5eefb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.reload; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.common.io.stream.StreamInput; @@ -68,18 +67,16 @@ public NodesReloadSecureSettingsRequest() { public NodesReloadSecureSettingsRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - final BytesReference bytesRef = in.readOptionalBytesReference(); - if (bytesRef != null) { - byte[] bytes = BytesReference.toBytes(bytesRef); - try { - this.secureSettingsPassword = new SecureString(CharArrays.utf8BytesToChars(bytes)); - } finally { - Arrays.fill(bytes, (byte) 0); - } - } else { - this.secureSettingsPassword = null; + final BytesReference bytesRef = in.readOptionalBytesReference(); + if (bytesRef != null) { + byte[] bytes = BytesReference.toBytes(bytesRef); + try { + this.secureSettingsPassword = new SecureString(CharArrays.utf8BytesToChars(bytes)); + } finally { + Arrays.fill(bytes, (byte) 0); } + } else { + this.secureSettingsPassword = null; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index babec0b7c119f..f6fb788289a5a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.stats; -import org.opensearch.LegacyESVersion; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.common.io.stream.StreamInput; @@ -64,22 +63,7 @@ public NodesStatsRequest(StreamInput in) throws IOException { indices = new CommonStatsFlags(in); requestedMetrics.clear(); - if (in.getVersion().before(LegacyESVersion.V_7_7_0)) { - optionallyAddMetric(in.readBoolean(), Metric.OS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.PROCESS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.JVM.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.THREAD_POOL.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.FS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.TRANSPORT.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.HTTP.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.BREAKER.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.SCRIPT.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.DISCOVERY.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.INGEST.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.ADAPTIVE_SELECTION.metricName()); - } else { - requestedMetrics.addAll(in.readStringList()); - } + 
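The password handling above keeps the usual secure-string hygiene: decode the bytes into chars, then zero the intermediate byte array even if decoding throws. Condensed from the hunk:

    final BytesReference bytesRef = in.readOptionalBytesReference();
    if (bytesRef == null) {
        this.secureSettingsPassword = null;
    } else {
        byte[] bytes = BytesReference.toBytes(bytesRef);
        try {
            this.secureSettingsPassword = new SecureString(CharArrays.utf8BytesToChars(bytes));
        } finally {
            Arrays.fill(bytes, (byte) 0); // wipe the plaintext bytes in all cases
        }
    }
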
requestedMetrics.addAll(in.readStringList()); } /** @@ -200,22 +184,7 @@ private void optionallyAddMetric(boolean includeMetric, String metricName) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); indices.writeTo(out); - if (out.getVersion().before(LegacyESVersion.V_7_7_0)) { - out.writeBoolean(Metric.OS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.PROCESS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.JVM.containedIn(requestedMetrics)); - out.writeBoolean(Metric.THREAD_POOL.containedIn(requestedMetrics)); - out.writeBoolean(Metric.FS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.TRANSPORT.containedIn(requestedMetrics)); - out.writeBoolean(Metric.HTTP.containedIn(requestedMetrics)); - out.writeBoolean(Metric.BREAKER.containedIn(requestedMetrics)); - out.writeBoolean(Metric.SCRIPT.containedIn(requestedMetrics)); - out.writeBoolean(Metric.DISCOVERY.containedIn(requestedMetrics)); - out.writeBoolean(Metric.INGEST.containedIn(requestedMetrics)); - out.writeBoolean(Metric.ADAPTIVE_SELECTION.containedIn(requestedMetrics)); - } else { - out.writeStringArray(requestedMetrics.toArray(new String[0])); - } + out.writeStringArray(requestedMetrics.toArray(new String[0])); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 5fd83244f3dea..df889831cb4ca 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -144,9 +144,6 @@ public RestoreSnapshotRequest(StreamInput in) throws IOException { includeGlobalState = in.readBoolean(); partial = in.readBoolean(); includeAliases = in.readBoolean(); - if (in.getVersion().before(LegacyESVersion.V_7_7_0)) { - readSettingsFromStream(in); // formerly the unused settings field - } indexSettings = readSettingsFromStream(in); ignoreIndexSettings = in.readStringArray(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { @@ -170,9 +167,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(includeGlobalState); out.writeBoolean(partial); out.writeBoolean(includeAliases); - if (out.getVersion().before(LegacyESVersion.V_7_7_0)) { - writeSettingsToStream(Settings.EMPTY, out); // formerly the unused settings field - } writeSettingsToStream(indexSettings, out); out.writeStringArray(ignoreIndexSettings); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java index c6519d6669ea8..8c5d741251501 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.stats; -import org.opensearch.LegacyESVersion; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; @@ -71,11 +70,9 @@ public ClusterStatsResponse(StreamInput in) throws IOException { String clusterUUID = null; MappingStats mappingStats = null; AnalysisStats analysisStats = null; - if 
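For the snapshot-restore hunks above, the important invariant is that the legacy placeholder settings field disappears from the read path and the write path together, otherwise stream positions drift. Schematically, with the names from the hunk:

    // read side after the change
    indexSettings = readSettingsFromStream(in);
    ignoreIndexSettings = in.readStringArray();

    // write side after the change: same order, no legacy placeholder
    writeSettingsToStream(indexSettings, out);
    out.writeStringArray(ignoreIndexSettings);
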
(in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - clusterUUID = in.readOptionalString(); - mappingStats = in.readOptionalWriteable(MappingStats::new); - analysisStats = in.readOptionalWriteable(AnalysisStats::new); - } + clusterUUID = in.readOptionalString(); + mappingStats = in.readOptionalWriteable(MappingStats::new); + analysisStats = in.readOptionalWriteable(AnalysisStats::new); this.clusterUUID = clusterUUID; // built from nodes rather than from the stream directly @@ -132,11 +129,9 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVLong(timestamp); out.writeOptionalWriteable(status); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - out.writeOptionalString(clusterUUID); - out.writeOptionalWriteable(indicesStats.getMappings()); - out.writeOptionalWriteable(indicesStats.getAnalysis()); - } + out.writeOptionalString(clusterUUID); + out.writeOptionalWriteable(indicesStats.getMappings()); + out.writeOptionalWriteable(indicesStats.getAnalysis()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java index f9a785d1759d8..3f78d99d49423 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.alias; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.common.Nullable; import org.opensearch.common.ParseField; @@ -90,11 +89,7 @@ public Alias(StreamInput in) throws IOException { indexRouting = in.readOptionalString(); searchRouting = in.readOptionalString(); writeIndex = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - isHidden = in.readOptionalBoolean(); - } else { - isHidden = null; - } + isHidden = in.readOptionalBoolean(); } public Alias(String name) { @@ -236,9 +231,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(indexRouting); out.writeOptionalString(searchRouting); out.writeOptionalBoolean(writeIndex); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - out.writeOptionalBoolean(isHidden); - } + out.writeOptionalBoolean(isHidden); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index e210d54a922b1..97f9911d8227e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.alias; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.AliasesRequest; @@ -276,10 +275,7 @@ public AliasActions(StreamInput in) throws IOException { searchRouting = in.readOptionalString(); indexRouting = in.readOptionalString(); writeIndex = in.readOptionalBoolean(); - // TODO fix for backport of https://github.com/elastic/elasticsearch/pull/52547 - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - isHidden = in.readOptionalBoolean(); - } + isHidden = in.readOptionalBoolean(); originalAliases = in.readStringArray(); mustExist = 
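The alias change above leans on the optional-value stream helpers, which write a presence flag ahead of the value; the symmetric pair in isolation:

    // read side: yields null when the sender wrote "absent"
    isHidden = in.readOptionalBoolean();

    // write side: emits the presence flag, then the value when isHidden is non-null
    out.writeOptionalBoolean(isHidden);
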
in.readOptionalBoolean(); } @@ -294,10 +290,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(searchRouting); out.writeOptionalString(indexRouting); out.writeOptionalBoolean(writeIndex); - // TODO fix for backport https://github.com/elastic/elasticsearch/pull/52547 - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - out.writeOptionalBoolean(isHidden); - } + out.writeOptionalBoolean(isHidden); out.writeStringArray(originalAliases); out.writeOptionalBoolean(mustExist); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java index b428c3523b666..77007fba539ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -32,13 +32,12 @@ package org.opensearch.action.admin.indices.forcemerge; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.broadcast.BroadcastRequest; -import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.index.engine.Engine; import java.io.IOException; import java.util.Arrays; @@ -74,13 +73,12 @@ public static final class Defaults { private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES; private boolean flush = Defaults.FLUSH; - private static final Version FORCE_MERGE_UUID_VERSION = LegacyESVersion.V_7_7_0; + private static final Version FORCE_MERGE_UUID_VERSION = Version.V_3_0_0; /** * Force merge UUID to store in the live commit data of a shard under * {@link org.opensearch.index.engine.Engine#FORCE_MERGE_UUID_KEY} after force merging it. */ - @Nullable private final String forceMergeUUID; /** @@ -99,9 +97,11 @@ public ForceMergeRequest(StreamInput in) throws IOException { onlyExpungeDeletes = in.readBoolean(); flush = in.readBoolean(); if (in.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { - forceMergeUUID = in.readOptionalString(); - } else { - forceMergeUUID = null; + forceMergeUUID = in.readString(); + } else if ((forceMergeUUID = in.readOptionalString()) == null) { + throw new IllegalStateException( + "As of legacy version 7.7 [" + Engine.FORCE_MERGE_UUID_KEY + "] is no longer optional in force merge requests." + ); } } @@ -143,7 +143,6 @@ public ForceMergeRequest onlyExpungeDeletes(boolean onlyExpungeDeletes) { * Force merge UUID to use when force merging or {@code null} if not using one in a mixed version cluster containing nodes older than * {@link #FORCE_MERGE_UUID_VERSION}. 
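The compact else-if in the ForceMergeRequest constructor above is easier to follow written out long-hand; an equivalent sketch using the same constants and exception text:

    if (in.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) {
        // current senders always include the UUID
        forceMergeUUID = in.readString();
    } else {
        // legacy senders used an optional string; a missing UUID is no longer tolerated
        forceMergeUUID = in.readOptionalString();
        if (forceMergeUUID == null) {
            throw new IllegalStateException(
                "As of legacy version 7.7 [" + Engine.FORCE_MERGE_UUID_KEY + "] is no longer optional in force merge requests."
            );
        }
    }
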
*/ - @Nullable public String forceMergeUUID() { return forceMergeUUID; } @@ -183,6 +182,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(onlyExpungeDeletes); out.writeBoolean(flush); if (out.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { + out.writeString(forceMergeUUID); + } else { out.writeOptionalString(forceMergeUUID); } } diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java index 346e394bbb35e..364952dff82f1 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java @@ -33,7 +33,6 @@ package org.opensearch.action.bulk; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.DocWriteRequest.OpType; @@ -270,11 +269,7 @@ public Failure(StreamInput in) throws IOException { cause = in.readException(); status = ExceptionsHelper.status(cause); seqNo = in.readZLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - term = in.readVLong(); - } else { - term = SequenceNumbers.UNASSIGNED_PRIMARY_TERM; - } + term = in.readVLong(); aborted = in.readBoolean(); } @@ -287,9 +282,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); out.writeException(cause); out.writeZLong(seqNo); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - out.writeVLong(term); - } + out.writeVLong(term); out.writeBoolean(aborted); } diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java index 4fdf8d551af61..4afce67f98297 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -125,11 +124,7 @@ public FieldCapabilities( this.indices = in.readOptionalStringArray(); this.nonSearchableIndices = in.readOptionalStringArray(); this.nonAggregatableIndices = in.readOptionalStringArray(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - meta = in.readMap(StreamInput::readString, i -> i.readSet(StreamInput::readString)); - } else { - meta = Collections.emptyMap(); - } + this.meta = in.readMap(StreamInput::readString, i -> i.readSet(StreamInput::readString)); } @Override @@ -141,9 +136,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalStringArray(indices); out.writeOptionalStringArray(nonSearchableIndices); out.writeOptionalStringArray(nonAggregatableIndices); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - out.writeMap(meta, StreamOutput::writeString, (o, set) -> o.writeCollection(set, StreamOutput::writeString)); - } + out.writeMap(meta, StreamOutput::writeString, (o, set) -> o.writeCollection(set, StreamOutput::writeString)); } @Override diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/IndexFieldCapabilities.java b/server/src/main/java/org/opensearch/action/fieldcaps/IndexFieldCapabilities.java index 062e5bc1af7e5..73025c2eac3f0 100644 --- 
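The field-capabilities hunk above moves the meta map (a Map<String, Set<String>>) through the generic map helpers; the matching read/write pair, condensed:

    // write: string keys, each value a collection of strings
    out.writeMap(meta, StreamOutput::writeString, (o, set) -> o.writeCollection(set, StreamOutput::writeString));

    // read: mirrored readers, same order
    meta = in.readMap(StreamInput::readString, i -> i.readSet(StreamInput::readString));
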
a/server/src/main/java/org/opensearch/action/fieldcaps/IndexFieldCapabilities.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/IndexFieldCapabilities.java @@ -32,17 +32,13 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; import java.io.IOException; -import java.util.Collections; import java.util.Map; import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; /** * Describes the capabilities of a field in a single index. @@ -74,42 +70,20 @@ public class IndexFieldCapabilities implements Writeable { } IndexFieldCapabilities(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - this.name = in.readString(); - this.type = in.readString(); - this.isSearchable = in.readBoolean(); - this.isAggregatable = in.readBoolean(); - this.meta = in.readMap(StreamInput::readString, StreamInput::readString); - } else { - // Previously we reused the FieldCapabilities class to represent index field capabilities. - FieldCapabilities fieldCaps = new FieldCapabilities(in); - this.name = fieldCaps.getName(); - this.type = fieldCaps.getType(); - this.isSearchable = fieldCaps.isSearchable(); - this.isAggregatable = fieldCaps.isAggregatable(); - this.meta = fieldCaps.meta() - .entrySet() - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().iterator().next())); - } + this.name = in.readString(); + this.type = in.readString(); + this.isSearchable = in.readBoolean(); + this.isAggregatable = in.readBoolean(); + this.meta = in.readMap(StreamInput::readString, StreamInput::readString); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - out.writeString(name); - out.writeString(type); - out.writeBoolean(isSearchable); - out.writeBoolean(isAggregatable); - out.writeMap(meta, StreamOutput::writeString, StreamOutput::writeString); - } else { - // Previously we reused the FieldCapabilities class to represent index field capabilities. 
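With the legacy branch gone, IndexFieldCapabilities is five fields in a fixed order; read and write must keep that order in lockstep, condensed from the hunks above:

    // read
    this.name = in.readString();
    this.type = in.readString();
    this.isSearchable = in.readBoolean();
    this.isAggregatable = in.readBoolean();
    this.meta = in.readMap(StreamInput::readString, StreamInput::readString);

    // write
    out.writeString(name);
    out.writeString(type);
    out.writeBoolean(isSearchable);
    out.writeBoolean(isAggregatable);
    out.writeMap(meta, StreamOutput::writeString, StreamOutput::writeString);
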
- Map> wrappedMeta = meta.entrySet() - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, entry -> Collections.singleton(entry.getValue()))); - FieldCapabilities fieldCaps = new FieldCapabilities(name, type, isSearchable, isAggregatable, null, null, null, wrappedMeta); - fieldCaps.writeTo(out); - } + out.writeString(name); + out.writeString(type); + out.writeBoolean(isSearchable); + out.writeBoolean(isAggregatable); + out.writeMap(meta, StreamOutput::writeString, StreamOutput::writeString); } public String getName() { diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index e4dd0d0b1a116..ceb2a150bf352 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; @@ -237,11 +236,7 @@ public SearchRequest(StreamInput in) throws IOException { requestCache = in.readOptionalBoolean(); batchedReduceSize = in.readVInt(); maxConcurrentShardRequests = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - preFilterShardSize = in.readOptionalVInt(); - } else { - preFilterShardSize = in.readVInt(); - } + preFilterShardSize = in.readOptionalVInt(); allowPartialSearchResults = in.readOptionalBoolean(); localClusterAlias = in.readOptionalString(); if (localClusterAlias != null) { @@ -275,11 +270,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(requestCache); out.writeVInt(batchedReduceSize); out.writeVInt(maxConcurrentShardRequests); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - out.writeOptionalVInt(preFilterShardSize); - } else { - out.writeVInt(preFilterShardSize == null ? DEFAULT_BATCHED_REDUCE_SIZE : preFilterShardSize); - } + out.writeOptionalVInt(preFilterShardSize); out.writeOptionalBoolean(allowPartialSearchResults); out.writeOptionalString(localClusterAlias); if (localClusterAlias != null) { diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java b/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java index 3e5c76aa1f66f..da432a73c8a1d 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamInput; @@ -62,18 +61,13 @@ static InternalScrollSearchRequest internalScrollSearchRequest(ShardSearchContex } static String buildScrollId(AtomicArray searchPhaseResults, Version version) { - boolean includeContextUUID = version.onOrAfter(LegacyESVersion.V_7_7_0); try { BytesStreamOutput out = new BytesStreamOutput(); - if (includeContextUUID) { - out.writeString(INCLUDE_CONTEXT_UUID); - } + out.writeString(INCLUDE_CONTEXT_UUID); out.writeString(searchPhaseResults.length() == 1 ? 
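The scroll-id builder above now always embeds the per-context session UUID; the layout it writes, sketched with a simplified local name (results stands in for searchPhaseResults, and the final encoding of the buffer is unchanged and omitted here):

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeString(INCLUDE_CONTEXT_UUID);   // marker: entries carry a session UUID
    out.writeString(results.length() == 1 ? ParsedScrollId.QUERY_AND_FETCH_TYPE : ParsedScrollId.QUERY_THEN_FETCH_TYPE);
    out.writeVInt(results.asList().size());
    for (SearchPhaseResult result : results.asList()) {
        out.writeString(result.getContextId().getSessionId());  // always written now
        out.writeLong(result.getContextId().getId());
        // shard target / cluster alias follow, as in the original method
    }
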
ParsedScrollId.QUERY_AND_FETCH_TYPE : ParsedScrollId.QUERY_THEN_FETCH_TYPE); out.writeVInt(searchPhaseResults.asList().size()); for (SearchPhaseResult searchPhaseResult : searchPhaseResults.asList()) { - if (includeContextUUID) { - out.writeString(searchPhaseResult.getContextId().getSessionId()); - } + out.writeString(searchPhaseResult.getContextId().getSessionId()); out.writeLong(searchPhaseResult.getContextId().getId()); SearchShardTarget searchShardTarget = searchPhaseResult.getSearchShardTarget(); if (searchShardTarget.getClusterAlias() != null) { diff --git a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java index 3fb572d0ecbbf..2cf211fa6da3c 100644 --- a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java @@ -31,7 +31,6 @@ package org.opensearch.action.support; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -276,21 +275,12 @@ public EnumSet getExpandWildcards() { public void writeIndicesOptions(StreamOutput out) throws IOException { EnumSet