diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index 7c65df1f677a5..572f6c981a052 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -7,7 +7,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - java: [ 11, 17, 21 ] + java: [ 11, 17, 21, 23 ] os: [ubuntu-latest, windows-latest, macos-latest, macos-13] steps: - uses: actions/checkout@v4 diff --git a/CHANGELOG.md b/CHANGELOG.md index a6f7d387456ae..d25de60aa989b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,8 +7,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added - Add support for async deletion in S3BlobContainer ([#15621](https://github.com/opensearch-project/OpenSearch/pull/15621)) - MultiTermQueries in keyword fields now default to `indexed` approach and gated behind cluster setting ([#15637](https://github.com/opensearch-project/OpenSearch/pull/15637)) +- [Workload Management] Add Integration Tests for Workload Management CRUD APIs ([#15955](https://github.com/opensearch-project/OpenSearch/pull/15955)) - [Workload Management] QueryGroup resource cancellation framework changes ([#15651](https://github.com/opensearch-project/OpenSearch/pull/15651)) - [Workload Management] Add orchestrator for wlm resiliency (QueryGroupService) ([#15925](https://github.com/opensearch-project/OpenSearch/pull/15925)) +- [Workload Management] Add QueryGroup Stats API Logic ([#15777](https://github.com/opensearch-project/OpenSearch/pull/15777)) - Fallback to Remote cluster-state on Term-Version check mismatch - ([#15424](https://github.com/opensearch-project/OpenSearch/pull/15424)) - Implement WithFieldName interface in ValuesSourceAggregationBuilder & FieldSortBuilder ([#15916](https://github.com/opensearch-project/OpenSearch/pull/15916)) - Add successfulSearchShardIndices in searchRequestContext ([#15967](https://github.com/opensearch-project/OpenSearch/pull/15967), 
[#16110](https://github.com/opensearch-project/OpenSearch/pull/16110)) @@ -52,6 +54,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove identity-related feature flagged code from the RestController ([#15430](https://github.com/opensearch-project/OpenSearch/pull/15430)) - Remove Identity FeatureFlag ([#16024](https://github.com/opensearch-project/OpenSearch/pull/16024)) - Ensure RestHandler.Wrapper delegates all implementations to the wrapped handler ([#16154](https://github.com/opensearch-project/OpenSearch/pull/16154)) +- Code cleanup: Remove ApproximateIndexOrDocValuesQuery ([#16273](https://github.com/opensearch-project/OpenSearch/pull/16273)) ### Deprecated @@ -65,6 +68,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix search_as_you_type not supporting multi-fields ([#15988](https://github.com/opensearch-project/OpenSearch/pull/15988)) - Avoid infinite loop when `flat_object` field contains invalid token ([#15985](https://github.com/opensearch-project/OpenSearch/pull/15985)) - Fix infinite loop in nested agg ([#15931](https://github.com/opensearch-project/OpenSearch/pull/15931)) +- Fix update settings with null replica not honoring cluster setting bug ([#14948](https://github.com/opensearch-project/OpenSearch/pull/14948)) - Fix race condition in node-join and node-left ([#15521](https://github.com/opensearch-project/OpenSearch/pull/15521)) - Streaming bulk request hangs ([#16158](https://github.com/opensearch-project/OpenSearch/pull/16158)) - Fix warnings from SLF4J on startup when repository-s3 is installed ([#16194](https://github.com/opensearch-project/OpenSearch/pull/16194)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index b984ef3800490..aaa1d8bdef85c 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -114,7 +114,7 @@ dependencies { api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' api 
"org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" - api 'de.thetaphi:forbiddenapis:3.6' + api 'de.thetaphi:forbiddenapis:3.8' api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.6' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.6' diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditPrecommitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditPrecommitPlugin.java index d83f1b01ee043..988b8b749ee64 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditPrecommitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditPrecommitPlugin.java @@ -51,7 +51,7 @@ public class ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin { public TaskProvider createTask(Project project) { project.getPlugins().apply(CompileOnlyResolvePlugin.class); project.getConfigurations().create("forbiddenApisCliJar"); - project.getDependencies().add("forbiddenApisCliJar", "de.thetaphi:forbiddenapis:3.5.1"); + project.getDependencies().add("forbiddenApisCliJar", "de.thetaphi:forbiddenapis:3.8"); Configuration jdkJarHellConfig = project.getConfigurations().create(JDK_JAR_HELL_CONFIG_NAME); if (BuildParams.isInternal() && project.getPath().equals(":libs:opensearch-core") == false) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index 8d5ce9143cbac..9365f1c732229 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -77,9 +77,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin { - private static final String SYSTEM_JDK_VERSION = "21.0.4+7"; + private static final String SYSTEM_JDK_VERSION = "23+37"; private static final String SYSTEM_JDK_VENDOR = 
"adoptium"; - private static final String GRADLE_JDK_VERSION = "21.0.4+7"; + private static final String GRADLE_JDK_VERSION = "23+37"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 9affa9b9a45f1..4bb6f3af7fbe4 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -2,7 +2,7 @@ opensearch = 3.0.0 lucene = 9.12.0 bundled_jdk_vendor = adoptium -bundled_jdk = 21.0.4+7 +bundled_jdk = 23+37 # optional dependencies spatial4j = 0.7 @@ -60,9 +60,9 @@ bouncycastle=1.78 randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 -mockito = 5.12.0 +mockito = 5.14.1 objenesis = 3.2 -bytebuddy = 1.14.9 +bytebuddy = 1.15.4 # benchmark dependencies jmh = 1.35 diff --git a/gradle/code-coverage.gradle b/gradle/code-coverage.gradle index 3ca6b1fe84ea7..eb27dd1a76634 100644 --- a/gradle/code-coverage.gradle +++ b/gradle/code-coverage.gradle @@ -19,7 +19,7 @@ repositories { allprojects { plugins.withId('jacoco') { - jacoco.toolVersion = '0.8.10' + jacoco.toolVersion = '0.8.12' } } diff --git a/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureConfig.java b/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureConfig.java index 6d1985f2165cd..6fabad3a7d5bd 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureConfig.java +++ b/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureConfig.java @@ -114,8 +114,8 @@ public GrokCaptureExtracter forBoolean(Function, GrokCaptureEx /** * Build an extract that has access to the "native" type of the extracter * match. This means that patterns like {@code %{NUMBER:bytes:float}} has - * access to an actual {@link float}. Extracters returned from this method - * should be stateless stateless and can be reused. Pathological implementations + * access to an actual float. 
Extracters returned from this method + * should be stateless and can be reused. Pathological implementations * of the {@code map} parameter could violate this, but the caller should * take care to stay sane. *

@@ -144,27 +144,27 @@ public interface NativeExtracterMap { T forString(Function, GrokCaptureExtracter> buildExtracter); /** - * Called when the native type is an {@link int}. + * Called when the native type is an int. */ T forInt(Function buildExtracter); /** - * Called when the native type is an {@link long}. + * Called when the native type is a long. */ T forLong(Function buildExtracter); /** - * Called when the native type is an {@link float}. + * Called when the native type is a float. */ T forFloat(Function buildExtracter); /** - * Called when the native type is an {@link double}. + * Called when the native type is a double. */ T forDouble(Function buildExtracter); /** - * Called when the native type is an {@link boolean}. + * Called when the native type is a boolean. */ T forBoolean(Function, GrokCaptureExtracter> buildExtracter); } diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java index 02e1ff40f7ed6..7a56cfdb6278e 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java @@ -175,6 +175,7 @@ protected List buildDynamicHosts(Settings nodeSettings, int no exchange.getResponseHeaders().set("Content-Type", "text/xml; charset=UTF-8"); exchange.sendResponseHeaders(HttpStatus.SC_OK, responseBody.length); exchange.getResponseBody().write(responseBody); + exchange.getResponseBody().flush(); return; } } diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java index ce097667f9c4b..194a262c6ed7f 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java +++ 
b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java @@ -125,6 +125,7 @@ public void testEC2DiscoveryRetriesOnRateLimiting() throws IOException { exchange.getResponseHeaders().set("Content-Type", "text/xml; charset=UTF-8"); exchange.sendResponseHeaders(HttpStatus.SC_OK, responseBody.length); exchange.getResponseBody().write(responseBody); + exchange.getResponseBody().flush(); return; } } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 15c4a1647fd38..faa9b2bfff84d 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -146,6 +146,10 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', } final List miniHDFSArgs = [] + if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_23) { + miniHDFSArgs.add('-Djava.security.manager=allow') + } + // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options if (fixtureName.equals('secureHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) { miniHDFSArgs.add("-Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")}"); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 110d91bfbd822..1048ec784ec4e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -188,7 +188,7 @@ private static int allocatedProcessors(Settings settings) { } private static int urgentPoolCount(Settings settings) { - return boundedBy((allocatedProcessors(settings) + 7) / 8, 1, 2); + return boundedBy((allocatedProcessors(settings) + 1) / 2, 1, 2); } private static int priorityPoolCount(Settings settings) { diff --git 
a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java new file mode 100644 index 0000000000000..9ac1564c807c3 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.s3; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.SizeUnit; +import org.opensearch.common.unit.SizeValue; +import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.threadpool.ThreadPool.ThreadPoolType; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; +import java.util.concurrent.Executor; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class S3RepositoryPluginTests extends OpenSearchTestCase { + + private static final String URGENT_FUTURE_COMPLETION = "urgent_future_completion"; + + public void testGetExecutorBuilders() throws IOException { + final int processors = randomIntBetween(1, 64); + Settings settings = Settings.builder().put("node.name", "test").put("node.processors", processors).build(); + Path configPath = createTempDir(); + ThreadPool threadPool = null; + try (S3RepositoryPlugin plugin = new S3RepositoryPlugin(settings, configPath)) { + List> executorBuilders = plugin.getExecutorBuilders(settings); + assertNotNull(executorBuilders); + 
assertFalse(executorBuilders.isEmpty()); + threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); + final Executor executor = threadPool.executor(URGENT_FUTURE_COMPLETION); + assertNotNull(executor); + assertThat(executor, instanceOf(OpenSearchThreadPoolExecutor.class)); + final OpenSearchThreadPoolExecutor openSearchThreadPoolExecutor = (OpenSearchThreadPoolExecutor) executor; + final ThreadPool.Info info = threadPool.info(URGENT_FUTURE_COMPLETION); + int size = boundedBy((processors + 1) / 2, 1, 2); + assertThat(info.getName(), equalTo(URGENT_FUTURE_COMPLETION)); + assertThat(info.getThreadPoolType(), equalTo(ThreadPoolType.FIXED)); + assertThat(info.getQueueSize(), notNullValue()); + assertThat(info.getQueueSize(), equalTo(new SizeValue(10, SizeUnit.KILO))); + assertThat(openSearchThreadPoolExecutor.getQueue().remainingCapacity(), equalTo(10_000)); + + assertThat(info.getMin(), equalTo(size)); + assertThat(openSearchThreadPoolExecutor.getCorePoolSize(), equalTo(size)); + assertThat(info.getMax(), equalTo(size)); + assertThat(openSearchThreadPoolExecutor.getMaximumPoolSize(), equalTo(size)); + + final int availableProcessors = Runtime.getRuntime().availableProcessors(); + if (processors > availableProcessors) { + assertWarnings( + "setting [node.processors] to value [" + + processors + + "] which is more than available processors [" + + availableProcessors + + "] is deprecated" + ); + } + } finally { + if (threadPool != null) { + terminate(threadPool); + } + } + } + + private static int boundedBy(int value, int min, int max) { + return Math.min(max, Math.max(min, value)); + } + +} diff --git a/plugins/workload-management/build.gradle b/plugins/workload-management/build.gradle index cb14d22ef149f..ad6737bbd24b0 100644 --- a/plugins/workload-management/build.gradle +++ b/plugins/workload-management/build.gradle @@ -10,6 +10,7 @@ */ apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.java-rest-test' apply plugin: 
'opensearch.internal-cluster-test' opensearchplugin { diff --git a/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java b/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java new file mode 100644 index 0000000000000..7e1d61e57b6f7 --- /dev/null +++ b/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java @@ -0,0 +1,174 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest; + +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.test.rest.OpenSearchRestTestCase; + +import java.io.IOException; + +public class WorkloadManagementRestIT extends OpenSearchRestTestCase { + + public void testCreate() throws Exception { + Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics", "enforced", 0.4, 0.2)); + assertEquals(response.getStatusLine().getStatusCode(), 200); + performOperation("DELETE", "_wlm/query_group/analytics", null); + } + + public void testMultipleCreate() throws Exception { + Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics2", "enforced", 0.4, 0.2)); + assertEquals(response.getStatusLine().getStatusCode(), 200); + + Response response2 = performOperation("PUT", "_wlm/query_group", getCreateJson("users", "soft", 0.2, 0.1)); + assertEquals(response2.getStatusLine().getStatusCode(), 200); + + assertThrows( + ResponseException.class, + () -> performOperation("PUT", "_wlm/query_group", getCreateJson("users2", "soft", 0.41, 0.71)) + ); + performOperation("DELETE", "_wlm/query_group/analytics2", null); + 
performOperation("DELETE", "_wlm/query_group/users", null); + } + + public void testGet() throws Exception { + Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics3", "enforced", 0.4, 0.2)); + assertEquals(response.getStatusLine().getStatusCode(), 200); + + Response response2 = performOperation("GET", "_wlm/query_group/analytics3", null); + assertEquals(response2.getStatusLine().getStatusCode(), 200); + String responseBody2 = EntityUtils.toString(response2.getEntity()); + assertTrue(responseBody2.contains("\"name\":\"analytics3\"")); + assertTrue(responseBody2.contains("\"resiliency_mode\":\"enforced\"")); + assertTrue(responseBody2.contains("\"cpu\":0.4")); + assertTrue(responseBody2.contains("\"memory\":0.2")); + + assertThrows(ResponseException.class, () -> performOperation("GET", "_wlm/query_group/analytics97", null)); + performOperation("DELETE", "_wlm/query_group/analytics3", null); + } + + public void testDelete() throws Exception { + Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics4", "enforced", 0.4, 0.2)); + assertEquals(response.getStatusLine().getStatusCode(), 200); + + Response response2 = performOperation("DELETE", "_wlm/query_group/analytics4", null); + assertEquals(response2.getStatusLine().getStatusCode(), 200); + assertTrue(EntityUtils.toString(response2.getEntity()).contains("\"acknowledged\":true")); + + assertThrows(ResponseException.class, () -> performOperation("DELETE", "_wlm/query_group/analytics99", null)); + } + + public void testUpdate() throws Exception { + Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics5", "enforced", 0.4, 0.2)); + assertEquals(response.getStatusLine().getStatusCode(), 200); + + Response response2 = performOperation("PUT", "_wlm/query_group/analytics5", getUpdateJson("monitor", 0.41, 0.21)); + assertEquals(response2.getStatusLine().getStatusCode(), 200); + String responseBody2 = 
EntityUtils.toString(response2.getEntity()); + assertTrue(responseBody2.contains("\"name\":\"analytics5\"")); + assertTrue(responseBody2.contains("\"resiliency_mode\":\"monitor\"")); + assertTrue(responseBody2.contains("\"cpu\":0.41")); + assertTrue(responseBody2.contains("\"memory\":0.21")); + + String json = "{\n" + + " \"resource_limits\": {\n" + + " \"cpu\" : 1.1,\n" + + " \"memory\" : -0.1\n" + + " }\n" + + "}'"; + assertThrows(ResponseException.class, () -> performOperation("PUT", "_wlm/query_group/analytics5", json)); + assertThrows( + ResponseException.class, + () -> performOperation("PUT", "_wlm/query_group/analytics98", getUpdateJson("monitor", 0.43, 0.23)) + ); + performOperation("DELETE", "_wlm/query_group/analytics5", null); + } + + public void testCRUD() throws Exception { + Response response = performOperation("PUT", "_wlm/query_group", getCreateJson("analytics6", "enforced", 0.4, 0.2)); + assertEquals(response.getStatusLine().getStatusCode(), 200); + + Response response1 = performOperation("PUT", "_wlm/query_group/analytics6", getUpdateJson("monitor", 0.41, 0.21)); + assertEquals(response1.getStatusLine().getStatusCode(), 200); + + Response response2 = performOperation("GET", "_wlm/query_group/analytics6", null); + assertEquals(response2.getStatusLine().getStatusCode(), 200); + String responseBody2 = EntityUtils.toString(response2.getEntity()); + assertTrue(responseBody2.contains("\"name\":\"analytics6\"")); + assertTrue(responseBody2.contains("\"resiliency_mode\":\"monitor\"")); + assertTrue(responseBody2.contains("\"cpu\":0.41")); + assertTrue(responseBody2.contains("\"memory\":0.21")); + + assertThrows( + ResponseException.class, + () -> performOperation("PUT", "_wlm/query_group", getCreateJson("users3", "monitor", 0.6, 0.8)) + ); + + Response response4 = performOperation("PUT", "_wlm/query_group", getCreateJson("users3", "monitor", 0.59, 0.79)); + assertEquals(response4.getStatusLine().getStatusCode(), 200); + + Response response5 = 
performOperation("DELETE", "_wlm/query_group/analytics6", null); + assertEquals(response5.getStatusLine().getStatusCode(), 200); + String responseBody5 = EntityUtils.toString(response5.getEntity()); + assertTrue(responseBody5.contains("\"acknowledged\":true")); + + Response response6 = performOperation("GET", "_wlm/query_group", null); + assertEquals(response6.getStatusLine().getStatusCode(), 200); + String responseBody6 = EntityUtils.toString(response6.getEntity()); + assertTrue(responseBody6.contains("\"query_groups\"")); + assertTrue(responseBody6.contains("\"users3\"")); + assertFalse(responseBody6.contains("\"analytics6\"")); + performOperation("DELETE", "_wlm/query_group/users3", null); + } + + static String getCreateJson(String name, String resiliencyMode, double cpu, double memory) { + return "{\n" + + " \"name\": \"" + + name + + "\",\n" + + " \"resiliency_mode\": \"" + + resiliencyMode + + "\",\n" + + " \"resource_limits\": {\n" + + " \"cpu\" : " + + cpu + + ",\n" + + " \"memory\" : " + + memory + + "\n" + + " }\n" + + "}"; + } + + static String getUpdateJson(String resiliencyMode, double cpu, double memory) { + return "{\n" + + " \"resiliency_mode\": \"" + + resiliencyMode + + "\",\n" + + " \"resource_limits\": {\n" + + " \"cpu\" : " + + cpu + + ",\n" + + " \"memory\" : " + + memory + + "\n" + + " }\n" + + "}"; + } + + Response performOperation(String method, String uriPath, String json) throws IOException { + Request request = new Request(method, uriPath); + if (json != null) { + request.setJsonEntity(json); + } + return client().performRequest(request); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index bcf23a37c0010..8d0a3b5788a70 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -1366,7 +1366,8 @@ public void testSingleShardStoreFetchUsingBatchAction() throws ExecutionExceptio DiscoveryNode[] nodes = getDiscoveryNodes(); TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response = prepareAndSendRequest( new String[] { indexName }, - nodes + nodes, + false ); Index index = resolveIndex(indexName); ShardId shardId = new ShardId(index, 0); @@ -1379,12 +1380,14 @@ public void testSingleShardStoreFetchUsingBatchAction() throws ExecutionExceptio public void testShardStoreFetchMultiNodeMultiIndexesUsingBatchAction() throws Exception { internalCluster().startNodes(2); + ensureStableCluster(2); String indexName1 = "test1"; String indexName2 = "test2"; DiscoveryNode[] nodes = getDiscoveryNodes(); TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response = prepareAndSendRequest( new String[] { indexName1, indexName2 }, - nodes + nodes, + true ); ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName1, indexName2).get(); for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) { @@ -1406,7 +1409,8 @@ public void testShardStoreFetchNodeNotConnectedUsingBatchAction() { String indexName = "test"; TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response = prepareAndSendRequest( new String[] { indexName }, - new DiscoveryNode[] { nonExistingNode } + new DiscoveryNode[] { nonExistingNode }, + false ); assertTrue(response.hasFailures()); assertEquals(1, response.failures().size()); @@ -1513,10 +1517,14 @@ private void prepareIndices(String[] indices, int numberOfPrimaryShards, int num private TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch prepareAndSendRequest( String[] indices, - DiscoveryNode[] nodes + DiscoveryNode[] nodes, + boolean ensureGreen ) { Map shardAttributesMap = null; 
prepareIndices(indices, 1, 1); + if (ensureGreen) { + ensureGreen(indices); + } shardAttributesMap = prepareRequestMap(indices, 1); TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response; return ActionTestUtils.executeBlocking( diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 108ef14f0fcb4..557f9e19ee424 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -84,6 +84,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.concurrent.TimeUnit; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -586,6 +587,7 @@ public void testCacheWithFilteredAlias() throws InterruptedException { .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1)) .build(); String index = "index"; assertAcked( @@ -599,13 +601,14 @@ public void testCacheWithFilteredAlias() throws InterruptedException { ); ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); client.prepareIndex(index).setId("1").setRouting("1").setSource("created_at", DateTimeFormatter.ISO_LOCAL_DATE.format(now)).get(); - // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get(); - OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); 
refreshAndWaitForReplication(); indexRandomForConcurrentSearch(index); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get(); + OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); + assertCacheState(client, index, 0, 0); SearchResponse r1 = client.prepareSearch(index) @@ -823,7 +826,7 @@ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float bo SearchResponse resp = client.prepareSearch(index) .setRequestCache(true) .setQuery(timeoutQueryBuilder) - .setTimeout(TimeValue.ZERO) + .setTimeout(new TimeValue(5, TimeUnit.MILLISECONDS)) .get(); assertTrue(resp.isTimedOut()); RequestCacheStats requestCacheStats = getRequestCacheStats(client, index); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java index 475d0a154a98b..5e90ee3185618 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java @@ -895,6 +895,69 @@ private void runTestDefaultNumberOfReplicasTest(final boolean closeIndex) { assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(response.getIndexToSettings().get("test")), equalTo(1)); } + public void testNullReplicaUpdate() { + internalCluster().ensureAtLeastNumDataNodes(2); + + // cluster setting + String defaultNumberOfReplica = "3"; + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("cluster.default_number_of_replicas", defaultNumberOfReplica)) + .get() + ); + // create index with number of replicas will ignore default value + assertAcked( + client().admin() + .indices() + 
.prepareCreate("test") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "2")) + ); + + String numberOfReplicas = client().admin() + .indices() + .prepareGetSettings("test") + .get() + .getSetting("test", IndexMetadata.SETTING_NUMBER_OF_REPLICAS); + assertEquals("2", numberOfReplicas); + // update setting with null replica will use cluster setting of replica number + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().putNull(IndexMetadata.SETTING_NUMBER_OF_REPLICAS)) + ); + + numberOfReplicas = client().admin() + .indices() + .prepareGetSettings("test") + .get() + .getSetting("test", IndexMetadata.SETTING_NUMBER_OF_REPLICAS); + assertEquals(defaultNumberOfReplica, numberOfReplicas); + + // Clean up cluster setting, then update setting with null replica should pick up default value of 1 + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("cluster.default_number_of_replicas")) + ); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().putNull(IndexMetadata.SETTING_NUMBER_OF_REPLICAS)) + ); + + numberOfReplicas = client().admin() + .indices() + .prepareGetSettings("test") + .get() + .getSetting("test", IndexMetadata.SETTING_NUMBER_OF_REPLICAS); + assertEquals("1", numberOfReplicas); + } + public void testNoopUpdate() { internalCluster().ensureAtLeastNumDataNodes(2); final ClusterService clusterService = internalCluster().getClusterManagerNodeInstance(ClusterService.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java index 3e1127e0ce240..ee51eff4e1bd5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java @@ -11,21 +11,29 @@ import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.REMOTE_STORE; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_SLOWDOWN_SETTING; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStorePinnedTimestampsIT extends RemoteStoreBaseIntegTestCase { @@ -33,18 +41,29 @@ public class RemoteStorePinnedTimestampsIT extends RemoteStoreBaseIntegTestCase @Override protected Settings nodeSettings(int nodeOrdinal) { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." 
+ REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + REPOSITORY_NAME + ); + return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) + .put(segmentRepoTypeAttributeKey, ReloadableFsRepository.TYPE) .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) .build(); } ActionListener noOpActionListener = new ActionListener<>() { @Override - public void onResponse(Void unused) {} + public void onResponse(Void unused) { + // do nothing + } @Override - public void onFailure(Exception e) {} + public void onFailure(Exception e) { + fail(); + } }; public void testTimestampPinUnpin() throws Exception { @@ -57,15 +76,8 @@ public void testTimestampPinUnpin() throws Exception { ); Tuple> pinnedTimestampWithFetchTimestamp = RemoteStorePinnedTimestampService.getPinnedTimestamps(); - long lastFetchTimestamp = pinnedTimestampWithFetchTimestamp.v1(); - assertEquals(-1L, lastFetchTimestamp); assertEquals(Set.of(), pinnedTimestampWithFetchTimestamp.v2()); - assertThrows( - IllegalArgumentException.class, - () -> remoteStorePinnedTimestampService.pinTimestamp(1234L, "ss1", noOpActionListener) - ); - long timestamp1 = System.currentTimeMillis() + 30000L; long timestamp2 = System.currentTimeMillis() + 60000L; long timestamp3 = System.currentTimeMillis() + 900000L; @@ -197,6 +209,104 @@ public void onFailure(Exception e) { remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueMinutes(3)); } + public void testPinExceptionsOlderTimestamp() throws InterruptedException { + prepareCluster(1, 1, INDEX_NAME, 0, 2); + ensureGreen(INDEX_NAME); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + CountDownLatch latch = new CountDownLatch(1); + remoteStorePinnedTimestampService.pinTimestamp(1234L, "ss1", new LatchedActionListener<>(new ActionListener<>() { + @Override + public void 
onResponse(Void unused) { + // We expect onFailure to be called + fail(); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof IllegalArgumentException); + } + }, latch)); + + latch.await(); + } + + public void testPinExceptionsRemoteStoreCallTakeTime() throws InterruptedException, ExecutionException { + prepareCluster(1, 1, INDEX_NAME, 0, 2); + ensureGreen(INDEX_NAME); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + CountDownLatch latch = new CountDownLatch(1); + slowDownRepo(REPOSITORY_NAME, 10); + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.timeValueSeconds(1)); + long timestampToBePinned = System.currentTimeMillis() + 600000; + remoteStorePinnedTimestampService.pinTimestamp(timestampToBePinned, "ss1", new LatchedActionListener<>(new ActionListener<>() { + @Override + public void onResponse(Void unused) { + // We expect onFailure to be called + fail(); + } + + @Override + public void onFailure(Exception e) { + logger.error(e.getMessage()); + assertTrue(e instanceof RuntimeException); + assertTrue(e.getMessage().contains("Timestamp pinning took")); + + // Check if the timestamp was unpinned + remoteStorePinnedTimestampService.forceSyncPinnedTimestamps(); + assertFalse(RemoteStorePinnedTimestampService.getPinnedTimestamps().v2().contains(timestampToBePinned)); + } + }, latch)); + + latch.await(); + } + + protected void slowDownRepo(String repoName, int value) throws ExecutionException, InterruptedException { + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { repoName }); + GetRepositoriesResponse res = client().admin().cluster().getRepositories(gr).get(); + RepositoryMetadata rmd = res.repositories().get(0); + Settings.Builder settings = Settings.builder() + .put("location", rmd.settings().get("location")) + .put(REPOSITORIES_SLOWDOWN_SETTING.getKey(), 
value); + createRepository(repoName, rmd.type(), settings); + } + + public void testUnpinException() throws InterruptedException { + prepareCluster(1, 1, INDEX_NAME, 0, 2); + ensureGreen(INDEX_NAME); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + CountDownLatch latch = new CountDownLatch(1); + remoteStorePinnedTimestampService.unpinTimestamp(1234L, "dummy-entity", new LatchedActionListener<>(new ActionListener<>() { + @Override + public void onResponse(Void unused) { + // We expect onFailure to be called + fail(); + } + + @Override + public void onFailure(Exception e) { + logger.error(e.getMessage()); + assertTrue(e instanceof IllegalArgumentException); + } + }, latch)); + + latch.await(); + } + public void testLastSuccessfulFetchOfPinnedTimestampsPresentInNodeStats() throws Exception { logger.info("Starting up cluster manager"); logger.info("cluster.remote_store.pinned_timestamps.enabled set to true"); diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 12c7626e385a4..1e6eae87af53a 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -123,6 +123,8 @@ import org.opensearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksAction; import org.opensearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction; +import org.opensearch.action.admin.cluster.wlm.TransportWlmStatsAction; +import org.opensearch.action.admin.cluster.wlm.WlmStatsAction; import org.opensearch.action.admin.indices.alias.IndicesAliasesAction; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.alias.TransportIndicesAliasesAction; 
@@ -375,6 +377,7 @@ import org.opensearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.opensearch.rest.action.admin.cluster.RestVerifyRepositoryAction; +import org.opensearch.rest.action.admin.cluster.RestWlmStatsAction; import org.opensearch.rest.action.admin.cluster.dangling.RestDeleteDanglingIndexAction; import org.opensearch.rest.action.admin.cluster.dangling.RestImportDanglingIndexAction; import org.opensearch.rest.action.admin.cluster.dangling.RestListDanglingIndicesAction; @@ -622,6 +625,7 @@ public void reg actions.register(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); actions.register(RemoteInfoAction.INSTANCE, TransportRemoteInfoAction.class); actions.register(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); + actions.register(WlmStatsAction.INSTANCE, TransportWlmStatsAction.class); actions.register(RemoteStoreStatsAction.INSTANCE, TransportRemoteStoreStatsAction.class); actions.register(NodesUsageAction.INSTANCE, TransportNodesUsageAction.class); actions.register(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); @@ -828,6 +832,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestClearVotingConfigExclusionsAction()); registerHandler.accept(new RestMainAction()); registerHandler.accept(new RestNodesInfoAction(settingsFilter)); + registerHandler.accept(new RestWlmStatsAction()); registerHandler.accept(new RestRemoteClusterInfoAction()); registerHandler.accept(new RestNodesStatsAction()); registerHandler.accept(new RestNodesUsageAction()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/TransportWlmStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/TransportWlmStatsAction.java new file mode 100644 index 0000000000000..9c2fb3f1689ec --- /dev/null +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/TransportWlmStatsAction.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.wlm; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.opensearch.wlm.QueryGroupService; +import org.opensearch.wlm.stats.WlmStats; + +import java.io.IOException; +import java.util.List; + +/** + * Transport action for obtaining WlmStats + * + * @opensearch.experimental + */ +public class TransportWlmStatsAction extends TransportNodesAction { + + final QueryGroupService queryGroupService; + + @Inject + public TransportWlmStatsAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + QueryGroupService queryGroupService, + ActionFilters actionFilters + ) { + super( + WlmStatsAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + WlmStatsRequest::new, + WlmStatsRequest::new, + ThreadPool.Names.MANAGEMENT, + WlmStats.class + ); + this.queryGroupService = queryGroupService; + } + + @Override + protected WlmStatsResponse newResponse(WlmStatsRequest request, List wlmStats, List failures) { + return new WlmStatsResponse(clusterService.getClusterName(), wlmStats, failures); + } + + @Override + protected WlmStatsRequest newNodeRequest(WlmStatsRequest request) { + return request; + } + + @Override + protected WlmStats newNodeResponse(StreamInput in) throws 
IOException { + return new WlmStats(in); + } + + @Override + protected WlmStats nodeOperation(WlmStatsRequest wlmStatsRequest) { + assert transportService.getLocalNode() != null; + return new WlmStats( + transportService.getLocalNode(), + queryGroupService.nodeStats(wlmStatsRequest.getQueryGroupIds(), wlmStatsRequest.isBreach()) + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsAction.java new file mode 100644 index 0000000000000..2dfb10fc5dc90 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.wlm; + +import org.opensearch.action.ActionType; + +/** + * Transport action for obtaining Workload Management Stats. + * + * @opensearch.experimental + */ +public class WlmStatsAction extends ActionType { + public static final WlmStatsAction INSTANCE = new WlmStatsAction(); + public static final String NAME = "cluster:monitor/wlm/stats"; + + private WlmStatsAction() { + super(NAME, WlmStatsResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsRequest.java new file mode 100644 index 0000000000000..bf4f79faff478 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsRequest.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.wlm; + +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +/** + * A request to get Workload Management Stats + */ +@ExperimentalApi +public class WlmStatsRequest extends BaseNodesRequest { + + private final Set queryGroupIds; + private final Boolean breach; + + public WlmStatsRequest(StreamInput in) throws IOException { + super(in); + this.queryGroupIds = new HashSet<>(Set.of(in.readStringArray())); + this.breach = in.readOptionalBoolean(); + } + + /** + * Get QueryGroup stats from nodes based on the nodes ids specified. If none are passed, stats + * for all nodes will be returned. + */ + public WlmStatsRequest(String[] nodesIds, Set queryGroupIds, Boolean breach) { + super(false, nodesIds); + this.queryGroupIds = queryGroupIds; + this.breach = breach; + } + + public WlmStatsRequest() { + super(false, (String[]) null); + queryGroupIds = new HashSet<>(); + this.breach = false; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(queryGroupIds.toArray(new String[0])); + out.writeOptionalBoolean(breach); + } + + public Set getQueryGroupIds() { + return queryGroupIds; + } + + public Boolean isBreach() { + return breach; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponse.java new file mode 100644 index 0000000000000..2ce1b09a61fc6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponse.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made 
to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.wlm; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.wlm.stats.QueryGroupStats; +import org.opensearch.wlm.stats.WlmStats; + +import java.io.IOException; +import java.util.List; + +/** + * A response for obtaining Workload Management Stats + */ +@ExperimentalApi +public class WlmStatsResponse extends BaseNodesResponse implements ToXContentFragment { + + WlmStatsResponse(StreamInput in) throws IOException { + super(in); + } + + WlmStatsResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); + } + + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(WlmStats::new); + } + + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeList(nodes); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + for (WlmStats wlmStats : getNodes()) { + builder.startObject(wlmStats.getNode().getId()); + QueryGroupStats queryGroupStats = wlmStats.getQueryGroupStats(); + queryGroupStats.toXContent(builder, params); + builder.endObject(); + } + return builder; + } + + @Override + public String toString() { + try { + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + 
return builder.toString(); + } catch (IOException e) { + return "{ \"error\" : \"" + e.getMessage() + "\"}"; + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/wlm/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/package-info.java new file mode 100644 index 0000000000000..13724b335e7c6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/wlm/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** WlmStats transport handlers. */ +package org.opensearch.action.admin.cluster.wlm; diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index 05f09c1a6e661..5ce4d442fbe0b 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -137,6 +137,8 @@ import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest; import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksResponse; +import org.opensearch.action.admin.cluster.wlm.WlmStatsRequest; +import org.opensearch.action.admin.cluster.wlm.WlmStatsResponse; import org.opensearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; import org.opensearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; import org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; @@ -320,6 +322,13 @@ public interface ClusterAdminClient extends OpenSearchClient { */ NodesStatsRequestBuilder prepareNodesStats(String... nodesIds); + /** + * QueryGroup stats of the cluster. 
+ * @param request The wlmStatsRequest + * @param listener A listener to be notified with a result + */ + void wlmStats(WlmStatsRequest request, ActionListener listener); + void remoteStoreStats(RemoteStoreStatsRequest request, ActionListener listener); RemoteStoreStatsRequestBuilder prepareRemoteStoreStats(String index, String shardId); diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 509cd732357d6..f4683ab516cef 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -179,6 +179,9 @@ import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest; import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksResponse; +import org.opensearch.action.admin.cluster.wlm.WlmStatsAction; +import org.opensearch.action.admin.cluster.wlm.WlmStatsRequest; +import org.opensearch.action.admin.cluster.wlm.WlmStatsResponse; import org.opensearch.action.admin.indices.alias.IndicesAliasesAction; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; @@ -918,6 +921,11 @@ public NodesStatsRequestBuilder prepareNodesStats(String... 
nodesIds) { return new NodesStatsRequestBuilder(this, NodesStatsAction.INSTANCE).setNodesIds(nodesIds); } + @Override + public void wlmStats(final WlmStatsRequest request, final ActionListener listener) { + execute(WlmStatsAction.INSTANCE, request, listener); + } + @Override public void remoteStoreStats(final RemoteStoreStatsRequest request, final ActionListener listener) { execute(RemoteStoreStatsAction.INSTANCE, request, listener); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index e5b07f867d609..9cffc7051d756 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -108,7 +108,7 @@ public CoordinationState( // ToDo: revisit this check while making the setting dynamic this.isRemotePublicationEnabled = isRemoteStateEnabled && REMOTE_PUBLICATION_SETTING.get(settings) - && localNode.isRemoteStatePublicationConfigured(); + && localNode.isRemoteStatePublicationEnabled(); } public boolean isRemotePublicationEnabled() { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index aca8653be4dd8..13033b670d44b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -513,10 +513,10 @@ private static void ensureRemoteClusterStateNodesCompatibility(DiscoveryNode joi assert existingNodes.isEmpty() == false; Optional remotePublicationNode = existingNodes.stream() - .filter(DiscoveryNode::isRemoteStatePublicationConfigured) + .filter(DiscoveryNode::isRemoteStatePublicationEnabled) .findFirst(); - if (remotePublicationNode.isPresent() && joiningNode.isRemoteStatePublicationConfigured()) { + 
if (remotePublicationNode.isPresent() && joiningNode.isRemoteStatePublicationEnabled()) { ensureRepositoryCompatibility(joiningNode, remotePublicationNode.get(), REMOTE_CLUSTER_PUBLICATION_REPO_NAME_ATTRIBUTES); } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 6277b70f9a7c8..42aa55433dd5f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -370,7 +370,7 @@ private boolean validateRemotePublicationConfiguredOnAllNodes(DiscoveryNodes dis assert ClusterMetadataManifest.getCodecForVersion(discoveryNodes.getMinNodeVersion()) >= ClusterMetadataManifest.CODEC_V0; for (DiscoveryNode node : discoveryNodes.getNodes().values()) { // if a node is non-remote then created local publication context - if (node.isRemoteStatePublicationConfigured() == false) { + if (node.isRemoteStatePublicationEnabled() == false) { return false; } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index 4e7e31bbb9222..8c350d6b9cef5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -140,6 +140,7 @@ public void updateSettings( validateRefreshIntervalSettings(normalizedSettings, clusterService.getClusterSettings()); validateTranslogDurabilitySettings(normalizedSettings, clusterService.getClusterSettings(), clusterService.getSettings()); + final int defaultReplicaCount = clusterService.getClusterSettings().get(Metadata.DEFAULT_REPLICA_COUNT_SETTING); Settings.Builder settingsForClosedIndices = Settings.builder(); 
Settings.Builder settingsForOpenIndices = Settings.builder(); @@ -248,7 +249,10 @@ public ClusterState execute(ClusterState currentState) { } if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) { - final int updatedNumberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(openSettings); + final int updatedNumberOfReplicas = openSettings.getAsInt( + IndexMetadata.SETTING_NUMBER_OF_REPLICAS, + defaultReplicaCount + ); if (preserveExisting == false) { for (Index index : request.indices()) { if (index.getName().charAt(0) != '.') { @@ -329,15 +333,13 @@ public ClusterState execute(ClusterState currentState) { /* * The setting index.number_of_replicas is special; we require that this setting has a value in the index. When * creating the index, we ensure this by explicitly providing a value for the setting to the default (one) if - * there is a not value provided on the source of the index creation. A user can update this setting though, - * including updating it to null, indicating that they want to use the default value. In this case, we again - * have to provide an explicit value for the setting to the default (one). + * there is no value provided on the source of the index creation or "cluster.default_number_of_replicas" is + * not set. A user can update this setting though, including updating it to null, indicating that they want to + * use the value configured by cluster settings or a default value 1. In this case, we again have to provide + * an explicit value for the setting. 
*/ if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(indexSettings) == false) { - indexSettings.put( - IndexMetadata.SETTING_NUMBER_OF_REPLICAS, - IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(Settings.EMPTY) - ); + indexSettings.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, defaultReplicaCount); } Settings finalSettings = indexSettings.build(); indexScopedSettings.validate( diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index b06c2ef0bdbe4..8c9a37a767ede 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -518,7 +518,7 @@ public boolean isRemoteStoreNode() { * Returns whether settings required for remote cluster state publication is configured * @return true if the node contains remote cluster state node attribute and remote routing table node attribute */ - public boolean isRemoteStatePublicationConfigured() { + public boolean isRemoteStatePublicationEnabled() { return this.getAttributes() .keySet() .stream() diff --git a/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java b/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java index cc0468efb243e..5563511aa8d45 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java @@ -90,8 +90,6 @@ public void setMaxExpansions(int maxExpansions) { /** * Sets the phrase slop for this query. 
- * - * @see org.apache.lucene.search.PhraseQuery.Builder#getSlop() */ public int getSlop() { return slop; diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index 4698440f08036..7fbb38c47572c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -62,8 +62,8 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; import org.opensearch.search.approximate.ApproximatePointRangeQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; @@ -113,21 +113,6 @@ public static DateFormatter getDefaultDateTimeFormatter() { : LEGACY_DEFAULT_DATE_TIME_FORMATTER; } - public static Query getDefaultQuery(Query pointRangeQuery, Query dvQuery, String name, long l, long u) { - return FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY_SETTING) - ? new ApproximateIndexOrDocValuesQuery( - pointRangeQuery, - new ApproximatePointRangeQuery(name, pack(new long[] { l }).bytes, pack(new long[] { u }).bytes, new long[] { l }.length) { - @Override - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }, - dvQuery - ) - : new IndexOrDocValuesQuery(pointRangeQuery, dvQuery); - } - /** * Resolution of the date time * @@ -488,22 +473,42 @@ public Query rangeQuery( } DateMathParser parser = forcedDateParser == null ? dateMathParser : forcedDateParser; return dateRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, timeZone, parser, context, resolution, (l, u) -> { - Query pointRangeQuery = isSearchable() ? 
LongPoint.newRangeQuery(name(), l, u) : null; Query dvQuery = hasDocValues() ? SortedNumericDocValuesField.newSlowRangeQuery(name(), l, u) : null; - if (isSearchable() && hasDocValues()) { - Query query = getDefaultQuery(pointRangeQuery, dvQuery, name(), l, u); - if (context.indexSortedOnField(name())) { - query = new IndexSortSortedNumericDocValuesRangeQuery(name(), l, u, query); + if (isSearchable()) { + Query pointRangeQuery = LongPoint.newRangeQuery(name(), l, u); + Query query; + if (dvQuery != null) { + query = new IndexOrDocValuesQuery(pointRangeQuery, dvQuery); + if (context.indexSortedOnField(name())) { + query = new IndexSortSortedNumericDocValuesRangeQuery(name(), l, u, query); + } + } else { + query = pointRangeQuery; + } + if (FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY_SETTING)) { + return new ApproximateScoreQuery( + query, + new ApproximatePointRangeQuery( + name(), + pack(new long[] { l }).bytes, + pack(new long[] { u }).bytes, + new long[] { l }.length + ) { + @Override + protected String toString(int dimension, byte[] value) { + return Long.toString(LongPoint.decodeDimension(value, 0)); + } + } + ); } return query; } - if (hasDocValues()) { - if (context.indexSortedOnField(name())) { - dvQuery = new IndexSortSortedNumericDocValuesRangeQuery(name(), l, u, dvQuery); - } - return dvQuery; + + // Not searchable. Must have doc values. 
+ if (context.indexSortedOnField(name())) { + dvQuery = new IndexSortSortedNumericDocValuesRangeQuery(name(), l, u, dvQuery); } - return pointRangeQuery; + return dvQuery; }); } diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index 03d841d13b7f7..73a79a54ca588 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -27,6 +27,7 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.common.Priority; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; @@ -95,7 +96,7 @@ public RemoteStoreRestoreService( * @param listener restore listener */ public void restore(RestoreRemoteStoreRequest request, final ActionListener listener) { - clusterService.submitStateUpdateTask("restore[remote_store]", new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("restore[remote_store]", new ClusterStateUpdateTask(Priority.URGENT) { String restoreUUID; RestoreInfo restoreInfo = null; diff --git a/server/src/main/java/org/opensearch/index/search/MatchQuery.java b/server/src/main/java/org/opensearch/index/search/MatchQuery.java index ec6755ea25703..86ea799ab311d 100644 --- a/server/src/main/java/org/opensearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/opensearch/index/search/MatchQuery.java @@ -227,9 +227,6 @@ public void setOccur(BooleanClause.Occur occur) { this.occur = occur; } - /** - * @deprecated See {@link MatchQueryBuilder#setCommonTermsCutoff(Float)} for more details - */ @Deprecated public void setCommonTermsCutoff(Float cutoff) { this.commonTermsCutoff = cutoff; diff --git 
a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java index 24f0cb15ddb25..5341f9507bef4 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -32,7 +32,7 @@ public interface SegmentReplicationSource { /** * Get Metadata for a ReplicationCheckpoint. * - * @param replicationId {@link long} - ID of the replication event. + * @param replicationId long - ID of the replication event. * @param checkpoint {@link ReplicationCheckpoint} Checkpoint to fetch metadata for. * @param listener {@link ActionListener} listener that completes with a {@link CheckpointInfoResponse}. */ @@ -41,7 +41,7 @@ public interface SegmentReplicationSource { /** * Fetch the requested segment files. Passes a listener that completes when files are stored locally. * - * @param replicationId {@link long} - ID of the replication event. + * @param replicationId long - ID of the replication event. * @param checkpoint {@link ReplicationCheckpoint} Checkpoint to fetch metadata for. * @param filesToFetch {@link List} List of files to fetch. * @param indexShard {@link IndexShard} Reference to the IndexShard. diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 5c965b06a4b69..584d95b9ff6b5 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -354,12 +354,12 @@ public class Node implements Closeable { ); /** - * controls whether the node is allowed to persist things like metadata to disk - * Note that this does not control whether the node stores actual indices (see - * {@link #NODE_DATA_SETTING}). However, if this is false, {@link #NODE_DATA_SETTING} - * and {@link #NODE_MASTER_SETTING} must also be false. 
- * - */ + * controls whether the node is allowed to persist things like metadata to disk + * Note that this does not control whether the node stores actual indices (see + * {@link #NODE_DATA_SETTING}). However, if this is false, {@link #NODE_DATA_SETTING} + * and {@link #NODE_MASTER_SETTING} must also be false. + * + */ public static final Setting NODE_LOCAL_STORAGE_SETTING = Setting.boolSetting( "node.local_storage", true, diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java index a3382d8568ec5..98fcad0e6c496 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java @@ -130,16 +130,16 @@ public static Map> fetchPinnedTimestamps(Settings settings, Re * @throws IllegalArgumentException If the timestamp is less than the current time minus one second */ public void pinTimestamp(long timestamp, String pinningEntity, ActionListener listener) { - // If a caller uses current system time to pin the timestamp, following check will almost always fail. - // So, we allow pinning timestamp in the past upto some buffer - long lookbackIntervalInMills = RemoteStoreSettings.getPinnedTimestampsLookbackInterval().millis(); - if (timestamp < (System.currentTimeMillis() - lookbackIntervalInMills)) { - throw new IllegalArgumentException( - "Timestamp to be pinned is less than current timestamp - value of cluster.remote_store.pinned_timestamps.lookback_interval" - ); - } - long startTime = System.nanoTime(); try { + // If a caller uses current system time to pin the timestamp, following check will almost always fail. 
+ // So, we allow pinning timestamp in the past upto some buffer + long lookbackIntervalInMills = RemoteStoreSettings.getPinnedTimestampsLookbackInterval().millis(); + if (timestamp < (System.currentTimeMillis() - lookbackIntervalInMills)) { + throw new IllegalArgumentException( + "Timestamp to be pinned is less than current timestamp - value of cluster.remote_store.pinned_timestamps.lookback_interval" + ); + } + long startTime = System.nanoTime(); logger.debug("Pinning timestamp = {} against entity = {}", timestamp, pinningEntity); blobContainer.writeBlob(getBlobName(timestamp, pinningEntity), new ByteArrayInputStream(new byte[0]), 0, true); long elapsedTime = System.nanoTime() - startTime; @@ -155,7 +155,7 @@ public void pinTimestamp(long timestamp, String pinningEntity, ActionListener findMatchingShardPaths(String indexId, Map snapshotShardPaths) { - return snapshotShardPaths.keySet().stream().filter(s -> s.startsWith(indexId)).collect(Collectors.toList()); + return snapshotShardPaths.keySet() + .stream() + .filter(s -> (s.startsWith(indexId) || s.startsWith(SnapshotShardPaths.FILE_PREFIX + indexId))) + .collect(Collectors.toList()); } /** @@ -2546,11 +2550,11 @@ public void finalizeSnapshot( */ private void cleanupRedundantSnapshotShardPaths(Set updatedShardPathsIndexIds) { Set updatedIndexIds = updatedShardPathsIndexIds.stream() - .map(s -> s.split("\\" + SnapshotShardPaths.DELIMITER)[0]) + .map(s -> getIndexId(s.split("\\" + SnapshotShardPaths.DELIMITER)[0])) .collect(Collectors.toSet()); Set indexIdShardPaths = getSnapshotShardPaths().keySet(); List staleShardPaths = indexIdShardPaths.stream().filter(s -> updatedShardPathsIndexIds.contains(s) == false).filter(s -> { - String indexId = s.split("\\" + SnapshotShardPaths.DELIMITER)[0]; + String indexId = getIndexId(s.split("\\" + SnapshotShardPaths.DELIMITER)[0]); return updatedIndexIds.contains(indexId); }).collect(Collectors.toList()); try { @@ -2595,7 +2599,7 @@ String writeIndexShardPaths(IndexId indexId, 
SnapshotId snapshotId, int shardCou List paths = getShardPaths(indexId, shardCount); int pathType = indexId.getShardPathType(); int pathHashAlgorithm = FNV_1A_COMPOSITE_1.getCode(); - String blobName = String.join( + String name = String.join( SnapshotShardPaths.DELIMITER, indexId.getId(), indexId.getName(), @@ -2611,9 +2615,9 @@ String writeIndexShardPaths(IndexId indexId, SnapshotId snapshotId, int shardCou PathType.fromCode(pathType), PathHashAlgorithm.fromCode(pathHashAlgorithm) ); - SNAPSHOT_SHARD_PATHS_FORMAT.write(shardPaths, snapshotShardPathBlobContainer(), blobName); + SNAPSHOT_SHARD_PATHS_FORMAT.write(shardPaths, snapshotShardPathBlobContainer(), name); logShardPathsOperationSuccess(indexId, snapshotId); - return blobName; + return SnapshotShardPaths.FILE_PREFIX + name; } catch (IOException e) { logShardPathsOperationWarning(indexId, snapshotId, e); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestWlmStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestWlmStatsAction.java new file mode 100644 index 0000000000000..51bd313e74df0 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestWlmStatsAction.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.wlm.WlmStatsRequest; +import org.opensearch.client.node.NodeClient; +import org.opensearch.core.common.Strings; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestActions; + +import java.io.IOException; +import java.util.List; +import java.util.Set; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Transport action to get Workload Management stats + * + * @opensearch.experimental + */ +public class RestWlmStatsAction extends BaseRestHandler { + + @Override + public List routes() { + return unmodifiableList( + asList( + new Route(GET, "_wlm/stats"), + new Route(GET, "_wlm/{nodeId}/stats"), + new Route(GET, "_wlm/stats/{queryGroupId}"), + new Route(GET, "_wlm/{nodeId}/stats/{queryGroupId}") + ) + ); + } + + @Override + public String getName() { + return "wlm_stats_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); + Set queryGroupIds = Strings.tokenizeByCommaToSet(request.param("queryGroupId", "_all")); + Boolean breach = request.hasParam("breach") ? 
Boolean.parseBoolean(request.param("breach")) : null; + WlmStatsRequest wlmStatsRequest = new WlmStatsRequest(nodesIds, queryGroupIds, breach); + return channel -> client.admin().cluster().wlmStats(wlmStatsRequest, new RestActions.NodesResponseRestListener<>(channel)); + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Helper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Helper.java index 17da7e5712be8..356380a687003 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Helper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/Helper.java @@ -23,7 +23,7 @@ import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.query.DateRangeIncludingNowQuery; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.search.internal.SearchContext; import java.io.IOException; @@ -55,7 +55,7 @@ private Helper() {} queryWrappers.put(FunctionScoreQuery.class, q -> ((FunctionScoreQuery) q).getSubQuery()); queryWrappers.put(DateRangeIncludingNowQuery.class, q -> ((DateRangeIncludingNowQuery) q).getQuery()); queryWrappers.put(IndexOrDocValuesQuery.class, q -> ((IndexOrDocValuesQuery) q).getIndexQuery()); - queryWrappers.put(ApproximateIndexOrDocValuesQuery.class, q -> ((ApproximateIndexOrDocValuesQuery) q).getOriginalQuery()); + queryWrappers.put(ApproximateScoreQuery.class, q -> ((ApproximateScoreQuery) q).getOriginalQuery()); } /** diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQuery.java deleted file mode 100644 index b99e0a0cbf808..0000000000000 ---
a/server/src/main/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQuery.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.search.approximate; - -import org.apache.lucene.search.IndexOrDocValuesQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryVisitor; - -/** - * A wrapper around {@link IndexOrDocValuesQuery} that can be used to run approximate queries. - * It delegates to either {@link ApproximateQuery} or {@link IndexOrDocValuesQuery} based on whether the query can be approximated or not. - * @see ApproximateQuery - */ -public final class ApproximateIndexOrDocValuesQuery extends ApproximateScoreQuery { - - private final ApproximateQuery approximateIndexQuery; - private final IndexOrDocValuesQuery indexOrDocValuesQuery; - - public ApproximateIndexOrDocValuesQuery(Query indexQuery, ApproximateQuery approximateIndexQuery, Query dvQuery) { - super(new IndexOrDocValuesQuery(indexQuery, dvQuery), approximateIndexQuery); - this.approximateIndexQuery = approximateIndexQuery; - this.indexOrDocValuesQuery = new IndexOrDocValuesQuery(indexQuery, dvQuery); - } - - @Override - public String toString(String field) { - return "ApproximateIndexOrDocValuesQuery(indexQuery=" - + indexOrDocValuesQuery.getIndexQuery().toString(field) - + ", approximateIndexQuery=" - + approximateIndexQuery.toString(field) - + ", dvQuery=" - + indexOrDocValuesQuery.getRandomAccessQuery().toString(field) - + ")"; - } - - @Override - public void visit(QueryVisitor visitor) { - indexOrDocValuesQuery.visit(visitor); - } - - @Override - public boolean equals(Object obj) { - if (sameClassAs(obj) == false) { - return false; - } - return true; - } - - @Override - public int hashCode() { - int h = classHash(); - h = 31 * h + 
indexOrDocValuesQuery.getIndexQuery().hashCode(); - h = 31 * h + indexOrDocValuesQuery.getRandomAccessQuery().hashCode(); - return h; - } -} diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java index 8076da6ab970b..6ff01f5f39d36 100644 --- a/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java +++ b/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java @@ -433,9 +433,6 @@ public boolean canApproximate(SearchContext context) { if (context.aggregations() != null) { return false; } - if (!(context.query() instanceof ApproximateIndexOrDocValuesQuery)) { - return false; - } // size 0 could be set for caching if (context.from() + context.size() == 0) { this.setSize(10_000); diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java index d1dd32b239f28..2395142c606ae 100644 --- a/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java +++ b/server/src/main/java/org/opensearch/search/approximate/ApproximateScoreQuery.java @@ -21,12 +21,12 @@ * Entry-point for the approximation framework. * This class is heavily inspired by {@link org.apache.lucene.search.IndexOrDocValuesQuery}. It acts as a wrapper that consumer two queries, a regular query and an approximate version of the same. By default, it executes the regular query and returns {@link Weight#scorer} for the original query. At run-time, depending on certain constraints, we can re-write the {@code Weight} to use the approximate weight instead. 
*/ -public class ApproximateScoreQuery extends Query { +public final class ApproximateScoreQuery extends Query { private final Query originalQuery; private final ApproximateQuery approximationQuery; - protected Query resolvedQuery; + Query resolvedQuery; public ApproximateScoreQuery(Query originalQuery, ApproximateQuery approximationQuery) { this.originalQuery = originalQuery; diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardPaths.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardPaths.java index 88af14e2232f9..878c2baba4ce9 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardPaths.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardPaths.java @@ -29,7 +29,8 @@ public class SnapshotShardPaths implements ToXContent { public static final String DELIMITER = "."; - public static final String FILE_NAME_FORMAT = "%s"; + public static final String FILE_PREFIX = "snapshot_path_"; + public static final String FILE_NAME_FORMAT = FILE_PREFIX + "%s"; private static final String PATHS_FIELD = "paths"; private static final String INDEX_ID_FIELD = "indexId"; @@ -101,7 +102,7 @@ public static ShardInfo parseShardPath(String shardPath) { throw new IllegalArgumentException("Invalid shard path format: " + shardPath); } try { - IndexId indexId = new IndexId(parts[1], parts[0], Integer.parseInt(parts[3])); + IndexId indexId = new IndexId(parts[1], getIndexId(parts[0]), Integer.parseInt(parts[3])); int shardCount = Integer.parseInt(parts[2]); return new ShardInfo(indexId, shardCount); } catch (NumberFormatException e) { @@ -109,6 +110,13 @@ public static ShardInfo parseShardPath(String shardPath) { } } + public static String getIndexId(String indexIdField) { + if (indexIdField.startsWith(FILE_PREFIX)) { + return indexIdField.substring(FILE_PREFIX.length()); + } + return indexIdField; + } + /** * Represents parsed information from a shard path. 
* This class encapsulates the index ID and shard count extracted from a shard path string. diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index d795fd252b7fc..e0b15e54f6e2e 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -293,12 +293,7 @@ public ThreadPool( ); builders.put( Names.REMOTE_STATE_READ, - new ScalingExecutorBuilder( - Names.REMOTE_STATE_READ, - 1, - twiceAllocatedProcessors(allocatedProcessors), - TimeValue.timeValueMinutes(5) - ) + new ScalingExecutorBuilder(Names.REMOTE_STATE_READ, 1, boundedBy(4 * allocatedProcessors, 4, 32), TimeValue.timeValueMinutes(5)) ); builders.put( Names.INDEX_SEARCHER, diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupService.java b/server/src/main/java/org/opensearch/wlm/QueryGroupService.java index cda5916db26f3..cb0be5597766a 100644 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupService.java +++ b/server/src/main/java/org/opensearch/wlm/QueryGroupService.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.search.SearchShardTask; +import org.opensearch.ResourceNotFoundException; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.metadata.Metadata; @@ -42,6 +42,7 @@ /** * As of now this is a stub and main implementation PR will be raised soon.Coming PR will collate these changes with core QueryGroupService changes + * @opensearch.experimental */ public class QueryGroupService extends AbstractLifecycleComponent implements @@ -49,7 +50,6 @@ public class QueryGroupService extends AbstractLifecycleComponent TaskResourceTrackingService.TaskCompletionListener { private static final Logger logger = LogManager.getLogger(QueryGroupService.class); - private 
final QueryGroupTaskCancellationService taskCancellationService; private volatile Scheduler.Cancellable scheduledFuture; private final ThreadPool threadPool; @@ -205,16 +205,48 @@ public void incrementFailuresFor(final String queryGroupId) { /** * @return node level query group stats */ - public QueryGroupStats nodeStats() { + public QueryGroupStats nodeStats(Set queryGroupIds, Boolean requestedBreached) { final Map statsHolderMap = new HashMap<>(); - for (Map.Entry queryGroupsState : queryGroupsStateAccessor.getQueryGroupStateMap().entrySet()) { - final String queryGroupId = queryGroupsState.getKey(); - final QueryGroupState currentState = queryGroupsState.getValue(); + Map existingStateMap = queryGroupsStateAccessor.getQueryGroupStateMap(); + if (!queryGroupIds.contains("_all")) { + for (String id : queryGroupIds) { + if (!existingStateMap.containsKey(id)) { + throw new ResourceNotFoundException("QueryGroup with id " + id + " does not exist"); + } + } + } + if (existingStateMap != null) { + existingStateMap.forEach((queryGroupId, currentState) -> { + boolean shouldInclude = queryGroupIds.contains("_all") || queryGroupIds.contains(queryGroupId); + if (shouldInclude) { + if (requestedBreached == null || requestedBreached == resourceLimitBreached(queryGroupId, currentState)) { + statsHolderMap.put(queryGroupId, QueryGroupStatsHolder.from(currentState)); + } + } + }); + } + return new QueryGroupStats(statsHolderMap); + } - statsHolderMap.put(queryGroupId, QueryGroupStatsHolder.from(currentState)); + /** + * @return if the QueryGroup breaches any resource limit based on the LastRecordedUsage + */ + public boolean resourceLimitBreached(String id, QueryGroupState currentState) { + QueryGroup queryGroup = clusterService.state().metadata().queryGroups().get(id); + if (queryGroup == null) { + throw new ResourceNotFoundException("QueryGroup with id " + id + " does not exist"); } - return new QueryGroupStats(statsHolderMap); + for (ResourceType resourceType : 
TRACKED_RESOURCES) { + if (queryGroup.getResourceLimits().containsKey(resourceType)) { + final double threshold = getNormalisedRejectionThreshold(queryGroup.getResourceLimits().get(resourceType), resourceType); + final double lastRecordedUsage = currentState.getResourceState().get(resourceType).getLastRecordedUsage(); + if (threshold < lastRecordedUsage) { + return true; + } + } + } + return false; } /** @@ -321,10 +353,6 @@ public void onTaskCompleted(Task task) { queryGroupId = QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER.get(); } - if (task instanceof SearchShardTask) { - queryGroupsStateAccessor.getQueryGroupState(queryGroupId).shardCompletions.inc(); - } else { - queryGroupsStateAccessor.getQueryGroupState(queryGroupId).completions.inc(); - } + queryGroupsStateAccessor.getQueryGroupState(queryGroupId).totalCompletions.inc(); } } diff --git a/server/src/main/java/org/opensearch/wlm/stats/QueryGroupState.java b/server/src/main/java/org/opensearch/wlm/stats/QueryGroupState.java index cbc7046a79464..a082ed159e829 100644 --- a/server/src/main/java/org/opensearch/wlm/stats/QueryGroupState.java +++ b/server/src/main/java/org/opensearch/wlm/stats/QueryGroupState.java @@ -21,12 +21,7 @@ public class QueryGroupState { /** * co-ordinator level completions at the query group level, this is a cumulative counter since the Opensearch start time */ - public final CounterMetric completions = new CounterMetric(); - - /** - * shard level completions at the query group level, this is a cumulative counter since the Opensearch start time - */ - public final CounterMetric shardCompletions = new CounterMetric(); + public final CounterMetric totalCompletions = new CounterMetric(); /** * rejections at the query group level, this is a cumulative counter since the OpenSearch start time @@ -61,16 +56,8 @@ public QueryGroupState() { * * @return co-ordinator completions in the query group */ - public long getCompletions() { - return completions.count(); - } - - /** - * - * @return shard 
completions in the query group - */ - public long getShardCompletions() { - return shardCompletions.count(); + public long getTotalCompletions() { + return totalCompletions.count(); } /** diff --git a/server/src/main/java/org/opensearch/wlm/stats/QueryGroupStats.java b/server/src/main/java/org/opensearch/wlm/stats/QueryGroupStats.java index 9d74201de252b..42ce3ac0019db 100644 --- a/server/src/main/java/org/opensearch/wlm/stats/QueryGroupStats.java +++ b/server/src/main/java/org/opensearch/wlm/stats/QueryGroupStats.java @@ -82,21 +82,23 @@ public int hashCode() { return Objects.hash(stats); } + public Map getStats() { + return stats; + } + /** * This is a stats holder object which will hold the data for a query group at a point in time * the instance will only be created on demand through stats api */ public static class QueryGroupStatsHolder implements ToXContentObject, Writeable { - public static final String COMPLETIONS = "completions"; - public static final String REJECTIONS = "rejections"; + public static final String COMPLETIONS = "total_completions"; + public static final String REJECTIONS = "total_rejections"; public static final String TOTAL_CANCELLATIONS = "total_cancellations"; public static final String FAILURES = "failures"; - public static final String SHARD_COMPLETIONS = "shard_completions"; private long completions; - private long shardCompletions; private long rejections; private long failures; - private long totalCancellations; + private long cancellations; private Map resourceStats; // this is needed to support the factory method @@ -106,15 +108,13 @@ public QueryGroupStatsHolder( long completions, long rejections, long failures, - long totalCancellations, - long shardCompletions, + long cancellations, Map resourceStats ) { this.completions = completions; this.rejections = rejections; this.failures = failures; - this.shardCompletions = shardCompletions; - this.totalCancellations = totalCancellations; + this.cancellations = cancellations; 
this.resourceStats = resourceStats; } @@ -122,8 +122,7 @@ public QueryGroupStatsHolder(StreamInput in) throws IOException { this.completions = in.readVLong(); this.rejections = in.readVLong(); this.failures = in.readVLong(); - this.totalCancellations = in.readVLong(); - this.shardCompletions = in.readVLong(); + this.cancellations = in.readVLong(); this.resourceStats = in.readMap((i) -> ResourceType.fromName(i.readString()), ResourceStats::new); } @@ -141,11 +140,10 @@ public static QueryGroupStatsHolder from(QueryGroupState queryGroupState) { resourceStatsMap.put(resourceTypeStateEntry.getKey(), ResourceStats.from(resourceTypeStateEntry.getValue())); } - statsHolder.completions = queryGroupState.getCompletions(); + statsHolder.completions = queryGroupState.getTotalCompletions(); statsHolder.rejections = queryGroupState.getTotalRejections(); statsHolder.failures = queryGroupState.getFailures(); - statsHolder.totalCancellations = queryGroupState.getTotalCancellations(); - statsHolder.shardCompletions = queryGroupState.getShardCompletions(); + statsHolder.cancellations = queryGroupState.getTotalCancellations(); statsHolder.resourceStats = resourceStatsMap; return statsHolder; } @@ -160,8 +158,7 @@ public static void writeTo(StreamOutput out, QueryGroupStatsHolder statsHolder) out.writeVLong(statsHolder.completions); out.writeVLong(statsHolder.rejections); out.writeVLong(statsHolder.failures); - out.writeVLong(statsHolder.totalCancellations); - out.writeVLong(statsHolder.shardCompletions); + out.writeVLong(statsHolder.cancellations); out.writeMap(statsHolder.resourceStats, (o, val) -> o.writeString(val.getName()), ResourceStats::writeTo); } @@ -173,10 +170,10 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(COMPLETIONS, completions); - builder.field(SHARD_COMPLETIONS, shardCompletions); + // builder.field(SHARD_COMPLETIONS, 
shardCompletions); builder.field(REJECTIONS, rejections); - builder.field(FAILURES, failures); - builder.field(TOTAL_CANCELLATIONS, totalCancellations); + // builder.field(FAILURES, failures); + builder.field(TOTAL_CANCELLATIONS, cancellations); for (ResourceType resourceType : ResourceType.getSortedValues()) { ResourceStats resourceStats1 = resourceStats.get(resourceType); @@ -195,15 +192,14 @@ public boolean equals(Object o) { QueryGroupStatsHolder that = (QueryGroupStatsHolder) o; return completions == that.completions && rejections == that.rejections - && shardCompletions == that.shardCompletions && Objects.equals(resourceStats, that.resourceStats) && failures == that.failures - && totalCancellations == that.totalCancellations; + && cancellations == that.cancellations; } @Override public int hashCode() { - return Objects.hash(completions, shardCompletions, rejections, totalCancellations, failures, resourceStats); + return Objects.hash(completions, rejections, cancellations, failures, resourceStats); } } @@ -213,6 +209,7 @@ public int hashCode() { public static class ResourceStats implements ToXContentObject, Writeable { public static final String CURRENT_USAGE = "current_usage"; public static final String CANCELLATIONS = "cancellations"; + public static final String REJECTIONS = "rejections"; public static final double PRECISION = 1e-9; private final double currentUsage; private final long cancellations; @@ -264,7 +261,7 @@ public void writeTo(StreamOutput out) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(CURRENT_USAGE, currentUsage); builder.field(CANCELLATIONS, cancellations); - builder.field(QueryGroupStatsHolder.REJECTIONS, rejections); + builder.field(REJECTIONS, rejections); return builder; } diff --git a/server/src/main/java/org/opensearch/wlm/stats/WlmStats.java b/server/src/main/java/org/opensearch/wlm/stats/WlmStats.java new file mode 100644 index 
0000000000000..3313021bfae52 --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/stats/WlmStats.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm.stats; + +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * This class contains the stats for Workload Management + */ +public class WlmStats extends BaseNodeResponse implements ToXContentObject, Writeable { + private final QueryGroupStats queryGroupStats; + + public WlmStats(DiscoveryNode node, QueryGroupStats queryGroupStats) { + super(node); + this.queryGroupStats = queryGroupStats; + } + + public WlmStats(StreamInput in) throws IOException { + super(in); + queryGroupStats = new QueryGroupStats(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + queryGroupStats.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return queryGroupStats.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WlmStats that = (WlmStats) o; + return Objects.equals(getQueryGroupStats(), that.getQueryGroupStats()); + } + + @Override + public int hashCode() { + return Objects.hash(queryGroupStats); + } + + public QueryGroupStats getQueryGroupStats() { + return 
queryGroupStats; + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponseTests.java new file mode 100644 index 0000000000000..01dc033568a95 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/wlm/WlmStatsResponseTests.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.wlm; + +import org.opensearch.Version; +import org.opensearch.action.FailedNodeException; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.stats.QueryGroupStats; +import org.opensearch.wlm.stats.WlmStats; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class WlmStatsResponseTests extends OpenSearchTestCase { + ClusterName clusterName = new ClusterName("test-cluster"); + String testQueryGroupId = "safjgagnaeekg-3r3fads"; + DiscoveryNode node = new DiscoveryNode( + "node-1", + buildNewFakeTransportAddress(), + new HashMap<>(), + Set.of(DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + Map statsHolderMap = new HashMap<>(); + QueryGroupStats queryGroupStats = new QueryGroupStats( + Map.of( + testQueryGroupId, + new QueryGroupStats.QueryGroupStatsHolder( + 0, + 0, + 1, + 0, + Map.of( + ResourceType.CPU, + new QueryGroupStats.ResourceStats(0, 0, 0), + ResourceType.MEMORY, + new 
QueryGroupStats.ResourceStats(0, 0, 0) + ) + ) + ) + ); + WlmStats wlmStats = new WlmStats(node, queryGroupStats); + List wlmStatsList = List.of(wlmStats); + List failedNodeExceptionList = new ArrayList<>(); + + public void testSerializationAndDeserialization() throws IOException { + WlmStatsResponse queryGroupStatsResponse = new WlmStatsResponse(clusterName, wlmStatsList, failedNodeExceptionList); + BytesStreamOutput out = new BytesStreamOutput(); + queryGroupStatsResponse.writeTo(out); + StreamInput in = out.bytes().streamInput(); + WlmStatsResponse deserializedResponse = new WlmStatsResponse(in); + assertEquals(queryGroupStatsResponse.getClusterName(), deserializedResponse.getClusterName()); + assertEquals(queryGroupStatsResponse.getNodes().size(), deserializedResponse.getNodes().size()); + } + + public void testToString() { + WlmStatsResponse queryGroupStatsResponse = new WlmStatsResponse(clusterName, wlmStatsList, failedNodeExceptionList); + String responseString = queryGroupStatsResponse.toString(); + assertEquals( + "{\n" + + " \"node-1\" : {\n" + + " \"query_groups\" : {\n" + + " \"safjgagnaeekg-3r3fads\" : {\n" + + " \"total_completions\" : 0,\n" + + " \"total_rejections\" : 0,\n" + + " \"total_cancellations\" : 0,\n" + + " \"cpu\" : {\n" + + " \"current_usage\" : 0.0,\n" + + " \"cancellations\" : 0,\n" + + " \"rejections\" : 0\n" + + " },\n" + + " \"memory\" : {\n" + + " \"current_usage\" : 0.0,\n" + + " \"cancellations\" : 0,\n" + + " \"rejections\" : 0\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}", + responseString + ); + } +} diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportWlmStatsActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportWlmStatsActionTests.java new file mode 100644 index 0000000000000..49d2cc4d23e62 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/support/nodes/TransportWlmStatsActionTests.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 
+ * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.nodes; + +import org.opensearch.action.admin.cluster.wlm.TransportWlmStatsAction; +import org.opensearch.action.admin.cluster.wlm.WlmStatsRequest; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.transport.CapturingTransport; +import org.opensearch.wlm.QueryGroupService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; + +public class TransportWlmStatsActionTests extends TransportNodesActionTests { + + /** + * We don't want to send discovery nodes list to each request that is sent across from the coordinator node. + * This behavior is asserted in this test. 
+ */ + public void testWlmStatsActionWithRetentionOfDiscoveryNodesList() { + WlmStatsRequest request = new WlmStatsRequest(); + Map> combinedSentRequest = performWlmStatsAction(request); + + assertNotNull(combinedSentRequest); + combinedSentRequest.forEach((node, capturedRequestList) -> { + assertNotNull(capturedRequestList); + capturedRequestList.forEach(sentRequest -> { assertNull(sentRequest.concreteNodes()); }); + }); + } + + private Map> performWlmStatsAction(WlmStatsRequest request) { + TransportNodesAction action = new TransportWlmStatsAction( + THREAD_POOL, + clusterService, + transportService, + mock(QueryGroupService.class), + new ActionFilters(Collections.emptySet()) + ); + PlainActionFuture listener = new PlainActionFuture<>(); + action.new AsyncAction(null, request, listener).start(); + Map> capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear(); + Map> combinedSentRequest = new HashMap<>(); + + capturedRequests.forEach((node, capturedRequestList) -> { + List sentRequestList = new ArrayList<>(); + + capturedRequestList.forEach(preSentRequest -> { + BytesStreamOutput out = new BytesStreamOutput(); + try { + WlmStatsRequest wlmStatsRequestFromCoordinator = (WlmStatsRequest) preSentRequest.request; + wlmStatsRequestFromCoordinator.writeTo(out); + StreamInput in = out.bytes().streamInput(); + WlmStatsRequest wlmStatsRequest = new WlmStatsRequest(in); + sentRequestList.add(wlmStatsRequest); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + combinedSentRequest.put(node, sentRequestList); + }); + + return combinedSentRequest; + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java index 259904fc143a1..15b16f4610062 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java @@ -41,6 +41,7 @@ import 
org.apache.lucene.index.MultiReader; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.Query; @@ -65,8 +66,8 @@ import org.opensearch.index.query.DateRangeIncludingNowQuery; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; import org.opensearch.search.approximate.ApproximatePointRangeQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.joda.time.DateTimeZone; import java.io.IOException; @@ -212,8 +213,11 @@ public void testTermQuery() { MappedFieldType ft = new DateFieldType("field"); String date = "2015-10-12T14:10:55"; long instant = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)).toInstant().toEpochMilli(); - Query expected = new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery("field", instant, instant + 999), + Query expected = new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery("field", instant, instant + 999), + SortedNumericDocValuesField.newSlowRangeQuery("field", instant, instant + 999) + ), new ApproximatePointRangeQuery( "field", pack(new long[] { instant }).bytes, @@ -224,8 +228,7 @@ public void testTermQuery() { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery("field", instant, instant + 999) + } ); assumeThat( "Using Approximate Range Query as default", @@ -278,8 +281,11 @@ public void testRangeQuery() throws IOException { String date2 = "2016-04-28T11:33:52"; long instant1 = 
DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date1)).toInstant().toEpochMilli(); long instant2 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date2)).toInstant().toEpochMilli() + 999; - Query expected = new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery("field", instant1, instant2), + Query expected = new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery("field", instant1, instant2), + SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) + ), new ApproximatePointRangeQuery( "field", pack(new long[] { instant1 }).bytes, @@ -290,8 +296,7 @@ public void testRangeQuery() throws IOException { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) + } ); assumeThat( "Using Approximate Range Query as default", @@ -306,8 +311,11 @@ protected String toString(int dimension, byte[] value) { instant1 = nowInMillis; instant2 = instant1 + 100; expected = new DateRangeIncludingNowQuery( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery("field", instant1, instant2), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery("field", instant1, instant2), + SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) + ), new ApproximatePointRangeQuery( "field", pack(new long[] { instant1 }).bytes, @@ -318,8 +326,7 @@ protected String toString(int dimension, byte[] value) { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) + } ) ); assumeThat( @@ -388,8 +395,8 @@ public void testRangeQueryWithIndexSort() { "field", instant1, instant2, - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery("field", instant1, 
instant2), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery(LongPoint.newRangeQuery("field", instant1, instant2), dvQuery), new ApproximatePointRangeQuery( "field", pack(new long[] { instant1 }).bytes, @@ -400,8 +407,7 @@ public void testRangeQueryWithIndexSort() { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - dvQuery + } ) ); assumeThat( diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java index 7a8ac829bdd97..a11a16f421783 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java @@ -50,8 +50,8 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryStringQueryBuilder; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; import org.opensearch.search.approximate.ApproximatePointRangeQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -191,11 +191,14 @@ public void testDateRangeQuery() throws Exception { is(true) ); assertEquals( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery( - DATE_FIELD_NAME, - parser.parse(lowerBoundExact, () -> 0).toEpochMilli(), - parser.parse(upperBoundExact, () -> 0).toEpochMilli() + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery( + DATE_FIELD_NAME, + parser.parse(lowerBoundExact, () -> 0).toEpochMilli(), + parser.parse(upperBoundExact, () -> 0).toEpochMilli() + ), + controlDv ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, @@ -207,8 +210,7 @@ public void testDateRangeQuery() throws Exception { 
protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - controlDv + } ), queryOnDateField ); diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java index b157c43e45451..04d4e6b3f852e 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java @@ -57,7 +57,7 @@ import org.opensearch.index.mapper.RangeFieldMapper.RangeFieldType; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryShardException; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.IndexSettingsModule; import org.joda.time.DateTime; import org.junit.Before; @@ -293,7 +293,7 @@ public void testDateRangeQueryUsingMappingFormatLegacy() { ); assertEquals( "field:[1465975790000 TO 1466062190999]", - ((IndexOrDocValuesQuery) ((ApproximateIndexOrDocValuesQuery) queryOnDateField).getOriginalQuery()).getIndexQuery().toString() + ((IndexOrDocValuesQuery) ((ApproximateScoreQuery) queryOnDateField).getOriginalQuery()).getIndexQuery().toString() ); } diff --git a/server/src/test/java/org/opensearch/index/query/MatchPhraseQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MatchPhraseQueryBuilderTests.java index ddf58073a5206..48f697cff1bea 100644 --- a/server/src/test/java/org/opensearch/index/query/MatchPhraseQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MatchPhraseQueryBuilderTests.java @@ -42,7 +42,7 @@ import org.apache.lucene.search.TermQuery; import org.opensearch.core.common.ParsingException; import org.opensearch.index.search.MatchQuery.ZeroTermsQuery; -import 
org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -131,7 +131,7 @@ protected void doAssertLuceneQuery(MatchPhraseQueryBuilder queryBuilder, Query q .or(instanceOf(PointRangeQuery.class)) .or(instanceOf(IndexOrDocValuesQuery.class)) .or(instanceOf(MatchNoDocsQuery.class)) - .or(instanceOf(ApproximateIndexOrDocValuesQuery.class)) + .or(instanceOf(ApproximateScoreQuery.class)) ); } diff --git a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java index 5b030df20e889..6f295100d9e47 100644 --- a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java @@ -47,6 +47,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiTermQuery; @@ -76,8 +77,8 @@ import org.opensearch.index.mapper.FieldNamesFieldMapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.search.QueryStringQueryParser; -import org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; import org.opensearch.search.approximate.ApproximatePointRangeQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; @@ -863,7 +864,7 @@ public void testToQueryDateWithTimeZone() throws Exception { FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), is(true) ); - 
assertThat(query, instanceOf(ApproximateIndexOrDocValuesQuery.class)); + assertThat(query, instanceOf(ApproximateScoreQuery.class)); long lower = 0; // 1970-01-01T00:00:00.999 UTC long upper = 86399999; // 1970-01-01T23:59:59.999 UTC assertEquals(calculateExpectedDateQuery(lower, upper), query); @@ -872,9 +873,12 @@ public void testToQueryDateWithTimeZone() throws Exception { assertEquals(calculateExpectedDateQuery(lower + msPerHour, upper + msPerHour), qsq.timeZone("-01:00").toQuery(context)); } - private ApproximateIndexOrDocValuesQuery calculateExpectedDateQuery(long lower, long upper) { - return new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + private ApproximateScoreQuery calculateExpectedDateQuery(long lower, long upper) { + return new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, pack(new long[] { lower }).bytes, @@ -885,8 +889,7 @@ private ApproximateIndexOrDocValuesQuery calculateExpectedDateQuery(long lower, protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + } ); } diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java index 799d7c7b63462..79f1452db297b 100644 --- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java @@ -55,9 +55,9 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MappedFieldType.Relation; import org.opensearch.index.mapper.MapperService; -import 
org.opensearch.search.approximate.ApproximateIndexOrDocValuesQuery; import org.opensearch.search.approximate.ApproximatePointRangeQuery; import org.opensearch.search.approximate.ApproximateQuery; +import org.opensearch.search.approximate.ApproximateScoreQuery; import org.opensearch.test.AbstractQueryTestCase; import org.joda.time.DateTime; import org.joda.time.chrono.ISOChronology; @@ -196,10 +196,10 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), is(true) ); - assertThat(query, instanceOf(ApproximateIndexOrDocValuesQuery.class)); - Query approximationQuery = ((ApproximateIndexOrDocValuesQuery) query).getApproximationQuery(); + assertThat(query, instanceOf(ApproximateScoreQuery.class)); + Query approximationQuery = ((ApproximateScoreQuery) query).getApproximationQuery(); assertThat(approximationQuery, instanceOf(ApproximateQuery.class)); - Query originalQuery = ((ApproximateIndexOrDocValuesQuery) query).getOriginalQuery(); + Query originalQuery = ((ApproximateScoreQuery) query).getOriginalQuery(); assertThat(originalQuery, instanceOf(IndexOrDocValuesQuery.class)); MapperService mapperService = context.getMapperService(); MappedFieldType mappedFieldType = mapperService.fieldType(expectedFieldName); @@ -250,8 +250,11 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, } } assertEquals( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery(DATE_FIELD_NAME, minLong, maxLong), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery(DATE_FIELD_NAME, minLong, maxLong), + SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, minLong, maxLong) + ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, pack(new long[] { minLong }).bytes, @@ -262,8 +265,7 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, protected String toString(int dimension, byte[] value) { return 
Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, minLong, maxLong) + } ), query ); @@ -336,16 +338,19 @@ public void testDateRangeQueryFormat() throws IOException { FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), is(true) ); - assertThat(parsedQuery, instanceOf(ApproximateIndexOrDocValuesQuery.class)); - Query approximationQuery = ((ApproximateIndexOrDocValuesQuery) parsedQuery).getApproximationQuery(); + assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); + Query approximationQuery = ((ApproximateScoreQuery) parsedQuery).getApproximationQuery(); assertThat(approximationQuery, instanceOf(ApproximateQuery.class)); - Query originalQuery = ((ApproximateIndexOrDocValuesQuery) parsedQuery).getOriginalQuery(); + Query originalQuery = ((ApproximateScoreQuery) parsedQuery).getOriginalQuery(); assertThat(originalQuery, instanceOf(IndexOrDocValuesQuery.class)); long lower = DateTime.parse("2012-01-01T00:00:00.000+00").getMillis(); long upper = DateTime.parse("2030-01-01T00:00:00.000+00").getMillis() - 1; assertEquals( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, pack(new long[] { lower }).bytes, @@ -356,8 +361,7 @@ public void testDateRangeQueryFormat() throws IOException { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + } ), parsedQuery ); @@ -394,13 +398,16 @@ public void testDateRangeBoundaries() throws IOException { FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), is(true) ); - 
assertThat(parsedQuery, instanceOf(ApproximateIndexOrDocValuesQuery.class)); + assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); long lower = DateTime.parse("2014-11-01T00:00:00.000+00").getMillis(); long upper = DateTime.parse("2014-12-08T23:59:59.999+00").getMillis(); assertEquals( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, pack(new long[] { lower }).bytes, @@ -411,8 +418,7 @@ public void testDateRangeBoundaries() throws IOException { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + } ) , @@ -430,12 +436,15 @@ protected String toString(int dimension, byte[] value) { + " }\n" + "}"; parsedQuery = parseQuery(query).toQuery(createShardContext()); - assertThat(parsedQuery, instanceOf(ApproximateIndexOrDocValuesQuery.class)); + assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); lower = DateTime.parse("2014-11-30T23:59:59.999+00").getMillis() + 1; upper = DateTime.parse("2014-12-08T00:00:00.000+00").getMillis() - 1; assertEquals( - new ApproximateIndexOrDocValuesQuery( - LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + new ApproximateScoreQuery( + new IndexOrDocValuesQuery( + LongPoint.newRangeQuery(DATE_FIELD_NAME, lower, upper), + SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + ), new ApproximatePointRangeQuery( DATE_FIELD_NAME, pack(new long[] { lower }).bytes, @@ -446,8 +455,7 @@ protected String toString(int dimension, byte[] value) { protected String toString(int dimension, byte[] value) { return Long.toString(LongPoint.decodeDimension(value, 
0)); } - }, - SortedNumericDocValuesField.newSlowRangeQuery(DATE_FIELD_NAME, lower, upper) + } ) , @@ -476,8 +484,8 @@ public void testDateRangeQueryTimezone() throws IOException { FeatureFlags.isEnabled(FeatureFlags.APPROXIMATE_POINT_RANGE_QUERY), is(true) ); - assertThat(parsedQuery, instanceOf(ApproximateIndexOrDocValuesQuery.class)); - parsedQuery = ((ApproximateIndexOrDocValuesQuery) parsedQuery).getApproximationQuery(); + assertThat(parsedQuery, instanceOf(ApproximateScoreQuery.class)); + parsedQuery = ((ApproximateScoreQuery) parsedQuery).getApproximationQuery(); assertThat(parsedQuery, instanceOf(ApproximateQuery.class)); // TODO what else can we assert diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index d9622aae4c378..aa10b7dc18381 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -382,6 +382,7 @@ public void testParseShardPath() { IndexId indexId = repoData.getIndices().values().iterator().next(); int shardCount = repoData.shardGenerations().getGens(indexId).size(); + // Version 2.17 has file name starting with indexId String shardPath = String.join( SnapshotShardPaths.DELIMITER, indexId.getId(), @@ -391,7 +392,19 @@ public void testParseShardPath() { "1" ); ShardInfo shardInfo = SnapshotShardPaths.parseShardPath(shardPath); + assertEquals(shardInfo.getIndexId(), indexId); + assertEquals(shardInfo.getShardCount(), shardCount); + // Version 2.17 has file name starting with snapshot_path_ + shardPath = String.join( + SnapshotShardPaths.DELIMITER, + SnapshotShardPaths.FILE_PREFIX + indexId.getId(), + indexId.getName(), + String.valueOf(shardCount), + String.valueOf(indexId.getShardPathType()), + "1" + ); + shardInfo = SnapshotShardPaths.parseShardPath(shardPath); 
assertEquals(shardInfo.getIndexId(), indexId); assertEquals(shardInfo.getShardCount(), shardCount); } diff --git a/server/src/test/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQueryTests.java b/server/src/test/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQueryTests.java deleted file mode 100644 index 47f87c6abf629..0000000000000 --- a/server/src/test/java/org/opensearch/search/approximate/ApproximateIndexOrDocValuesQueryTests.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.search.approximate; - -import org.apache.lucene.document.LongPoint; -import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.search.ConstantScoreWeight; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Weight; -import org.apache.lucene.store.Directory; -import org.opensearch.search.internal.SearchContext; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; - -import static org.apache.lucene.document.LongPoint.pack; - -public class ApproximateIndexOrDocValuesQueryTests extends OpenSearchTestCase { - private Directory dir; - private IndexWriter w; - private DirectoryReader reader; - private IndexSearcher searcher; - - @Before - public void initSearcher() throws IOException { - dir = newDirectory(); - w = new IndexWriter(dir, newIndexWriterConfig()); - } - - @After - public void closeAllTheReaders() throws IOException { - reader.close(); - w.close(); - dir.close(); - } - - public void 
testApproximateIndexOrDocValuesQueryWeight() throws IOException { - - long l = Long.MIN_VALUE; - long u = Long.MAX_VALUE; - Query indexQuery = LongPoint.newRangeQuery("test-index", l, u); - - ApproximateQuery approximateIndexQuery = new ApproximatePointRangeQuery( - "test-index", - pack(new long[] { l }).bytes, - pack(new long[] { u }).bytes, - new long[] { l }.length - ) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - }; - - Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery("test-index", l, u); - - ApproximateIndexOrDocValuesQuery approximateIndexOrDocValuesQuery = new ApproximateIndexOrDocValuesQuery( - indexQuery, - approximateIndexQuery, - dvQuery - ); - - reader = DirectoryReader.open(w); - searcher = newSearcher(reader); - - approximateIndexOrDocValuesQuery.resolvedQuery = indexQuery; - - Weight weight = approximateIndexOrDocValuesQuery.rewrite(searcher).createWeight(searcher, ScoreMode.COMPLETE, 1f); - - assertTrue(weight instanceof ConstantScoreWeight); - - ApproximateQuery approximateIndexQueryCanApproximate = new ApproximatePointRangeQuery( - "test-index", - pack(new long[] { l }).bytes, - pack(new long[] { u }).bytes, - new long[] { l }.length - ) { - protected String toString(int dimension, byte[] value) { - return Long.toString(LongPoint.decodeDimension(value, 0)); - } - - public boolean canApproximate(SearchContext context) { - return true; - } - }; - - ApproximateIndexOrDocValuesQuery approximateIndexOrDocValuesQueryCanApproximate = new ApproximateIndexOrDocValuesQuery( - indexQuery, - approximateIndexQueryCanApproximate, - dvQuery - ); - - approximateIndexOrDocValuesQueryCanApproximate.resolvedQuery = approximateIndexQueryCanApproximate; - - Weight approximateIndexOrDocValuesQueryCanApproximateWeight = approximateIndexOrDocValuesQueryCanApproximate.rewrite(searcher) - .createWeight(searcher, ScoreMode.COMPLETE, 1f); - - // we get ConstantScoreWeight since 
we're expecting to call ApproximatePointRangeQuery - assertTrue(approximateIndexOrDocValuesQueryCanApproximateWeight instanceof ConstantScoreWeight); - - } -} diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java index 4f59f9688fb7e..b4726bab50198 100644 --- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java @@ -156,7 +156,7 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::twiceAllocatedProcessors); - sizes.put(ThreadPool.Names.REMOTE_STATE_READ, ThreadPool::twiceAllocatedProcessors); + sizes.put(ThreadPool.Names.REMOTE_STATE_READ, n -> ThreadPool.boundedBy(4 * n, 4, 32)); return sizes.get(threadPoolName).apply(numberOfProcessors); } diff --git a/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java b/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java index c5cf0dac4f807..45428865259c3 100644 --- a/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java +++ b/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java @@ -401,14 +401,14 @@ public void testOnTaskCompleted() { ((QueryGroupTask) task).setQueryGroupId(mockThreadPool.getThreadContext()); queryGroupService.onTaskCompleted(task); - assertEquals(1, queryGroupState.completions.count()); + assertEquals(1, queryGroupState.totalCompletions.count()); // test non QueryGroupTask task = new Task(1, "simple", "test", "mock task", null, null); queryGroupService.onTaskCompleted(task); // It should still be 1 - assertEquals(1, queryGroupState.completions.count()); + assertEquals(1, 
queryGroupState.totalCompletions.count()); mockThreadPool.shutdown(); } diff --git a/server/src/test/java/org/opensearch/wlm/listeners/QueryGroupRequestOperationListenerTests.java b/server/src/test/java/org/opensearch/wlm/listeners/QueryGroupRequestOperationListenerTests.java index 1127b50399d24..016588acf1e24 100644 --- a/server/src/test/java/org/opensearch/wlm/listeners/QueryGroupRequestOperationListenerTests.java +++ b/server/src/test/java/org/opensearch/wlm/listeners/QueryGroupRequestOperationListenerTests.java @@ -29,6 +29,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; @@ -56,6 +57,10 @@ public void setUp() throws Exception { queryGroupStateMap = new HashMap<>(); testQueryGroupId = "safjgagnakg-3r3fads"; testThreadPool = new TestThreadPool("RejectionTestThreadPool"); + ClusterState mockClusterState = mock(ClusterState.class); + when(mockClusterService.state()).thenReturn(mockClusterState); + Metadata mockMetaData = mock(Metadata.class); + when(mockClusterState.metadata()).thenReturn(mockMetaData); queryGroupService = mock(QueryGroupService.class); sut = new QueryGroupRequestOperationListener(queryGroupService, testThreadPool); } @@ -90,7 +95,6 @@ public void testValidQueryGroupRequestFailure() throws IOException { 0, 1, 0, - 0, Map.of( ResourceType.CPU, new QueryGroupStats.ResourceStats(0, 0, 0), @@ -104,7 +108,6 @@ public void testValidQueryGroupRequestFailure() throws IOException { 0, 0, 0, - 0, Map.of( ResourceType.CPU, new QueryGroupStats.ResourceStats(0, 0, 0), @@ -155,7 +158,9 @@ public void testMultiThreadedValidQueryGroupRequestFailures() { } }); - QueryGroupStats actualStats = queryGroupService.nodeStats(); + HashSet set = new HashSet<>(); + set.add("_all"); + QueryGroupStats actualStats = queryGroupService.nodeStats(set, null); QueryGroupStats expectedStats = new QueryGroupStats( Map.of( @@ -165,7 +170,6 @@ public void 
testMultiThreadedValidQueryGroupRequestFailures() { 0, ITERATIONS, 0, - 0, Map.of( ResourceType.CPU, new QueryGroupStats.ResourceStats(0, 0, 0), @@ -179,7 +183,6 @@ public void testMultiThreadedValidQueryGroupRequestFailures() { 0, 0, 0, - 0, Map.of( ResourceType.CPU, new QueryGroupStats.ResourceStats(0, 0, 0), @@ -202,7 +205,6 @@ public void testInvalidQueryGroupFailure() throws IOException { 0, 0, 0, - 0, Map.of( ResourceType.CPU, new QueryGroupStats.ResourceStats(0, 0, 0), @@ -216,7 +218,6 @@ public void testInvalidQueryGroupFailure() throws IOException { 0, 1, 0, - 0, Map.of( ResourceType.CPU, new QueryGroupStats.ResourceStats(0, 0, 0), @@ -254,11 +255,12 @@ private void assertSuccess( Collections.emptySet(), Collections.emptySet() ); - sut = new QueryGroupRequestOperationListener(queryGroupService, testThreadPool); sut.onRequestFailure(null, null); - QueryGroupStats actualStats = queryGroupService.nodeStats(); + HashSet set = new HashSet<>(); + set.add("_all"); + QueryGroupStats actualStats = queryGroupService.nodeStats(set, null); assertEquals(expectedStats, actualStats); } diff --git a/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStateTests.java b/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStateTests.java index 566c4261d6878..c0dfa06a0fba1 100644 --- a/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStateTests.java +++ b/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStateTests.java @@ -23,13 +23,7 @@ public void testRandomQueryGroupsStateUpdates() { for (int i = 0; i < 25; i++) { if (i % 5 == 0) { - updaterThreads.add(new Thread(() -> { - if (randomBoolean()) { - queryGroupState.completions.inc(); - } else { - queryGroupState.shardCompletions.inc(); - } - })); + updaterThreads.add(new Thread(() -> { queryGroupState.totalCompletions.inc(); })); } else if (i % 5 == 1) { updaterThreads.add(new Thread(() -> { queryGroupState.totalRejections.inc(); @@ -63,7 +57,7 @@ public void testRandomQueryGroupsStateUpdates() { } }); 
- assertEquals(5, queryGroupState.getCompletions() + queryGroupState.getShardCompletions()); + assertEquals(5, queryGroupState.getTotalCompletions()); assertEquals(5, queryGroupState.getTotalRejections()); final long sumOfRejectionsDueToResourceTypes = queryGroupState.getResourceState().get(ResourceType.CPU).rejections.count() diff --git a/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStatsTests.java b/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStatsTests.java index ac6d19580dacb..6fc4d178e54bc 100644 --- a/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStatsTests.java +++ b/server/src/test/java/org/opensearch/wlm/stats/QueryGroupStatsTests.java @@ -8,17 +8,24 @@ package org.opensearch.wlm.stats; +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.AbstractWireSerializingTestCase; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.VersionUtils; import org.opensearch.wlm.ResourceType; import java.io.IOException; import java.util.HashMap; import java.util.Map; +import static java.util.Collections.emptyMap; + public class QueryGroupStatsTests extends AbstractWireSerializingTestCase { public void testToXContent() throws IOException { @@ -31,7 +38,6 @@ public void testToXContent() throws IOException { 13, 2, 0, - 1213718, Map.of(ResourceType.CPU, new QueryGroupStats.ResourceStats(0.3, 13, 2)) ) ); @@ -41,7 +47,7 @@ public void testToXContent() throws IOException { queryGroupStats.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); assertEquals( - 
"{\"query_groups\":{\"afakjklaj304041-afaka\":{\"completions\":123456789,\"shard_completions\":1213718,\"rejections\":13,\"failures\":2,\"total_cancellations\":0,\"cpu\":{\"current_usage\":0.3,\"cancellations\":13,\"rejections\":2}}}}", + "{\"query_groups\":{\"afakjklaj304041-afaka\":{\"total_completions\":123456789,\"total_rejections\":13,\"total_cancellations\":0,\"cpu\":{\"current_usage\":0.3,\"cancellations\":13,\"rejections\":2}}}}", builder.toString() ); } @@ -61,7 +67,6 @@ protected QueryGroupStats createTestInstance() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - randomNonNegativeLong(), Map.of( ResourceType.CPU, new QueryGroupStats.ResourceStats( @@ -72,6 +77,13 @@ protected QueryGroupStats createTestInstance() { ) ) ); + DiscoveryNode discoveryNode = new DiscoveryNode( + "node", + OpenSearchTestCase.buildNewFakeTransportAddress(), + emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + VersionUtils.randomCompatibleVersion(random(), Version.CURRENT) + ); return new QueryGroupStats(stats); } } diff --git a/server/src/test/java/org/opensearch/wlm/stats/WlmStatsTests.java b/server/src/test/java/org/opensearch/wlm/stats/WlmStatsTests.java new file mode 100644 index 0000000000000..6910ca7f9937c --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/stats/WlmStatsTests.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm.stats; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.AbstractWireSerializingTestCase; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.VersionUtils; +import org.opensearch.wlm.ResourceType; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static org.mockito.Mockito.mock; + +public class WlmStatsTests extends AbstractWireSerializingTestCase { + + public void testToXContent() throws IOException { + final Map stats = new HashMap<>(); + final String queryGroupId = "afakjklaj304041-afaka"; + stats.put( + queryGroupId, + new QueryGroupStats.QueryGroupStatsHolder( + 123456789, + 13, + 2, + 0, + Map.of(ResourceType.CPU, new QueryGroupStats.ResourceStats(0.3, 13, 2)) + ) + ); + XContentBuilder builder = JsonXContent.contentBuilder(); + QueryGroupStats queryGroupStats = new QueryGroupStats(stats); + WlmStats wlmStats = new WlmStats(mock(DiscoveryNode.class), queryGroupStats); + builder.startObject(); + wlmStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + assertEquals( + "{\"query_groups\":{\"afakjklaj304041-afaka\":{\"total_completions\":123456789,\"total_rejections\":13,\"total_cancellations\":0,\"cpu\":{\"current_usage\":0.3,\"cancellations\":13,\"rejections\":2}}}}", + builder.toString() + ); + } + + @Override + protected Writeable.Reader instanceReader() { + return WlmStats::new; + } + + @Override + protected WlmStats createTestInstance() { + DiscoveryNode discoveryNode = new DiscoveryNode( + "node", + OpenSearchTestCase.buildNewFakeTransportAddress(), + 
emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + VersionUtils.randomCompatibleVersion(random(), Version.CURRENT) + ); + QueryGroupStatsTests queryGroupStatsTests = new QueryGroupStatsTests(); + return new WlmStats(discoveryNode, queryGroupStatsTests.createTestInstance()); + } +} diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java index 74c75ea05b1f3..e3d75cbb69638 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java @@ -404,8 +404,6 @@ protected void sendIncompleteContent(HttpExchange exchange, byte[] bytes) throws if (bytesToSend > 0) { exchange.getResponseBody().write(bytes, rangeStart, bytesToSend); } - if (randomBoolean()) { - exchange.getResponseBody().flush(); - } + exchange.getResponseBody().flush(); } }