
Merge branch 'main' into catSegmentsOpt
Signed-off-by: Harsh Garg <gkharsh@amazon.com>
Harsh Garg committed Sep 4, 2024
2 parents 73f60e5 + a60b668 commit ccca45e
Showing 294 changed files with 16,916 additions and 3,348 deletions.
9 changes: 9 additions & 0 deletions CHANGELOG.md
@@ -27,18 +27,27 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Add allowlist setting for ingest-geoip and ingest-useragent ([#15325](https://github.com/opensearch-project/OpenSearch/pull/15325))
- Adding access to noSubMatches and noOverlappingMatches in Hyphenation ([#13895](https://github.com/opensearch-project/OpenSearch/pull/13895))
- Add support for index level max slice count setting for concurrent segment search ([#15336](https://github.com/opensearch-project/OpenSearch/pull/15336))
- Support cancellation for cat shards and node stats API ([#13966](https://github.com/opensearch-project/OpenSearch/pull/13966))
- [Streaming Indexing] Introduce bulk HTTP API streaming flavor ([#15381](https://github.com/opensearch-project/OpenSearch/pull/15381))
- Add support for centralized snapshot creation with pinned timestamp ([#15124](https://github.com/opensearch-project/OpenSearch/pull/15124))
- Add concurrent search support for Derived Fields ([#15326](https://github.com/opensearch-project/OpenSearch/pull/15326))
- [Workload Management] Add query group stats constructs ([#15343](https://github.com/opensearch-project/OpenSearch/pull/15343))
- Add limit on number of processors for Ingest pipeline ([#15465](https://github.com/opensearch-project/OpenSearch/pull/15465))
- Add runAs to Subject interface and introduce IdentityAwarePlugin extension point ([#14630](https://github.com/opensearch-project/OpenSearch/pull/14630))
- Optimize NodeIndicesStats output behind flag ([#14454](https://github.com/opensearch-project/OpenSearch/pull/14454))
- [Workload Management] Add rejection logic for coordinator and shard level requests ([#15428](https://github.com/opensearch-project/OpenSearch/pull/15428))
- Adding translog durability validation in index templates ([#15494](https://github.com/opensearch-project/OpenSearch/pull/15494))
- Add index creation using the context field ([#15290](https://github.com/opensearch-project/OpenSearch/pull/15290))
- Add fieldType to AbstractQueryBuilder and FieldSortBuilder ([#15328](https://github.com/opensearch-project/OpenSearch/pull/15328))
- [Reader Writer Separation] Add searchOnly replica routing configuration ([#15410](https://github.com/opensearch-project/OpenSearch/pull/15410))
- [Range Queries] Add new approximateable query framework to short-circuit range queries ([#13788](https://github.com/opensearch-project/OpenSearch/pull/13788))
- [Workload Management] Add query group level failure tracking ([#15527](https://github.com/opensearch-project/OpenSearch/pull/15527))
- Add support to upload snapshot shard blobs with hashed prefix ([#15426](https://github.com/opensearch-project/OpenSearch/pull/15426))
- [Remote Publication] Add remote download stats ([#15291](https://github.com/opensearch-project/OpenSearch/pull/15291))
- Add support for comma-separated list of index names to be used with Snapshot Status API ([#15409](https://github.com/opensearch-project/OpenSearch/pull/15409))
- Add prefix support to hashed prefix & infix path types on remote store ([#15557](https://github.com/opensearch-project/OpenSearch/pull/15557))
- Optimize snapshot deletion to speed up snapshot deletion and creation ([#15568](https://github.com/opensearch-project/OpenSearch/pull/15568))
- [Remote Publication] Added checksum validation for cluster state behind a cluster setting ([#15218](https://github.com/opensearch-project/OpenSearch/pull/15218))

### Dependencies
- Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081))
@@ -230,14 +230,15 @@ public void testSnapshotsStatus() {
Map<String, String> expectedParams = new HashMap<>();
String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0];
String[] snapshots = RequestConvertersTests.randomIndicesNames(1, 5);
String[] indices = RequestConvertersTests.randomIndicesNames(1, 5);
StringBuilder snapshotNames = new StringBuilder(snapshots[0]);
for (int idx = 1; idx < snapshots.length; idx++) {
snapshotNames.append(",").append(snapshots[idx]);
}
boolean ignoreUnavailable = randomBoolean();
String endpoint = "/_snapshot/" + repository + "/" + snapshotNames.toString() + "/_status";

SnapshotsStatusRequest snapshotsStatusRequest = new SnapshotsStatusRequest(repository, snapshots);
SnapshotsStatusRequest snapshotsStatusRequest = (new SnapshotsStatusRequest(repository, snapshots)).indices(indices);
RequestConvertersTests.setRandomClusterManagerTimeout(snapshotsStatusRequest, expectedParams);
snapshotsStatusRequest.ignoreUnavailable(ignoreUnavailable);
expectedParams.put("ignore_unavailable", Boolean.toString(ignoreUnavailable));
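For context, the .indices(indices) setter exercised above comes with the comma-separated index support added to the Snapshot Status API (#15409). A minimal usage sketch — the repository, snapshot, and index names here are hypothetical:

// Restrict snapshot status reporting to specific indices within the snapshots.
SnapshotsStatusRequest request = new SnapshotsStatusRequest("my-repo", new String[] { "snap-1", "snap-2" })
    .indices(new String[] { "logs-2024-08", "metrics-2024-08" });
request.ignoreUnavailable(true);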
@@ -33,9 +33,18 @@
package org.opensearch.core.common.io.stream;

import org.apache.lucene.store.BufferedChecksum;
import org.opensearch.common.Nullable;
import org.opensearch.common.annotation.PublicApi;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

@@ -90,4 +99,75 @@ public void reset() throws IOException {
public void resetDigest() {
digest.reset();
}

@Override
public void writeMap(@Nullable Map<String, Object> map) throws IOException {
// The map parameter is nullable: copy into a key-sorted TreeMap only when non-null.
Map<String, Object> newMap = map == null ? null : new TreeMap<>(map);
writeGenericValue(newMap);
}

@Override
public <K, V> void writeMap(Map<K, V> map, final Writeable.Writer<K> keyWriter, final Writeable.Writer<V> valueWriter)
throws IOException {
writeVInt(map.size());
map.keySet().stream().sorted().forEachOrdered(key -> {
try {
keyWriter.write(this, key);
valueWriter.write(this, map.get(key));
} catch (IOException e) {
throw new RuntimeException("Failed to write map values.", e);
}
});
}

/** Writes only the map's values, ordered by the keys' natural ordering, so the bytes do not depend on map iteration order. */
public <K, V> void writeMapValues(Map<K, V> map, final Writeable.Writer<V> valueWriter) throws IOException {
writeVInt(map.size());
map.keySet().stream().sorted().forEachOrdered(key -> {
try {
valueWriter.write(this, map.get(key));
} catch (IOException e) {
throw new RuntimeException("Failed to write map values.", e);
}
});
}

@Override
public void writeStringArray(String[] array) throws IOException {
String[] copyArray = Arrays.copyOf(array, array.length);
Arrays.sort(copyArray);
super.writeStringArray(copyArray);
}

@Override
public void writeVLongArray(long[] values) throws IOException {
long[] copyValues = Arrays.copyOf(values, values.length);
Arrays.sort(copyValues);
super.writeVLongArray(copyValues);
}

@Override
public void writeCollection(final Collection<? extends Writeable> collection) throws IOException {
// Note: elements must also implement Comparable; otherwise sorted() fails at runtime.
List<? extends Writeable> sortedList = collection.stream().sorted().collect(Collectors.toList());
super.writeCollection(sortedList, (o, v) -> v.writeTo(o));
}

@Override
public void writeStringCollection(final Collection<String> collection) throws IOException {
List<String> listCollection = new ArrayList<>(collection);
Collections.sort(listCollection);
writeCollection(listCollection, StreamOutput::writeString);
}

@Override
public void writeOptionalStringCollection(final Collection<String> collection) throws IOException {
if (collection != null) {
List<String> listCollection = new ArrayList<>(collection);
Collections.sort(listCollection);
writeBoolean(true);
writeCollection(listCollection, StreamOutput::writeString);
} else {
writeBoolean(false);
}
}

}
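Taken together, these overrides normalize ordering before anything reaches the digest, so the checksum no longer depends on map or collection iteration order. A quick sketch of the intended effect (this assumes the existing BytesStreamOutput helper and this class's getChecksum() accessor; the snippet is not part of the diff):

Map<String, Object> first = new LinkedHashMap<>();
first.put("shards", 3);
first.put("replicas", 2);
Map<String, Object> second = new LinkedHashMap<>();
second.put("replicas", 2);
second.put("shards", 3);

BufferedChecksumStreamOutput out1 = new BufferedChecksumStreamOutput(new BytesStreamOutput());
BufferedChecksumStreamOutput out2 = new BufferedChecksumStreamOutput(new BytesStreamOutput());
out1.writeMap(first);   // keys are copied into a TreeMap and written as replicas, shards
out2.writeMap(second);  // same sorted order despite the different insertion order
assert out1.getChecksum() == out2.getChecksum();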
@@ -633,7 +633,7 @@ public final <K, V> void writeMapOfLists(final Map<K, List<V>> map, final Writer
* @param keyWriter The key writer
* @param valueWriter The value writer
*/
public final <K, V> void writeMap(final Map<K, V> map, final Writer<K> keyWriter, final Writer<V> valueWriter) throws IOException {
public <K, V> void writeMap(final Map<K, V> map, final Writer<K> keyWriter, final Writer<V> valueWriter) throws IOException {
writeVInt(map.size());
for (final Map.Entry<K, V> entry : map.entrySet()) {
keyWriter.write(this, entry.getKey());
@@ -969,9 +969,13 @@ public <T extends Writeable> void writeOptionalArray(@Nullable T[] array) throws
}

public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOException {
writeOptionalWriteable((out, writable) -> writable.writeTo(out), writeable);
}

public <T extends Writeable> void writeOptionalWriteable(final Writer<T> writer, @Nullable T writeable) throws IOException {
if (writeable != null) {
writeBoolean(true);
writeable.writeTo(this);
writer.write(this, writeable);
} else {
writeBoolean(false);
}
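The new overload keeps the null/presence bookkeeping but lets the caller decide how a non-null value is written. One plausible use — an assumption, not something shown in this diff — is routing an optional nested field through the deterministic serialization path:

// Hypothetical call site: nestedField is a @Nullable field implementing VerifiableWriteable.
public void writeVerifiableTo(BufferedChecksumStreamOutput out) throws IOException {
    out.writeOptionalWriteable((o, v) -> v.writeVerifiableTo((BufferedChecksumStreamOutput) o), nestedField);
}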
@@ -0,0 +1,20 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/

package org.opensearch.core.common.io.stream;

import java.io.IOException;

/**
* Provides a method for serialization that writes in a deterministic order, producing the same byte array on every invocation.
* It should be invoked with a stream that performs ordered serialization, such as {@link BufferedChecksumStreamOutput}.
*/
public interface VerifiableWriteable extends Writeable {

void writeVerifiableTo(BufferedChecksumStreamOutput out) throws IOException;
}
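A minimal implementer sketch (the class and its fields are hypothetical, for illustration only). Both paths write the same logical fields; the verifiable path simply relies on BufferedChecksumStreamOutput's order-normalizing overrides:

public class NodeAttributes implements VerifiableWriteable {
    private final String nodeId;
    private final Map<String, String> attributes;

    public NodeAttributes(String nodeId, Map<String, String> attributes) {
        this.nodeId = nodeId;
        this.attributes = attributes;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(nodeId);
        out.writeMap(attributes, StreamOutput::writeString, StreamOutput::writeString);
    }

    @Override
    public void writeVerifiableTo(BufferedChecksumStreamOutput out) throws IOException {
        // Same fields as writeTo, but this stream's writeMap sorts keys first,
        // so every invocation emits identical bytes and an identical checksum.
        out.writeString(nodeId);
        out.writeMap(attributes, StreamOutput::writeString, StreamOutput::writeString);
    }
}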
@@ -67,19 +67,11 @@ public void testUrlRepository() throws Exception {

logger.info("--> creating repository");
Path repositoryLocation = randomRepoPath();
assertAcked(
client.admin()
.cluster()
.preparePutRepository("test-repo")
.setType(FsRepository.TYPE)
.setSettings(
Settings.builder()
.put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
.put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
.put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
)
);

Settings.Builder settings = Settings.builder()
.put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
.put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
.put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES);
createRepository("test-repo", FsRepository.TYPE, settings);
createIndex("test-idx");
ensureGreen();

@@ -115,17 +107,10 @@ public void testUrlRepository() throws Exception {
cluster().wipeIndices("test-idx");

logger.info("--> create read-only URL repository");
assertAcked(
client.admin()
.cluster()
.preparePutRepository("url-repo")
.setType(URLRepository.TYPE)
.setSettings(
Settings.builder()
.put(URLRepository.URL_SETTING.getKey(), repositoryLocation.toUri().toURL().toString())
.put("list_directories", randomBoolean())
)
);
Settings.Builder settingsBuilder = Settings.builder()
.put(URLRepository.URL_SETTING.getKey(), repositoryLocation.toUri().toURL().toString())
.put("list_directories", randomBoolean());
createRepository("url-repo", URLRepository.TYPE, settingsBuilder);
logger.info("--> restore index after deletion");
RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
.cluster()
@@ -38,7 +38,6 @@
import com.azure.storage.blob.models.BlobStorageException;
import org.opensearch.action.ActionRunnable;
import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.action.support.master.AcknowledgedResponse;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.settings.MockSecureSettings;
import org.opensearch.common.settings.SecureSettings;
@@ -47,6 +46,7 @@
import org.opensearch.plugins.Plugin;
import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase;
import org.opensearch.repositories.blobstore.BlobStoreRepository;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.junit.AfterClass;

import java.net.HttpURLConnection;
@@ -56,7 +56,6 @@
import reactor.core.scheduler.Schedulers;

import static org.hamcrest.Matchers.blankOrNullString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;

public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase {
@@ -103,17 +102,11 @@ protected SecureSettings credentials() {

@Override
protected void createRepository(String repoName) {
AcknowledgedResponse putRepositoryResponse = client().admin()
.cluster()
.preparePutRepository(repoName)
.setType("azure")
.setSettings(
Settings.builder()
.put("container", System.getProperty("test.azure.container"))
.put("base_path", System.getProperty("test.azure.base"))
)
.get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
Settings.Builder settings = Settings.builder()
.put("container", System.getProperty("test.azure.container"))
.put("base_path", System.getProperty("test.azure.base"));

OpenSearchIntegTestCase.putRepository(client().admin().cluster(), repoName, "azure", settings);
if (Strings.hasText(System.getProperty("test.azure.sas_token"))) {
ensureSasTokenPermissions();
}
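These third-party repository tests now delegate to a shared putRepository helper on OpenSearchIntegTestCase instead of asserting acknowledgement inline. The helper itself is not shown in this diff; judging from the call sites, its shape is likely along these lines (an assumption, not the committed code):

public static void putRepository(ClusterAdminClient adminClient, String repoName, String type, Settings.Builder settings) {
    // Same behavior the inlined blocks had: create the repository and
    // assert that the cluster acknowledged it.
    assertAcked(adminClient.preparePutRepository(repoName).setType(type).setSettings(settings));
}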
@@ -32,19 +32,18 @@

package org.opensearch.repositories.gcs;

import org.opensearch.action.support.master.AcknowledgedResponse;
import org.opensearch.common.settings.MockSecureSettings;
import org.opensearch.common.settings.SecureSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.core.common.Strings;
import org.opensearch.plugins.Plugin;
import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase;
import org.opensearch.test.OpenSearchIntegTestCase;

import java.util.Base64;
import java.util.Collection;

import static org.hamcrest.Matchers.blankOrNullString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;

public class GoogleCloudStorageThirdPartyTests extends AbstractThirdPartyRepositoryTestCase {
@@ -84,16 +83,9 @@ protected SecureSettings credentials() {

@Override
protected void createRepository(final String repoName) {
AcknowledgedResponse putRepositoryResponse = client().admin()
.cluster()
.preparePutRepository("test-repo")
.setType("gcs")
.setSettings(
Settings.builder()
.put("bucket", System.getProperty("test.google.bucket"))
.put("base_path", System.getProperty("test.google.base", "/"))
)
.get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
Settings.Builder settings = Settings.builder()
.put("bucket", System.getProperty("test.google.bucket"))
.put("base_path", System.getProperty("test.google.base", "/"));
OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "gcs", settings);
}
}
@@ -34,12 +34,12 @@
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;

import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
import org.opensearch.action.support.master.AcknowledgedResponse;
import org.opensearch.common.settings.MockSecureSettings;
import org.opensearch.common.settings.SecureSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.plugins.Plugin;
import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase;
import org.opensearch.test.OpenSearchIntegTestCase;

import java.util.Collection;

@@ -61,20 +61,13 @@ protected SecureSettings credentials() {

@Override
protected void createRepository(String repoName) {
AcknowledgedResponse putRepositoryResponse = client().admin()
.cluster()
.preparePutRepository(repoName)
.setType("hdfs")
.setSettings(
Settings.builder()
.put("uri", "hdfs:///")
.put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName())
.put("path", "foo")
.put("chunk_size", randomIntBetween(100, 1000) + "k")
.put("compress", randomBoolean())
)
.get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
Settings.Builder settings = Settings.builder()
.put("uri", "hdfs:///")
.put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName())
.put("path", "foo")
.put("chunk_size", randomIntBetween(100, 1000) + "k")
.put("compress", randomBoolean());
OpenSearchIntegTestCase.putRepository(client().admin().cluster(), repoName, "hdfs", settings);
}

// HDFS repository doesn't have precise cleanup stats so we only check whether or not any blobs were removed