From d202d90df3991daaf3e20be325cac4b323f0ed23 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Mon, 8 Apr 2024 12:00:01 -0700 Subject: [PATCH 01/65] Adding support for handling multiple transport protocols (#12967) * Adding support for more than one protocol for transport Signed-off-by: Vacha Shah * Adding CHANGELOG entry Signed-off-by: Vacha Shah * Addressing comments Signed-off-by: Vacha Shah * Addressing comments Signed-off-by: Vacha Shah * Removing determineTransportProtocol Signed-off-by: Vacha Shah * Determine transport protocol only on first byte reference Signed-off-by: Vacha Shah * Making InboundBytesHandler closeable Signed-off-by: Vacha Shah * Fixing close() for InboundPipeline Signed-off-by: Vacha Shah * Adding DeprecatedAPI annotation to japicmp task Signed-off-by: Vacha Shah * Fixing for detect breaking changes workflow Signed-off-by: Vacha Shah * Fixing recursion Signed-off-by: Vacha Shah --------- Signed-off-by: Vacha Shah --- CHANGELOG.md | 1 + server/build.gradle | 2 +- .../java/org/opensearch/transport/Header.java | 8 +- .../transport/InboundBytesHandler.java | 32 ++ .../opensearch/transport/InboundDecoder.java | 4 +- .../opensearch/transport/InboundHandler.java | 427 +-------------- .../opensearch/transport/InboundMessage.java | 76 +-- .../opensearch/transport/InboundPipeline.java | 114 +--- .../transport/NativeMessageHandler.java | 494 ++++++++++++++++++ .../transport/ProtocolInboundMessage.java | 27 + .../transport/ProtocolMessageHandler.java | 27 + .../opensearch/transport/TcpTransport.java | 12 + .../NativeInboundBytesHandler.java | 167 ++++++ .../nativeprotocol/NativeInboundMessage.java | 149 ++++++ .../nativeprotocol/package-info.java | 10 + .../transport/InboundHandlerTests.java | 8 +- .../transport/InboundPipelineTests.java | 17 +- .../transport/OutboundHandlerTests.java | 5 +- 18 files changed, 1010 insertions(+), 570 deletions(-) create mode 100644 server/src/main/java/org/opensearch/transport/InboundBytesHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/NativeMessageHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtocolInboundMessage.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtocolMessageHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundMessage.java create mode 100644 server/src/main/java/org/opensearch/transport/nativeprotocol/package-info.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 50b38450aa724..62cf5e45e80f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -114,6 +114,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add cluster primary balance contraint for rebalancing with buffer ([#12656](https://github.com/opensearch-project/OpenSearch/pull/12656)) - [Remote Store] Make translog transfer timeout configurable ([#12704](https://github.com/opensearch-project/OpenSearch/pull/12704)) - Reject Resize index requests (i.e, split, shrink and clone), While DocRep to SegRep migration is in progress.([#12686](https://github.com/opensearch-project/OpenSearch/pull/12686)) +- Add support for more than one protocol for transport ([#12967](https://github.com/opensearch-project/OpenSearch/pull/12967)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) diff --git 
a/server/build.gradle b/server/build.gradle index 7d52849844aaa..cb48142a61159 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -387,7 +387,7 @@ tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) { onlyModified = true failOnModification = true ignoreMissingClasses = true - annotationIncludes = ['@org.opensearch.common.annotation.PublicApi'] + annotationIncludes = ['@org.opensearch.common.annotation.PublicApi', '@org.opensearch.common.annotation.DeprecatedApi'] txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt") htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html") dependsOn downloadSnapshot diff --git a/server/src/main/java/org/opensearch/transport/Header.java b/server/src/main/java/org/opensearch/transport/Header.java index 57c1da6f46aec..ac30df8dda02c 100644 --- a/server/src/main/java/org/opensearch/transport/Header.java +++ b/server/src/main/java/org/opensearch/transport/Header.java @@ -75,11 +75,11 @@ public int getNetworkMessageSize() { return networkMessageSize; } - Version getVersion() { + public Version getVersion() { return version; } - long getRequestId() { + public long getRequestId() { return requestId; } @@ -87,7 +87,7 @@ byte getStatus() { return status; } - boolean isRequest() { + public boolean isRequest() { return TransportStatus.isRequest(status); } @@ -99,7 +99,7 @@ boolean isError() { return TransportStatus.isError(status); } - boolean isHandshake() { + public boolean isHandshake() { return TransportStatus.isHandshake(status); } diff --git a/server/src/main/java/org/opensearch/transport/InboundBytesHandler.java b/server/src/main/java/org/opensearch/transport/InboundBytesHandler.java new file mode 100644 index 0000000000000..276891212e43f --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/InboundBytesHandler.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import org.opensearch.common.bytes.ReleasableBytesReference; + +import java.io.Closeable; +import java.io.IOException; +import java.util.function.BiConsumer; + +/** + * Interface for handling inbound bytes. Can be implemented by different transport protocols. 
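+ * The pipeline probes each registered handler via {@code canHandleBytes} on the first bytes of a message and then keeps routing the channel's subsequent fragments to the handler it selected.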
+ */ +public interface InboundBytesHandler extends Closeable { + + public void doHandleBytes( + TcpChannel channel, + ReleasableBytesReference reference, + BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler + ) throws IOException; + + public boolean canHandleBytes(ReleasableBytesReference reference); + + @Override + void close(); +} diff --git a/server/src/main/java/org/opensearch/transport/InboundDecoder.java b/server/src/main/java/org/opensearch/transport/InboundDecoder.java index 82fc09a985446..d6b7a98e876b3 100644 --- a/server/src/main/java/org/opensearch/transport/InboundDecoder.java +++ b/server/src/main/java/org/opensearch/transport/InboundDecoder.java @@ -50,8 +50,8 @@ */ public class InboundDecoder implements Releasable { - static final Object PING = new Object(); - static final Object END_CONTENT = new Object(); + public static final Object PING = new Object(); + public static final Object END_CONTENT = new Object(); private final Version version; private final PageCacheRecycler recycler; diff --git a/server/src/main/java/org/opensearch/transport/InboundHandler.java b/server/src/main/java/org/opensearch/transport/InboundHandler.java index a8315c3cae4e0..6492900c49a0e 100644 --- a/server/src/main/java/org/opensearch/transport/InboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/InboundHandler.java @@ -32,35 +32,14 @@ package org.opensearch.transport; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.util.BytesRef; -import org.opensearch.Version; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.core.common.io.stream.ByteBufferStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.transport.TransportAddress; -import org.opensearch.core.transport.TransportResponse; -import org.opensearch.telemetry.tracing.Span; -import org.opensearch.telemetry.tracing.SpanBuilder; -import org.opensearch.telemetry.tracing.SpanScope; import org.opensearch.telemetry.tracing.Tracer; -import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; -import java.io.EOFException; import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.Collections; import java.util.Map; -import java.util.stream.Collectors; /** * Handler for inbound data * * @opensearch.internal */ public class InboundHandler { - private static final Logger logger = LogManager.getLogger(InboundHandler.class); - private final ThreadPool threadPool; - private final OutboundHandler outboundHandler; - private final NamedWriteableRegistry namedWriteableRegistry; - private final TransportHandshaker handshaker; - private final TransportKeepAlive keepAlive; - private final Transport.ResponseHandlers responseHandlers; - private final Transport.RequestHandlers requestHandlers; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; private volatile long slowLogThresholdMs = Long.MAX_VALUE; - private final Tracer tracer; + private final Map<String, ProtocolMessageHandler>
protocolMessageHandlers; InboundHandler( ThreadPool threadPool, @@ -96,13 +67,19 @@ public class InboundHandler { Tracer tracer ) { this.threadPool = threadPool; - this.outboundHandler = outboundHandler; - this.namedWriteableRegistry = namedWriteableRegistry; - this.handshaker = handshaker; - this.keepAlive = keepAlive; - this.requestHandlers = requestHandlers; - this.responseHandlers = responseHandlers; - this.tracer = tracer; + this.protocolMessageHandlers = Map.of( + NativeInboundMessage.NATIVE_PROTOCOL, + new NativeMessageHandler( + threadPool, + outboundHandler, + namedWriteableRegistry, + handshaker, + requestHandlers, + responseHandlers, + tracer, + keepAlive + ) + ); } void setMessageListener(TransportMessageListener listener) { @@ -117,377 +94,17 @@ void setSlowLogThreshold(TimeValue slowLogThreshold) { this.slowLogThresholdMs = slowLogThreshold.getMillis(); } - void inboundMessage(TcpChannel channel, InboundMessage message) throws Exception { + void inboundMessage(TcpChannel channel, ProtocolInboundMessage message) throws Exception { final long startTime = threadPool.relativeTimeInMillis(); channel.getChannelStats().markAccessed(startTime); - TransportLogger.logInboundMessage(channel, message); - if (message.isPing()) { - keepAlive.receiveKeepAlive(channel); - } else { - messageReceived(channel, message, startTime); - } - } - - // Empty stream constant to avoid instantiating a new stream for empty messages. - private static final StreamInput EMPTY_STREAM_INPUT = new ByteBufferStreamInput(ByteBuffer.wrap(BytesRef.EMPTY_BYTES)); - - private void messageReceived(TcpChannel channel, InboundMessage message, long startTime) throws IOException { - final InetSocketAddress remoteAddress = channel.getRemoteAddress(); - final Header header = message.getHeader(); - assert header.needsToReadVariableHeader() == false; - ThreadContext threadContext = threadPool.getThreadContext(); - try (ThreadContext.StoredContext existing = threadContext.stashContext()) { - // Place the context with the headers from the message - threadContext.setHeaders(header.getHeaders()); - threadContext.putTransient("_remote_address", remoteAddress); - if (header.isRequest()) { - handleRequest(channel, header, message); - } else { - // Responses do not support short circuiting currently - assert message.isShortCircuit() == false; - final TransportResponseHandler handler; - long requestId = header.getRequestId(); - if (header.isHandshake()) { - handler = handshaker.removeHandlerForHandshake(requestId); - } else { - TransportResponseHandler theHandler = responseHandlers.onResponseReceived( - requestId, - messageListener - ); - if (theHandler == null && header.isError()) { - handler = handshaker.removeHandlerForHandshake(requestId); - } else { - handler = theHandler; - } - } - // ignore if its null, the service logs it - if (handler != null) { - final StreamInput streamInput; - if (message.getContentLength() > 0 || header.getVersion().equals(Version.CURRENT) == false) { - streamInput = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(streamInput, header.getVersion()); - if (header.isError()) { - handlerResponseError(requestId, streamInput, handler); - } else { - handleResponse(requestId, remoteAddress, streamInput, handler); - } - } else { - assert header.isError() == false; - handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); - } - } - - } - } finally { - final long took = threadPool.relativeTimeInMillis() - startTime; - final long logThreshold = slowLogThresholdMs; - if 
(logThreshold > 0 && took > logThreshold) { - logger.warn( - "handling inbound transport message [{}] took [{}ms] which is above the warn threshold of [{}ms]", - message, - took, - logThreshold - ); - } - } - } - - private Map> extractHeaders(Map headers) { - return headers.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> Collections.singleton(e.getValue()))); - } - - private void handleRequest(TcpChannel channel, Header header, InboundMessage message) throws IOException { - final String action = header.getActionName(); - final long requestId = header.getRequestId(); - final Version version = header.getVersion(); - final Map> headers = extractHeaders(header.getHeaders().v1()); - Span span = tracer.startSpan(SpanBuilder.from(action, channel), headers); - try (SpanScope spanScope = tracer.withSpanInScope(span)) { - if (header.isHandshake()) { - messageListener.onRequestReceived(requestId, action); - // Cannot short circuit handshakes - assert message.isShortCircuit() == false; - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final TcpTransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); - try { - handshaker.handleHandshake(traceableTransportChannel, requestId, stream); - } catch (Exception e) { - if (Version.CURRENT.isCompatible(header.getVersion())) { - sendErrorResponse(action, traceableTransportChannel, e); - } else { - logger.warn( - new ParameterizedMessage( - "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", - channel, - header.getVersion() - ), - e - ); - channel.close(); - } - } - } else { - final TcpTransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); - try { - messageListener.onRequestReceived(requestId, action); - if (message.isShortCircuit()) { - sendErrorResponse(action, traceableTransportChannel, message.getException()); - } else { - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final RequestHandlerRegistry reg = requestHandlers.getHandler(action); - assert reg != null; - - final T request = newRequest(requestId, action, stream, reg); - request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); - checkStreamIsFullyConsumed(requestId, action, stream); - - final String executor = reg.getExecutor(); - if (ThreadPool.Names.SAME.equals(executor)) { - try { - reg.processMessageReceived(request, traceableTransportChannel); - } catch (Exception e) { - sendErrorResponse(reg.getAction(), traceableTransportChannel, e); - } - } else { - threadPool.executor(executor).execute(new RequestHandler<>(reg, request, traceableTransportChannel)); - } - } - } catch (Exception e) { - sendErrorResponse(action, traceableTransportChannel, e); - } - } - } catch (Exception e) { - span.setError(e); - span.endSpan(); - throw e; - } - } - - /** - * 
Creates new request instance out of input stream. Throws IllegalStateException if the end of - * the stream was reached before the request is fully deserialized from the stream. - * @param transport request type - * @param requestId request identifier - * @param action action name - * @param stream stream - * @param reg request handler registry - * @return new request instance - * @throws IOException IOException - * @throws IllegalStateException IllegalStateException - */ - private T newRequest( - final long requestId, - final String action, - final StreamInput stream, - final RequestHandlerRegistry reg - ) throws IOException { - try { - return reg.newRequest(stream); - } catch (final EOFException e) { - // Another favor of (de)serialization issues is when stream contains less bytes than - // the request handler needs to deserialize the payload. - throw new IllegalStateException( - "Message fully read (request) but more data is expected for requestId [" - + requestId - + "], action [" - + action - + "]; resetting", - e - ); - } - } - - /** - * Checks if the stream is fully consumed and throws the exceptions if that is not the case. - * @param requestId request identifier - * @param action action name - * @param stream stream - * @throws IOException IOException - */ - private void checkStreamIsFullyConsumed(final long requestId, final String action, final StreamInput stream) throws IOException { - // in case we throw an exception, i.e. when the limit is hit, we don't want to verify - final int nextByte = stream.read(); - - // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker - if (nextByte != -1) { - throw new IllegalStateException( - "Message not fully read (request) for requestId [" - + requestId - + "], action [" - + action - + "], available [" - + stream.available() - + "]; resetting" - ); - } - } - - /** - * Checks if the stream is fully consumed and throws the exceptions if that is not the case. 
- * @param requestId request identifier - * @param handler response handler - * @param stream stream - * @param error "true" if response represents error, "false" otherwise - * @throws IOException IOException - */ - private void checkStreamIsFullyConsumed( - final long requestId, - final TransportResponseHandler handler, - final StreamInput stream, - final boolean error - ) throws IOException { - if (stream != EMPTY_STREAM_INPUT) { - // Check the entire message has been read - final int nextByte = stream.read(); - // calling read() is useful to make sure the message is fully read, even if there is an EOS marker - if (nextByte != -1) { - throw new IllegalStateException( - "Message not fully read (response) for requestId [" - + requestId - + "], handler [" - + handler - + "], error [" - + error - + "]; resetting" - ); - } - } - } - - private static void sendErrorResponse(String actionName, TransportChannel transportChannel, Exception e) { - try { - transportChannel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", actionName), inner); - } - } - - private void handleResponse( - final long requestId, - InetSocketAddress remoteAddress, - final StreamInput stream, - final TransportResponseHandler handler - ) { - final T response; - try { - response = handler.read(stream); - response.remoteAddress(new TransportAddress(remoteAddress)); - checkStreamIsFullyConsumed(requestId, handler, stream, false); - } catch (Exception e) { - final Exception serializationException = new TransportSerializationException( - "Failed to deserialize response from handler [" + handler + "]", - e - ); - logger.warn(new ParameterizedMessage("Failed to deserialize response from [{}]", remoteAddress), serializationException); - handleException(handler, serializationException); - return; - } - final String executor = handler.executor(); - if (ThreadPool.Names.SAME.equals(executor)) { - doHandleResponse(handler, response); - } else { - threadPool.executor(executor).execute(() -> doHandleResponse(handler, response)); - } + messageReceivedFromPipeline(channel, message, startTime); } - private void doHandleResponse(TransportResponseHandler handler, T response) { - try { - handler.handleResponse(response); - } catch (Exception e) { - handleException(handler, new ResponseHandlerFailureTransportException(e)); - } - } - - private void handlerResponseError(final long requestId, StreamInput stream, final TransportResponseHandler handler) { - Exception error; - try { - error = stream.readException(); - checkStreamIsFullyConsumed(requestId, handler, stream, true); - } catch (Exception e) { - error = new TransportSerializationException( - "Failed to deserialize exception response from stream for handler [" + handler + "]", - e - ); - } - handleException(handler, error); - } - - private void handleException(final TransportResponseHandler handler, Throwable error) { - if (!(error instanceof RemoteTransportException)) { - error = new RemoteTransportException(error.getMessage(), error); - } - final RemoteTransportException rtx = (RemoteTransportException) error; - threadPool.executor(handler.executor()).execute(() -> { - try { - handler.handleException(rtx); - } catch (Exception e) { - logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); - } - }); - } - - private StreamInput namedWriteableStream(StreamInput delegate) { - return new 
NamedWriteableAwareStreamInput(delegate, namedWriteableRegistry); - } - - static void assertRemoteVersion(StreamInput in, Version version) { - assert version.equals(in.getVersion()) : "Stream version [" + in.getVersion() + "] does not match version [" + version + "]"; - } - - /** - * Internal request handler - * - * @opensearch.internal - */ - private static class RequestHandler extends AbstractRunnable { - private final RequestHandlerRegistry reg; - private final T request; - private final TransportChannel transportChannel; - - RequestHandler(RequestHandlerRegistry reg, T request, TransportChannel transportChannel) { - this.reg = reg; - this.request = request; - this.transportChannel = transportChannel; - } - - @Override - protected void doRun() throws Exception { - reg.processMessageReceived(request, transportChannel); - } - - @Override - public boolean isForceExecution() { - return reg.isForceExecution(); - } - - @Override - public void onFailure(Exception e) { - sendErrorResponse(reg.getAction(), transportChannel, e); + private void messageReceivedFromPipeline(TcpChannel channel, ProtocolInboundMessage message, long startTime) throws IOException { + ProtocolMessageHandler protocolMessageHandler = protocolMessageHandlers.get(message.getProtocol()); + if (protocolMessageHandler == null) { + throw new IllegalStateException("No protocol message handler found for protocol: " + message.getProtocol()); } + protocolMessageHandler.messageReceived(channel, message, startTime, slowLogThresholdMs, messageListener); } } diff --git a/server/src/main/java/org/opensearch/transport/InboundMessage.java b/server/src/main/java/org/opensearch/transport/InboundMessage.java index 71c4d6973505d..5c68257557061 100644 --- a/server/src/main/java/org/opensearch/transport/InboundMessage.java +++ b/server/src/main/java/org/opensearch/transport/InboundMessage.java @@ -32,105 +32,77 @@ package org.opensearch.transport; -import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.annotation.DeprecatedApi; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; import java.io.IOException; /** * Inbound data as a message - * + * This api is deprecated, please use {@link org.opensearch.transport.nativeprotocol.NativeInboundMessage} instead. 
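+ * All methods delegate to a wrapped {@link org.opensearch.transport.nativeprotocol.NativeInboundMessage}, so behavior is unchanged for existing callers.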
* @opensearch.api */ -@PublicApi(since = "1.0.0") -public class InboundMessage implements Releasable { +@DeprecatedApi(since = "2.14.0") +public class InboundMessage implements Releasable, ProtocolInboundMessage { - private final Header header; - private final ReleasableBytesReference content; - private final Exception exception; - private final boolean isPing; - private Releasable breakerRelease; - private StreamInput streamInput; + private final NativeInboundMessage nativeInboundMessage; public InboundMessage(Header header, ReleasableBytesReference content, Releasable breakerRelease) { - this.header = header; - this.content = content; - this.breakerRelease = breakerRelease; - this.exception = null; - this.isPing = false; + this.nativeInboundMessage = new NativeInboundMessage(header, content, breakerRelease); } public InboundMessage(Header header, Exception exception) { - this.header = header; - this.content = null; - this.breakerRelease = null; - this.exception = exception; - this.isPing = false; + this.nativeInboundMessage = new NativeInboundMessage(header, exception); } public InboundMessage(Header header, boolean isPing) { - this.header = header; - this.content = null; - this.breakerRelease = null; - this.exception = null; - this.isPing = isPing; + this.nativeInboundMessage = new NativeInboundMessage(header, isPing); } public Header getHeader() { - return header; + return this.nativeInboundMessage.getHeader(); } public int getContentLength() { - if (content == null) { - return 0; - } else { - return content.length(); - } + return this.nativeInboundMessage.getContentLength(); } public Exception getException() { - return exception; + return this.nativeInboundMessage.getException(); } public boolean isPing() { - return isPing; + return this.nativeInboundMessage.isPing(); } public boolean isShortCircuit() { - return exception != null; + return this.nativeInboundMessage.getException() != null; } public Releasable takeBreakerReleaseControl() { - final Releasable toReturn = breakerRelease; - breakerRelease = null; - if (toReturn != null) { - return toReturn; - } else { - return () -> {}; - } + return this.nativeInboundMessage.takeBreakerReleaseControl(); } public StreamInput openOrGetStreamInput() throws IOException { - assert isPing == false && content != null; - if (streamInput == null) { - streamInput = content.streamInput(); - streamInput.setVersion(header.getVersion()); - } - return streamInput; + return this.nativeInboundMessage.openOrGetStreamInput(); } @Override public void close() { - IOUtils.closeWhileHandlingException(streamInput); - Releasables.closeWhileHandlingException(content, breakerRelease); + this.nativeInboundMessage.close(); } @Override public String toString() { - return "InboundMessage{" + header + "}"; + return this.nativeInboundMessage.toString(); } + + @Override + public String getProtocol() { + return this.nativeInboundMessage.getProtocol(); + } + } diff --git a/server/src/main/java/org/opensearch/transport/InboundPipeline.java b/server/src/main/java/org/opensearch/transport/InboundPipeline.java index dd4690e5e6abf..5cee3bb975223 100644 --- a/server/src/main/java/org/opensearch/transport/InboundPipeline.java +++ b/server/src/main/java/org/opensearch/transport/InboundPipeline.java @@ -38,11 +38,11 @@ import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.common.breaker.CircuitBreaker; -import org.opensearch.core.common.bytes.CompositeBytesReference; +import 
org.opensearch.transport.nativeprotocol.NativeInboundBytesHandler; import java.io.IOException; import java.util.ArrayDeque; -import java.util.ArrayList; +import java.util.List; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.LongSupplier; @@ -55,17 +55,16 @@ */ public class InboundPipeline implements Releasable { - private static final ThreadLocal<ArrayList<Object>> fragmentList = ThreadLocal.withInitial(ArrayList::new); - private static final InboundMessage PING_MESSAGE = new InboundMessage(null, true); - private final LongSupplier relativeTimeInMillis; private final StatsTracker statsTracker; private final InboundDecoder decoder; private final InboundAggregator aggregator; - private final BiConsumer<TcpChannel, InboundMessage> messageHandler; private Exception uncaughtException; private final ArrayDeque<ReleasableBytesReference> pending = new ArrayDeque<>(2); private boolean isClosed = false; + private final BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler; + private final List<InboundBytesHandler> protocolBytesHandlers; + private InboundBytesHandler currentHandler; public InboundPipeline( Version version, @@ -74,7 +73,7 @@ public InboundPipeline( LongSupplier relativeTimeInMillis, Supplier<CircuitBreaker> circuitBreaker, Function<String, RequestHandlerRegistry<TransportRequest>> registryFunction, - BiConsumer<TcpChannel, InboundMessage> messageHandler + BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler ) { this( statsTracker, @@ -90,18 +89,23 @@ public InboundPipeline( LongSupplier relativeTimeInMillis, InboundDecoder decoder, InboundAggregator aggregator, - BiConsumer<TcpChannel, InboundMessage> messageHandler + BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler ) { this.relativeTimeInMillis = relativeTimeInMillis; this.statsTracker = statsTracker; this.decoder = decoder; this.aggregator = aggregator; + this.protocolBytesHandlers = List.of(new NativeInboundBytesHandler(pending, decoder, aggregator, statsTracker)); this.messageHandler = messageHandler; } @Override public void close() { isClosed = true; + if (currentHandler != null) { + currentHandler.close(); + currentHandler = null; + } Releasables.closeWhileHandlingException(decoder, aggregator); Releasables.closeWhileHandlingException(pending); pending.clear(); @@ -124,95 +128,21 @@ public void doHandleBytes(TcpChannel channel, ReleasableBytesReference reference statsTracker.markBytesRead(reference.length()); pending.add(reference.retain()); - final ArrayList<Object> fragments = fragmentList.get(); - boolean continueHandling = true; - - while (continueHandling && isClosed == false) { - boolean continueDecoding = true; - while (continueDecoding && pending.isEmpty() == false) { - try (ReleasableBytesReference toDecode = getPendingBytes()) { - final int bytesDecoded = decoder.decode(toDecode, fragments::add); - if (bytesDecoded != 0) { - releasePendingBytes(bytesDecoded); - if (fragments.isEmpty() == false && endOfMessage(fragments.get(fragments.size() - 1))) { - continueDecoding = false; - } - } else { - continueDecoding = false; - } - } - } - - if (fragments.isEmpty()) { - continueHandling = false; - } else { - try { - forwardFragments(channel, fragments); - } finally { - for (Object fragment : fragments) { - if (fragment instanceof ReleasableBytesReference) { - ((ReleasableBytesReference) fragment).close(); - } - } - fragments.clear(); - } - } - } - } - - private void forwardFragments(TcpChannel channel, ArrayList<Object> fragments) throws IOException { - for (Object fragment : fragments) { - if (fragment instanceof Header) { - assert aggregator.isAggregating() == false; - aggregator.headerReceived((Header) fragment); - } else if (fragment == InboundDecoder.PING) { - assert aggregator.isAggregating() == false; - messageHandler.accept(channel, PING_MESSAGE); - } else if (fragment
== InboundDecoder.END_CONTENT) { - assert aggregator.isAggregating(); - try (InboundMessage aggregated = aggregator.finishAggregation()) { - statsTracker.markMessageReceived(); - messageHandler.accept(channel, aggregated); + // If we don't have a current handler, we should try to find one based on the protocol of the incoming bytes. + if (currentHandler == null) { + for (InboundBytesHandler handler : protocolBytesHandlers) { + if (handler.canHandleBytes(reference)) { + currentHandler = handler; + break; } - } else { - assert aggregator.isAggregating(); - assert fragment instanceof ReleasableBytesReference; - aggregator.aggregate((ReleasableBytesReference) fragment); } } - } - private boolean endOfMessage(Object fragment) { - return fragment == InboundDecoder.PING || fragment == InboundDecoder.END_CONTENT || fragment instanceof Exception; - } - - private ReleasableBytesReference getPendingBytes() { - if (pending.size() == 1) { - return pending.peekFirst().retain(); + // If we have a current handler determined based on protocol, we should continue to use it for the fragmented bytes. + if (currentHandler != null) { + currentHandler.doHandleBytes(channel, reference, messageHandler); } else { - final ReleasableBytesReference[] bytesReferences = new ReleasableBytesReference[pending.size()]; - int index = 0; - for (ReleasableBytesReference pendingReference : pending) { - bytesReferences[index] = pendingReference.retain(); - ++index; - } - final Releasable releasable = () -> Releasables.closeWhileHandlingException(bytesReferences); - return new ReleasableBytesReference(CompositeBytesReference.of(bytesReferences), releasable); - } - } - - private void releasePendingBytes(int bytesConsumed) { - int bytesToRelease = bytesConsumed; - while (bytesToRelease != 0) { - try (ReleasableBytesReference reference = pending.pollFirst()) { - assert reference != null; - if (bytesToRelease < reference.length()) { - pending.addFirst(reference.retainedSlice(bytesToRelease, reference.length() - bytesToRelease)); - bytesToRelease -= bytesToRelease; - } else { - bytesToRelease -= reference.length(); - } - } + throw new IllegalStateException("No bytes handler found for the incoming transport protocol"); } } } diff --git a/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java b/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java new file mode 100644 index 0000000000000..861b95a8098f2 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java @@ -0,0 +1,494 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.BytesRef; +import org.opensearch.Version; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.ByteBufferStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel; +import org.opensearch.threadpool.ThreadPool; + +import java.io.EOFException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Native handler for inbound data + * + * @opensearch.internal + */ +public class NativeMessageHandler implements ProtocolMessageHandler { + + private static final Logger logger = LogManager.getLogger(NativeMessageHandler.class); + + private final ThreadPool threadPool; + private final OutboundHandler outboundHandler; + private final NamedWriteableRegistry namedWriteableRegistry; + private final TransportHandshaker handshaker; + private final TransportKeepAlive keepAlive; + private final Transport.ResponseHandlers responseHandlers; + private final Transport.RequestHandlers requestHandlers; + + private final Tracer tracer; + + NativeMessageHandler( + ThreadPool threadPool, + OutboundHandler outboundHandler, + NamedWriteableRegistry namedWriteableRegistry, + TransportHandshaker handshaker, + Transport.RequestHandlers requestHandlers, + Transport.ResponseHandlers responseHandlers, + Tracer tracer, + TransportKeepAlive keepAlive + ) { + this.threadPool = threadPool; + this.outboundHandler = outboundHandler; + this.namedWriteableRegistry = namedWriteableRegistry; + this.handshaker = handshaker; + this.requestHandlers = requestHandlers; + this.responseHandlers = responseHandlers; + this.tracer = tracer; + this.keepAlive = keepAlive; + } + + // Empty stream constant to avoid instantiating a new stream for empty messages. 
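+    // It also serves as a sentinel: checkStreamIsFullyConsumed skips its end-of-stream probe when handed this instance.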
+ private static final StreamInput EMPTY_STREAM_INPUT = new ByteBufferStreamInput(ByteBuffer.wrap(BytesRef.EMPTY_BYTES)); + + @Override + public void messageReceived( + TcpChannel channel, + ProtocolInboundMessage message, + long startTime, + long slowLogThresholdMs, + TransportMessageListener messageListener + ) throws IOException { + InboundMessage inboundMessage = (InboundMessage) message; + TransportLogger.logInboundMessage(channel, inboundMessage); + if (inboundMessage.isPing()) { + keepAlive.receiveKeepAlive(channel); + } else { + handleMessage(channel, inboundMessage, startTime, slowLogThresholdMs, messageListener); + } + } + + private void handleMessage( + TcpChannel channel, + InboundMessage message, + long startTime, + long slowLogThresholdMs, + TransportMessageListener messageListener + ) throws IOException { + final InetSocketAddress remoteAddress = channel.getRemoteAddress(); + final Header header = message.getHeader(); + assert header.needsToReadVariableHeader() == false; + ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext existing = threadContext.stashContext()) { + // Place the context with the headers from the message + threadContext.setHeaders(header.getHeaders()); + threadContext.putTransient("_remote_address", remoteAddress); + if (header.isRequest()) { + handleRequest(channel, header, message, messageListener); + } else { + // Responses do not support short circuiting currently + assert message.isShortCircuit() == false; + final TransportResponseHandler<? extends TransportResponse> handler; + long requestId = header.getRequestId(); + if (header.isHandshake()) { + handler = handshaker.removeHandlerForHandshake(requestId); + } else { + TransportResponseHandler<? extends TransportResponse> theHandler = responseHandlers.onResponseReceived( + requestId, + messageListener + ); + if (theHandler == null && header.isError()) { + handler = handshaker.removeHandlerForHandshake(requestId); + } else { + handler = theHandler; + } + } + // ignore if it's null, the service logs it + if (handler != null) { + final StreamInput streamInput; + if (message.getContentLength() > 0 || header.getVersion().equals(Version.CURRENT) == false) { + streamInput = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(streamInput, header.getVersion()); + if (header.isError()) { + handlerResponseError(requestId, streamInput, handler); + } else { + handleResponse(requestId, remoteAddress, streamInput, handler); + } + } else { + assert header.isError() == false; + handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); + } + } + + } + } finally { + final long took = threadPool.relativeTimeInMillis() - startTime; + final long logThreshold = slowLogThresholdMs; + if (logThreshold > 0 && took > logThreshold) { + logger.warn( + "handling inbound transport message [{}] took [{}ms] which is above the warn threshold of [{}ms]", + message, + took, + logThreshold + ); + } + } + } + + private Map<String, Collection<String>> extractHeaders(Map<String, String> headers) { + return headers.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> Collections.singleton(e.getValue()))); + } + + private <T extends TransportRequest> void handleRequest( + TcpChannel channel, + Header header, + InboundMessage message, + TransportMessageListener messageListener + ) throws IOException { + final String action = header.getActionName(); + final long requestId = header.getRequestId(); + final Version version = header.getVersion(); + final Map<String, Collection<String>> headers = extractHeaders(header.getHeaders().v1()); + Span span = tracer.startSpan(SpanBuilder.from(action, channel), headers);
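+        // On a dispatch failure, the catch block below records the exception on the span and ends it before rethrowing.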
+ try (SpanScope spanScope = tracer.withSpanInScope(span)) { + if (header.isHandshake()) { + messageListener.onRequestReceived(requestId, action); + // Cannot short circuit handshakes + assert message.isShortCircuit() == false; + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + handshaker.handleHandshake(traceableTransportChannel, requestId, stream); + } catch (Exception e) { + if (Version.CURRENT.isCompatible(header.getVersion())) { + sendErrorResponse(action, traceableTransportChannel, e); + } else { + logger.warn( + new ParameterizedMessage( + "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", + channel, + header.getVersion() + ), + e + ); + channel.close(); + } + } + } else { + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + messageListener.onRequestReceived(requestId, action); + if (message.isShortCircuit()) { + sendErrorResponse(action, traceableTransportChannel, message.getException()); + } else { + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final RequestHandlerRegistry reg = requestHandlers.getHandler(action); + assert reg != null; + + final T request = newRequest(requestId, action, stream, reg); + request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); + checkStreamIsFullyConsumed(requestId, action, stream); + + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + try { + reg.processMessageReceived(request, traceableTransportChannel); + } catch (Exception e) { + sendErrorResponse(reg.getAction(), traceableTransportChannel, e); + } + } else { + threadPool.executor(executor).execute(new RequestHandler<>(reg, request, traceableTransportChannel)); + } + } + } catch (Exception e) { + sendErrorResponse(action, traceableTransportChannel, e); + } + } + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; + } + } + + /** + * Creates new request instance out of input stream. Throws IllegalStateException if the end of + * the stream was reached before the request is fully deserialized from the stream. 
+ * @param <T> transport request type + * @param requestId request identifier + * @param action action name + * @param stream stream + * @param reg request handler registry + * @return new request instance + * @throws IOException IOException + * @throws IllegalStateException IllegalStateException + */ + private <T extends TransportRequest> T newRequest( + final long requestId, + final String action, + final StreamInput stream, + final RequestHandlerRegistry<T> reg + ) throws IOException { + try { + return reg.newRequest(stream); + } catch (final EOFException e) { + // Another flavor of (de)serialization issues is when the stream contains fewer bytes than + // the request handler needs to deserialize the payload. + throw new IllegalStateException( + "Message fully read (request) but more data is expected for requestId [" + + requestId + + "], action [" + + action + + "]; resetting", + e + ); + } + } + + /** + * Checks if the stream is fully consumed and throws an exception if that is not the case. + * @param requestId request identifier + * @param action action name + * @param stream stream + * @throws IOException IOException + */ + private void checkStreamIsFullyConsumed(final long requestId, final String action, final StreamInput stream) throws IOException { + // in case we throw an exception, i.e. when the limit is hit, we don't want to verify + final int nextByte = stream.read(); + + // calling read() is useful to make sure the message is fully read, even if there is some kind of EOS marker + if (nextByte != -1) { + throw new IllegalStateException( + "Message not fully read (request) for requestId [" + + requestId + + "], action [" + + action + + "], available [" + + stream.available() + + "]; resetting" + ); + } + } + + /** + * Checks if the stream is fully consumed and throws an exception if that is not the case.
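+     * The check is skipped when the response arrived with no payload and the shared EMPTY_STREAM_INPUT was passed instead of a real stream.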
+ * @param requestId request identifier + * @param handler response handler + * @param stream stream + * @param error "true" if response represents error, "false" otherwise + * @throws IOException IOException + */ + private void checkStreamIsFullyConsumed( + final long requestId, + final TransportResponseHandler<?> handler, + final StreamInput stream, + final boolean error + ) throws IOException { + if (stream != EMPTY_STREAM_INPUT) { + // Check the entire message has been read + final int nextByte = stream.read(); + // calling read() is useful to make sure the message is fully read, even if there is an EOS marker + if (nextByte != -1) { + throw new IllegalStateException( + "Message not fully read (response) for requestId [" + + requestId + + "], handler [" + + handler + + "], error [" + + error + + "]; resetting" + ); + } + } + } + + private static void sendErrorResponse(String actionName, TransportChannel transportChannel, Exception e) { + try { + transportChannel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", actionName), inner); + } + } + + private <T extends TransportResponse> void handleResponse( + final long requestId, + InetSocketAddress remoteAddress, + final StreamInput stream, + final TransportResponseHandler<T> handler + ) { + final T response; + try { + response = handler.read(stream); + response.remoteAddress(new TransportAddress(remoteAddress)); + checkStreamIsFullyConsumed(requestId, handler, stream, false); + } catch (Exception e) { + final Exception serializationException = new TransportSerializationException( + "Failed to deserialize response from handler [" + handler + "]", + e + ); + logger.warn(new ParameterizedMessage("Failed to deserialize response from [{}]", remoteAddress), serializationException); + handleException(handler, serializationException); + return; + } + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + doHandleResponse(handler, response); + } else { + threadPool.executor(executor).execute(() -> doHandleResponse(handler, response)); + } + } + + private <T extends TransportResponse> void doHandleResponse(TransportResponseHandler<T> handler, T response) { + try { + handler.handleResponse(response); + } catch (Exception e) { + handleException(handler, new ResponseHandlerFailureTransportException(e)); + } + } + + private void handlerResponseError(final long requestId, StreamInput stream, final TransportResponseHandler<?> handler) { + Exception error; + try { + error = stream.readException(); + checkStreamIsFullyConsumed(requestId, handler, stream, true); + } catch (Exception e) { + error = new TransportSerializationException( + "Failed to deserialize exception response from stream for handler [" + handler + "]", + e + ); + } + handleException(handler, error); + } + + private void handleException(final TransportResponseHandler<?> handler, Throwable error) { + if (!(error instanceof RemoteTransportException)) { + error = new RemoteTransportException(error.getMessage(), error); + } + final RemoteTransportException rtx = (RemoteTransportException) error; + threadPool.executor(handler.executor()).execute(() -> { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); + } + }); + } + + private StreamInput namedWriteableStream(StreamInput delegate) { + return new NamedWriteableAwareStreamInput(delegate, namedWriteableRegistry); + } + + static void
assertRemoteVersion(StreamInput in, Version version) { + assert version.equals(in.getVersion()) : "Stream version [" + in.getVersion() + "] does not match version [" + version + "]"; + } + + /** + * Internal request handler + * + * @opensearch.internal + */ + private static class RequestHandler<T extends TransportRequest> extends AbstractRunnable { + private final RequestHandlerRegistry<T> reg; + private final T request; + private final TransportChannel transportChannel; + + RequestHandler(RequestHandlerRegistry<T> reg, T request, TransportChannel transportChannel) { + this.reg = reg; + this.request = request; + this.transportChannel = transportChannel; + } + + @Override + protected void doRun() throws Exception { + reg.processMessageReceived(request, transportChannel); + } + + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } + + @Override + public void onFailure(Exception e) { + sendErrorResponse(reg.getAction(), transportChannel, e); + } + } + +} diff --git a/server/src/main/java/org/opensearch/transport/ProtocolInboundMessage.java b/server/src/main/java/org/opensearch/transport/ProtocolInboundMessage.java new file mode 100644 index 0000000000000..43c2d5ffe4c96 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtocolInboundMessage.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import org.opensearch.common.annotation.PublicApi; + +/** + * Base interface for inbound data as a message. + * Different implementations are used for different protocols. + * + * @opensearch.api + */ +@PublicApi(since = "2.14.0") +public interface ProtocolInboundMessage { + + /** + * @return the protocol used to encode this message + */ + public String getProtocol(); + +} diff --git a/server/src/main/java/org/opensearch/transport/ProtocolMessageHandler.java b/server/src/main/java/org/opensearch/transport/ProtocolMessageHandler.java new file mode 100644 index 0000000000000..714d91d1e74c7 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtocolMessageHandler.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import java.io.IOException; + +/** + * Interface for message handlers based on transport protocol. + * + * @opensearch.internal + */ +public interface ProtocolMessageHandler { + + public void messageReceived( + TcpChannel channel, + ProtocolInboundMessage message, + long startTime, + long slowLogThresholdMs, + TransportMessageListener messageListener + ) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index 7d45152089f37..e32bba5e836d3 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -762,12 +762,24 @@ protected void serverAcceptedChannel(TcpChannel channel) { protected abstract void stopInternal(); /** + * @deprecated use {@link #inboundMessage(TcpChannel, ProtocolInboundMessage)} * Handles inbound message that has been decoded.
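+     * Delegates to {@link #inboundMessage(TcpChannel, ProtocolInboundMessage)}, which routes the message to the handler registered for its protocol.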
* * @param channel the channel the message is from * @param message the message */ + @Deprecated(since = "2.14.0", forRemoval = true) public void inboundMessage(TcpChannel channel, InboundMessage message) { + inboundMessage(channel, (ProtocolInboundMessage) message); + } + + /** + * Handles inbound message that has been decoded. + * + * @param channel the channel the message is from + * @param message the message + */ + public void inboundMessage(TcpChannel channel, ProtocolInboundMessage message) { try { inboundHandler.inboundMessage(channel, message); } catch (Exception e) { diff --git a/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java new file mode 100644 index 0000000000000..a8a4c0da7ec0f --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport.nativeprotocol; + +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.bytes.CompositeBytesReference; +import org.opensearch.transport.Header; +import org.opensearch.transport.InboundAggregator; +import org.opensearch.transport.InboundBytesHandler; +import org.opensearch.transport.InboundDecoder; +import org.opensearch.transport.InboundMessage; +import org.opensearch.transport.ProtocolInboundMessage; +import org.opensearch.transport.StatsTracker; +import org.opensearch.transport.TcpChannel; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.function.BiConsumer; + +/** + * Handler for inbound bytes for the native protocol. 
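+ * It drains the shared pending queue through the decoder and forwards headers, pings, and content fragments to the aggregator until complete messages can be handed to the message handler.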
+ */ +public class NativeInboundBytesHandler implements InboundBytesHandler { + + private static final ThreadLocal<ArrayList<Object>> fragmentList = ThreadLocal.withInitial(ArrayList::new); + private static final InboundMessage PING_MESSAGE = new InboundMessage(null, true); + + private final ArrayDeque<ReleasableBytesReference> pending; + private final InboundDecoder decoder; + private final InboundAggregator aggregator; + private final StatsTracker statsTracker; + private boolean isClosed = false; + + public NativeInboundBytesHandler( + ArrayDeque<ReleasableBytesReference> pending, + InboundDecoder decoder, + InboundAggregator aggregator, + StatsTracker statsTracker + ) { + this.pending = pending; + this.decoder = decoder; + this.aggregator = aggregator; + this.statsTracker = statsTracker; + } + + @Override + public void close() { + isClosed = true; + } + + @Override + public boolean canHandleBytes(ReleasableBytesReference reference) { + return true; + } + + @Override + public void doHandleBytes( + TcpChannel channel, + ReleasableBytesReference reference, + BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler + ) throws IOException { + final ArrayList<Object> fragments = fragmentList.get(); + boolean continueHandling = true; + + while (continueHandling && isClosed == false) { + boolean continueDecoding = true; + while (continueDecoding && pending.isEmpty() == false) { + try (ReleasableBytesReference toDecode = getPendingBytes()) { + final int bytesDecoded = decoder.decode(toDecode, fragments::add); + if (bytesDecoded != 0) { + releasePendingBytes(bytesDecoded); + if (fragments.isEmpty() == false && endOfMessage(fragments.get(fragments.size() - 1))) { + continueDecoding = false; + } + } else { + continueDecoding = false; + } + } + } + + if (fragments.isEmpty()) { + continueHandling = false; + } else { + try { + forwardFragments(channel, fragments, messageHandler); + } finally { + for (Object fragment : fragments) { + if (fragment instanceof ReleasableBytesReference) { + ((ReleasableBytesReference) fragment).close(); + } + } + fragments.clear(); + } + } + } + } + + private ReleasableBytesReference getPendingBytes() { + if (pending.size() == 1) { + return pending.peekFirst().retain(); + } else { + final ReleasableBytesReference[] bytesReferences = new ReleasableBytesReference[pending.size()]; + int index = 0; + for (ReleasableBytesReference pendingReference : pending) { + bytesReferences[index] = pendingReference.retain(); + ++index; + } + final Releasable releasable = () -> Releasables.closeWhileHandlingException(bytesReferences); + return new ReleasableBytesReference(CompositeBytesReference.of(bytesReferences), releasable); + } + } + + private void releasePendingBytes(int bytesConsumed) { + int bytesToRelease = bytesConsumed; + while (bytesToRelease != 0) { + try (ReleasableBytesReference reference = pending.pollFirst()) { + assert reference != null; + if (bytesToRelease < reference.length()) { + pending.addFirst(reference.retainedSlice(bytesToRelease, reference.length() - bytesToRelease)); + bytesToRelease -= bytesToRelease; + } else { + bytesToRelease -= reference.length(); + } + } + } + } + + private boolean endOfMessage(Object fragment) { + return fragment == InboundDecoder.PING || fragment == InboundDecoder.END_CONTENT || fragment instanceof Exception; + } + + private void forwardFragments( + TcpChannel channel, + ArrayList<Object> fragments, + BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler + ) throws IOException { + for (Object fragment : fragments) { + if (fragment instanceof Header) { + assert aggregator.isAggregating() == false; + aggregator.headerReceived((Header) fragment); + } else if (fragment ==
InboundDecoder.PING) { + assert aggregator.isAggregating() == false; + messageHandler.accept(channel, PING_MESSAGE); + } else if (fragment == InboundDecoder.END_CONTENT) { + assert aggregator.isAggregating(); + try (InboundMessage aggregated = aggregator.finishAggregation()) { + statsTracker.markMessageReceived(); + messageHandler.accept(channel, aggregated); + } + } else { + assert aggregator.isAggregating(); + assert fragment instanceof ReleasableBytesReference; + aggregator.aggregate((ReleasableBytesReference) fragment); + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundMessage.java b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundMessage.java new file mode 100644 index 0000000000000..1143f129b6319 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundMessage.java @@ -0,0 +1,149 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.transport.nativeprotocol; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.transport.Header; +import org.opensearch.transport.ProtocolInboundMessage; + +import java.io.IOException; + +/** + * Inbound data as a message + * + * @opensearch.api + */ +@PublicApi(since = "2.14.0") +public class NativeInboundMessage implements Releasable, ProtocolInboundMessage { + + /** + * The protocol used to encode this message + */ + public static String NATIVE_PROTOCOL = "native"; + + private final Header header; + private final ReleasableBytesReference content; + private final Exception exception; + private final boolean isPing; + private Releasable breakerRelease; + private StreamInput streamInput; + + public NativeInboundMessage(Header header, ReleasableBytesReference content, Releasable breakerRelease) { + this.header = header; + this.content = content; + this.breakerRelease = breakerRelease; + this.exception = null; + this.isPing = false; + } + + public NativeInboundMessage(Header header, Exception exception) { + this.header = header; + this.content = null; + this.breakerRelease = null; + this.exception = exception; + this.isPing = false; + } + + public NativeInboundMessage(Header header, boolean isPing) { + this.header = header; + this.content = null; + this.breakerRelease = null; + this.exception = null; + this.isPing = isPing; + } + + @Override + public String getProtocol() { + return NATIVE_PROTOCOL; + } + + public Header getHeader() { + return header; + } + + public int getContentLength() { + if (content == null) { + return 0; + } else { + return content.length(); + } + } + + public Exception getException() { + return exception; + } + + public boolean isPing() { + return isPing; + } + + public boolean isShortCircuit() { + return exception != null; + } + + public Releasable takeBreakerReleaseControl() { + final Releasable toReturn = breakerRelease; + breakerRelease = null; + if (toReturn != null) { + return toReturn; + } else { + return () -> {}; + } + } + + public StreamInput openOrGetStreamInput() throws IOException { + assert isPing == false && content != null; + if (streamInput == null) { + streamInput = content.streamInput(); + streamInput.setVersion(header.getVersion()); + } + return streamInput; + } + + @Override + public void close() { + IOUtils.closeWhileHandlingException(streamInput); + Releasables.closeWhileHandlingException(content, breakerRelease); + } + + @Override + public String toString() { + return "InboundMessage{" + header + "}"; + } + +} diff --git a/server/src/main/java/org/opensearch/transport/nativeprotocol/package-info.java b/server/src/main/java/org/opensearch/transport/nativeprotocol/package-info.java new file mode 100644 index 0000000000000..84f6d7d0ec5c2 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/nativeprotocol/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Native transport protocol package. 
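+ * <p>
+ * Contains the native wire-format implementations of the protocol-agnostic transport interfaces, namely
+ * {@link org.opensearch.transport.nativeprotocol.NativeInboundBytesHandler} and
+ * {@link org.opensearch.transport.nativeprotocol.NativeInboundMessage}.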
+ */
+package org.opensearch.transport.nativeprotocol;
diff --git a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java
index e002297911788..0d171e17e70e1 100644
--- a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java
+++ b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java
@@ -275,11 +275,11 @@ public void testClosesChannelOnErrorInHandshakeWithIncompatibleVersion() throws
         // response so we must just close the connection on an error. To avoid the failure disappearing into a black hole we at least log
         // it.
-        try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(InboundHandler.class))) {
+        try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(NativeMessageHandler.class))) {
             mockAppender.addExpectation(
                 new MockLogAppender.SeenEventExpectation(
                     "expected message",
-                    InboundHandler.class.getCanonicalName(),
+                    NativeMessageHandler.class.getCanonicalName(),
                     Level.WARN,
                     "could not send error response to handshake"
                 )
@@ -308,11 +308,11 @@ public void testClosesChannelOnErrorInHandshakeWithIncompatibleVersion() throws
     }
 
     public void testLogsSlowInboundProcessing() throws Exception {
-        try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(InboundHandler.class))) {
+        try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(NativeMessageHandler.class))) {
             mockAppender.addExpectation(
                 new MockLogAppender.SeenEventExpectation(
                     "expected message",
-                    InboundHandler.class.getCanonicalName(),
+                    NativeMessageHandler.class.getCanonicalName(),
                     Level.WARN,
                     "handling inbound transport message "
                 )
diff --git a/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java b/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java
index ae4b537223394..2dfe8a0dd8590 100644
--- a/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java
+++ b/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java
@@ -72,24 +72,25 @@ public void testPipelineHandling() throws IOException {
         final List<Tuple<MessageData, Exception>> expected = new ArrayList<>();
         final List<Tuple<MessageData, Exception>> actual = new ArrayList<>();
         final List<ReleasableBytesReference> toRelease = new ArrayList<>();
-        final BiConsumer<TcpChannel, InboundMessage> messageHandler = (c, m) -> {
+        final BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler = (c, m) -> {
             try {
-                final Header header = m.getHeader();
+                InboundMessage message = (InboundMessage) m;
+                final Header header = message.getHeader();
                 final MessageData actualData;
                 final Version version = header.getVersion();
                 final boolean isRequest = header.isRequest();
                 final long requestId = header.getRequestId();
                 final boolean isCompressed = header.isCompressed();
-                if (m.isShortCircuit()) {
+                if (message.isShortCircuit()) {
                     actualData = new MessageData(version, requestId, isRequest, isCompressed, header.getActionName(), null);
                 } else if (isRequest) {
-                    final TestRequest request = new TestRequest(m.openOrGetStreamInput());
+                    final TestRequest request = new TestRequest(message.openOrGetStreamInput());
                     actualData = new MessageData(version, requestId, isRequest, isCompressed, header.getActionName(), request.value);
                 } else {
-                    final TestResponse response = new TestResponse(m.openOrGetStreamInput());
+                    final TestResponse response = new TestResponse(message.openOrGetStreamInput());
                     actualData = new MessageData(version, requestId, isRequest, isCompressed, null, response.value);
                 }
-                actual.add(new Tuple<>(actualData,
m.getException()));
+                actual.add(new Tuple<>(actualData, message.getException()));
             } catch (IOException e) {
                 throw new AssertionError(e);
             }
@@ -214,7 +215,7 @@ public void testPipelineHandling() throws IOException {
     }
 
     public void testDecodeExceptionIsPropagated() throws IOException {
-        BiConsumer<TcpChannel, InboundMessage> messageHandler = (c, m) -> {};
+        BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler = (c, m) -> {};
         final StatsTracker statsTracker = new StatsTracker();
         final LongSupplier millisSupplier = () -> TimeValue.nsecToMSec(System.nanoTime());
         final InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE);
@@ -268,7 +269,7 @@ public void testDecodeExceptionIsPropagated() throws IOException {
     }
 
     public void testEnsureBodyIsNotPrematurelyReleased() throws IOException {
-        BiConsumer<TcpChannel, InboundMessage> messageHandler = (c, m) -> {};
+        BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler = (c, m) -> {};
         final StatsTracker statsTracker = new StatsTracker();
         final LongSupplier millisSupplier = () -> TimeValue.nsecToMSec(System.nanoTime());
         final InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE);
diff --git a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java
index ff99435f765d8..36ba409a2de03 100644
--- a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java
+++ b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java
@@ -97,8 +97,9 @@ public void setUp() throws Exception {
         final InboundAggregator aggregator = new InboundAggregator(breaker, (Predicate<String>) action -> true);
         pipeline = new InboundPipeline(statsTracker, millisSupplier, decoder, aggregator, (c, m) -> {
             try (BytesStreamOutput streamOutput = new BytesStreamOutput()) {
-                Streams.copy(m.openOrGetStreamInput(), streamOutput);
-                message.set(new Tuple<>(m.getHeader(), streamOutput.bytes()));
+                InboundMessage m1 = (InboundMessage) m;
+                Streams.copy(m1.openOrGetStreamInput(), streamOutput);
+                message.set(new Tuple<>(m1.getHeader(), streamOutput.bytes()));
             } catch (IOException e) {
                 throw new AssertionError(e);
             }

From bad49efab29bd5161458f182b87c5d78d8de3cac Mon Sep 17 00:00:00 2001
From: Ashish Singh
Date: Tue, 9 Apr 2024 13:45:43 +0530
Subject: [PATCH 02/65] Add implementation for remote store path types (#13103)

Signed-off-by: Ashish Singh
---
 .../remotestore/RemoteRestoreSnapshotIT.java  |  25 +-
 .../opensearch/remotestore/RemoteStoreIT.java |  48 +--
 .../RemoteStoreRefreshListenerIT.java         |   8 +-
 .../snapshots/DeleteSnapshotIT.java           |  27 +-
 .../opensearch/common/blobstore/BlobPath.java |   9 +
 .../index/remote/RemoteStoreEnums.java        |  30 +-
 .../index/remote/RemoteStoreUtils.java        |  15 +
 .../index/remote/RemoteStoreEnumsTests.java   | 369 +++++++++++++++++-
 .../RemoteStorePathStrategyResolverTests.java |  45 +++
 .../index/remote/RemoteStoreUtilsTests.java   |  30 ++
 .../BlobStoreRepositoryHelperTests.java       |  16 +-
 .../AbstractSnapshotIntegTestCase.java        |  21 +-
 .../test/OpenSearchIntegTestCase.java         |   3 +
 .../opensearch/test/OpenSearchTestCase.java   |  40 ++
 14 files changed, 623 insertions(+), 63 deletions(-)
 create mode 100644 server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java
index ec98d5ff531cb..f5f9d515f2712 100644
---
a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -21,6 +21,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; @@ -56,6 +57,10 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -279,6 +284,11 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { String restoredIndexName1version1 = indexName1 + "-restored-1"; String restoredIndexName1version2 = indexName1 + "-restored-2"; + client(clusterManagerNode).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .get(); createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); Client client = client(); Settings indexSettings = getIndexSettings(1, 0).build(); @@ -476,12 +486,15 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { } void assertRemoteSegmentsAndTranslogUploaded(String idx) throws IOException { - String indexUUID = client().admin().indices().prepareGetSettings(idx).get().getSetting(idx, IndexMetadata.SETTING_INDEX_UUID); - - Path remoteTranslogMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/metadata"); - Path remoteTranslogDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/data"); - Path segmentMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/metadata"); - Path segmentDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/data"); + Client client = client(); + String path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", TRANSLOG, METADATA).buildAsString(); + Path remoteTranslogMetadataPath = Path.of(remoteRepoPath + "/" + path); + path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", TRANSLOG, DATA).buildAsString(); + Path remoteTranslogDataPath = Path.of(remoteRepoPath + "/" + path); + path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path segmentMetadataPath = Path.of(remoteRepoPath + "/" + path); + path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", SEGMENTS, DATA).buildAsString(); + Path segmentDataPath = Path.of(remoteRepoPath + "/" + path); try ( Stream translogMetadata = Files.list(remoteTranslogMetadataPath); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 46e5b7aa28318..b767ffff05e3a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -23,6 +23,7 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.Priority; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor; @@ -57,7 +58,11 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; +import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.comparesEqualTo; @@ -182,13 +187,9 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, true, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); - + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); + ; IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); int lastNMetadataFilesToKeep = indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles(); // Delete is async. @@ -212,12 +213,8 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, false, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. 
MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); @@ -231,12 +228,8 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, true, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. MatcherAssert.assertThat(actualFileCount, is(oneOf(4))); @@ -250,12 +243,9 @@ public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Excepti createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(12, 18); indexData(numberOfIterations, true, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); + ; int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations + 1))); @@ -589,12 +579,8 @@ public void testFallbackToNodeToNodeSegmentCopy() throws Exception { flushAndRefresh(INDEX_NAME); // 3. 
Delete data from remote segment store - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path segmentDataPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/data"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, DATA).buildAsString(); + Path segmentDataPath = Path.of(segmentRepoPath + "/" + shardPath); try (Stream files = Files.list(segmentDataPath)) { files.forEach(p -> { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java index acdb21d072320..65016c4976157 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java @@ -11,6 +11,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchIntegTestCase; @@ -22,7 +23,10 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; +import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRefreshListenerIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { @@ -45,8 +49,10 @@ public void testRemoteRefreshRetryOnFailure() throws Exception { IndicesStatsResponse response = client().admin().indices().stats(new IndicesStatsRequest()).get(); assertEquals(1, response.getShards().length); + String indexName = response.getShards()[0].getShardRouting().index().getName(); String indexUuid = response.getShards()[0].getShardRouting().index().getUUID(); - Path segmentDataRepoPath = location.resolve(String.format(Locale.ROOT, "%s/0/segments/data", indexUuid)); + String shardPath = getShardLevelBlobPath(client(), indexName, new BlobPath(), "0", SEGMENTS, DATA).buildAsString(); + Path segmentDataRepoPath = location.resolve(shardPath); String segmentDataLocalPath = String.format(Locale.ROOT, "%s/indices/%s/0/index", response.getShards()[0].getDataPath(), indexUuid); logger.info("--> Verify that the segment files are same on local and repository eventually"); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index 78827849a8037..e688a4491b1a7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -14,9 +14,14 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.blobstore.BlobContainer; +import 
org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; @@ -27,6 +32,8 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Stream; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.comparesEqualTo; @@ -307,7 +314,21 @@ public void testRemoteStoreCleanupForDeletedIndex() throws Exception { SnapshotInfo snapshotInfo1 = createFullSnapshot(snapshotRepoName, "snap1"); SnapshotInfo snapshotInfo2 = createFullSnapshot(snapshotRepoName, "snap2"); - String[] lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME); + final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); + final BlobStoreRepository remoteStoreRepository = (BlobStoreRepository) repositoriesService.repository(REMOTE_REPO_NAME); + BlobPath shardLevelBlobPath = getShardLevelBlobPath( + client(), + remoteStoreEnabledIndexName, + remoteStoreRepository.basePath(), + "0", + SEGMENTS, + LOCK_FILES + ); + BlobContainer blobContainer = remoteStoreRepository.blobStore().blobContainer(shardLevelBlobPath); + String[] lockFiles; + try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { + lockFiles = lockDirectory.listAll(); + } assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); // delete remote store index @@ -320,7 +341,9 @@ public void testRemoteStoreCleanupForDeletedIndex() throws Exception { .get(); assertAcked(deleteSnapshotResponse); - lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME, indexUUID); + try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { + lockFiles = lockDirectory.listAll(); + } assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); assertTrue(lockFiles[0].contains(snapshotInfo2.snapshotId().getUUID())); diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java index 763594ed52977..6f3e8be7c28b8 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java @@ -79,6 +79,15 @@ public BlobPath add(String path) { return new BlobPath(Collections.unmodifiableList(paths)); } + /** + * Add additional level of paths to the existing path and returns new {@link BlobPath} with the updated paths. 
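+ * <p>
+ * An illustrative example (assumed usage, not taken from this change):
+ * <pre>{@code
+ * BlobPath path = BlobPath.cleanPath().add("base");
+ * path = path.add(List.of("indexUUID", "0"));
+ * // path.buildAsString() returns "base/indexUUID/0/"
+ * }</pre>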
+ */
+    public BlobPath add(Iterable<String> paths) {
+        List<String> updatedPaths = new ArrayList<>(this.paths);
+        paths.iterator().forEachRemaining(updatedPaths::add);
+        return new BlobPath(Collections.unmodifiableList(updatedPaths));
+    }
+
     public String buildAsString() {
         String p = String.join(SEPARATOR, paths);
         if (p.isEmpty() || p.endsWith(SEPARATOR)) {
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
index 30cfc054e3d0a..b51abf19fc000 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java
@@ -103,9 +103,27 @@ boolean requiresHashAlgorithm() {
     HASHED_PREFIX(1) {
         @Override
         public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) {
-            // TODO - We need to implement this, keeping the same path as Fixed for sake of multiple tests that can fail otherwise.
-            // throw new UnsupportedOperationException("Not implemented"); --> Not using this for unblocking couple of tests.
+            assert Objects.nonNull(hashAlgorithm) : "hashAlgorithm is expected to be non-null";
+            return BlobPath.cleanPath()
+                .add(hashAlgorithm.hash(pathInput))
+                .add(pathInput.basePath())
+                .add(pathInput.indexUUID())
+                .add(pathInput.shardId())
+                .add(pathInput.dataCategory().getName())
+                .add(pathInput.dataType().getName());
+        }
+
+        @Override
+        boolean requiresHashAlgorithm() {
+            return true;
+        }
+    },
+    HASHED_INFIX(2) {
+        @Override
+        public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) {
+            assert Objects.nonNull(hashAlgorithm) : "hashAlgorithm is expected to be non-null";
             return pathInput.basePath()
+                .add(hashAlgorithm.hash(pathInput))
                 .add(pathInput.indexUUID())
                 .add(pathInput.shardId())
                 .add(pathInput.dataCategory().getName())
                 .add(pathInput.dataType().getName());
@@ -200,10 +218,11 @@ public enum PathHashAlgorithm {
     FNV_1A(0) {
         @Override
-        long hash(PathInput pathInput) {
+        String hash(PathInput pathInput) {
             String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType()
                 .getName();
-            return FNV1a.hash32(input);
+            long hash = FNV1a.hash64(input);
+            return RemoteStoreUtils.longToUrlBase64(hash);
         }
     };
@@ -218,6 +237,7 @@ public int getCode() {
     }
 
     private static final Map<Integer, PathHashAlgorithm> CODE_TO_ENUM;
+
     static {
         PathHashAlgorithm[] values = values();
         Map<Integer, PathHashAlgorithm> codeToStatus = new HashMap<>(values.length);
@@ -240,7 +260,7 @@ public static PathHashAlgorithm fromCode(int code) {
         return CODE_TO_ENUM.get(code);
     }
 
-    abstract long hash(PathInput pathInput);
+    abstract String hash(PathInput pathInput);
 
     public static PathHashAlgorithm parseString(String pathHashAlgorithm) {
         try {
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
index b4c33d781af86..7d0743e70b6cb 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
@@ -10,7 +10,9 @@
 
 import org.opensearch.common.collect.Tuple;
 
+import java.nio.ByteBuffer;
 import java.util.Arrays;
+import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -101,4 +103,17 @@ public static void verifyNoMultipleWriters(List<String> mdFiles, Function pathList = getPathList();
+        for (String path : pathList) {
+            blobPath = blobPath.add(path);
+        }
+
+        String indexUUID = randomAlphaOfLength(10);
+        String shardId =
String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId; + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("DgSI70IciXs/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("oKU5SjILiy4/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", result.buildAsString()); + + // Translog Lock files - This is a negative case where the assertion will trip. 
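+        // (HASHED_PREFIX asserts a non-null hash algorithm, so path(pathInput, null) below is expected to
+        // fail with an AssertionError, which assertThrows verifies.)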
+ dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("AUBRfCIuWdk/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("erwR-G735Uw/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", result.buildAsString()); + + // Segment Lockfiles + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("KeYDIk0mJXI/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", result.buildAsString()); + } + + public void testGeneratePathForHashedInfixType() { + BlobPath blobPath = new BlobPath(); + List pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList); + basePath = 
basePath.length() == 0 ? basePath : basePath.substring(0, basePath.length() - 1); + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_INFIX.path(pathInput, FNV_1A); + String expected = derivePath(basePath, pathInput); + String actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/DgSI70IciXs/k2ijhe877d7yuhx7/10/translog/data/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/oKU5SjILiy4/k2ijhe877d7yuhx7/10/translog/metadata/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Translog Lock files - This is a negative case where the assertion will trip. 
+ dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_INFIX.path(finalPathInput, null)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_INFIX.path(finalPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/AUBRfCIuWdk/k2ijhe877d7yuhx7/10/segments/data/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/erwR-G735Uw/k2ijhe877d7yuhx7/10/segments/metadata/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Segment Lockfiles + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/KeYDIk0mJXI/k2ijhe877d7yuhx7/10/segments/lock_files/"; + actual = 
result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + } + + private String derivePath(String basePath, PathInput pathInput) { + return "".equals(basePath) + ? String.join( + SEPARATOR, + FNV_1A.hash(pathInput), + pathInput.indexUUID(), + pathInput.shardId(), + pathInput.dataCategory().getName(), + pathInput.dataType().getName() + ) + : String.join( + SEPARATOR, + basePath, + FNV_1A.hash(pathInput), + pathInput.indexUUID(), + pathInput.shardId(), + pathInput.dataCategory().getName(), + pathInput.dataType().getName() + ); + } + private List getPathList() { List pathList = new ArrayList<>(); int length = randomIntBetween(0, 5); @@ -152,5 +520,4 @@ private String getPath(List pathList) { } return p + SEPARATOR; } - } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java new file mode 100644 index 0000000000000..9d4b41f5c395f --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; + +public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { + + public void testGetMinVersionOlder() { + Settings settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0); + assertEquals(PathType.FIXED, resolver.get().getType()); + assertNull(resolver.get().getHashAlgorithm()); + } + + public void testGetMinVersionNewer() { + PathType pathType = randomFrom(PathType.values()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(pathType, resolver.get().getType()); + if (pathType.requiresHashAlgorithm()) { + assertNotNull(resolver.get().getHashAlgorithm()); + } else { + assertNull(resolver.get().getHashAlgorithm()); + } + + } + +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index d3c7d754d6b61..34074861f2764 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -17,8 +17,10 @@ import java.util.ArrayList; import java.util.LinkedList; import 
java.util.List; +import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; @@ -179,4 +181,32 @@ public void testVerifyMultipleWriters_Translog() throws InterruptedException { ); } + public void testLongToBase64() { + Map longToExpectedBase64String = Map.of( + -5537941589147079860L, + "syVHd0gGq0w", + -5878421770170594047L, + "rmumi5UPDQE", + -5147010836697060622L, + "uJIk6f-V6vI", + 937096430362711837L, + "DQE8PQwOVx0", + 8422273604115462710L, + "dOHtOEZzejY", + -2528761975013221124L, + "3OgIYbXSXPw", + -5512387536280560513L, + "s4AQvdu03H8", + -5749656451579835857L, + "sDUd65cNCi8", + 5569654857969679538L, + "TUtjlYLPvLI", + -1563884000447039930L, + "6kv3yZNv9kY" + ); + for (Map.Entry entry : longToExpectedBase64String.entrySet()) { + assertEquals(entry.getValue(), longToUrlBase64(entry.getKey())); + assertEquals(11, entry.getValue().length()); + } + } } diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java index 57c126b85ff70..29ffb94ce8bf4 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java @@ -43,6 +43,8 @@ import java.util.List; import java.util.Map; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -56,14 +58,16 @@ protected Collection> getPlugins() { } protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepository) throws IOException { - String indexUUID = client().admin() - .indices() - .prepareGetSettings(remoteStoreIndex) - .get() - .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); final BlobStoreRepository remoteStorerepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepository); - BlobPath shardLevelBlobPath = remoteStorerepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); + BlobPath shardLevelBlobPath = getShardLevelBlobPath( + client(), + remoteStoreIndex, + remoteStorerepository.basePath(), + "0", + SEGMENTS, + LOCK_FILES + ); BlobContainer blobContainer = remoteStorerepository.blobStore().blobContainer(shardLevelBlobPath); try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { return Arrays.stream(lockDirectory.listAll()) diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 0ee889af5ce1a..ce76914882150 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ 
b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -101,6 +101,8 @@ import java.util.function.Function; import java.util.function.Predicate; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -559,19 +561,16 @@ protected void assertDocCount(String index, long count) { } protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepositoryName) throws IOException { - String indexUUID = client().admin() - .indices() - .prepareGetSettings(remoteStoreIndex) - .get() - .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); - return getLockFilesInRemoteStore(remoteStoreIndex, remoteStoreRepositoryName, indexUUID); - } - - protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepositoryName, String indexUUID) - throws IOException { final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); final BlobStoreRepository remoteStoreRepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepositoryName); - BlobPath shardLevelBlobPath = remoteStoreRepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); + BlobPath shardLevelBlobPath = getShardLevelBlobPath( + client(), + remoteStoreIndex, + remoteStoreRepository.basePath(), + "0", + SEGMENTS, + LOCK_FILES + ); BlobContainer blobContainer = remoteStoreRepository.blobStore().blobContainer(shardLevelBlobPath); try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { return lockDirectory.listAll(); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index f0f5576713042..43bd8a4582547 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -135,6 +135,7 @@ import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.MockFieldFilterPlugin; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; @@ -210,6 +211,7 @@ import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -2593,6 +2595,7 @@ private static Settings buildRemoteStoreNodeAttributes( settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", 
randomBoolean()) .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } + settings.put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())); return settings.build(); } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index f381ebdb64fc2..5a3f3b5a07a8d 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -63,14 +63,17 @@ import org.apache.lucene.tests.util.TimeUnits; import org.opensearch.Version; import org.opensearch.bootstrap.BootstrapForTesting; +import org.opensearch.client.Client; import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.Numbers; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.PathUtilsForTesting; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -120,6 +123,8 @@ import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.monitor.jvm.JvmInfo; @@ -1797,4 +1802,39 @@ protected static InetAddress randomIp(boolean v4) { throw new AssertionError(); } } + + public static BlobPath getShardLevelBlobPath( + Client client, + String remoteStoreIndex, + BlobPath basePath, + String shardId, + RemoteStoreEnums.DataCategory dataCategory, + RemoteStoreEnums.DataType dataType + ) { + String indexUUID = client.admin() + .indices() + .prepareGetSettings(remoteStoreIndex) + .get() + .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); + ClusterState state = client.admin().cluster().prepareState().execute().actionGet().getState(); + Map remoteCustomData = state.metadata() + .index(remoteStoreIndex) + .getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + RemoteStoreEnums.PathType type = Objects.isNull(remoteCustomData) + ? RemoteStoreEnums.PathType.FIXED + : RemoteStoreEnums.PathType.valueOf(remoteCustomData.get(RemoteStoreEnums.PathType.NAME)); + RemoteStoreEnums.PathHashAlgorithm hashAlgorithm = Objects.nonNull(remoteCustomData) + ? remoteCustomData.containsKey(RemoteStoreEnums.PathHashAlgorithm.NAME) + ? 
RemoteStoreEnums.PathHashAlgorithm.valueOf(remoteCustomData.get(RemoteStoreEnums.PathHashAlgorithm.NAME)) + : null + : null; + RemoteStorePathStrategy.PathInput pathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(basePath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + return type.path(pathInput, hashAlgorithm); + } } From 6532caa203955e2ca55cc6f241365b0734d040f3 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Tue, 9 Apr 2024 19:01:26 +0530 Subject: [PATCH 03/65] =?UTF-8?q?Skip=20Filter=20Allocation=20Decider=20du?= =?UTF-8?q?ring=20mixed=20mode=20for=20existing=20indices=E2=80=A6=20(#129?= =?UTF-8?q?60)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Gaurav Bafna --- .../RemoteMigrationAllocationDeciderIT.java | 130 ++++++++++++++++++ .../RemoteStoreMigrationTestCase.java | 1 - .../decider/FilterAllocationDecider.java | 38 +++++ ...RemoteStoreMigrationAllocationDecider.java | 29 ++-- .../decider/FilterAllocationDeciderTests.java | 59 ++++++++ 5 files changed, 243 insertions(+), 14 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java new file mode 100644 index 0000000000000..de425ffc63816 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.List; + +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteMigrationAllocationDeciderIT extends MigrationBaseTestCase { + + // When the primary is on doc rep node, existing replica copy can get allocated on excluded docrep node. 
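+    // The test below excludes every docrep node via index.routing.allocation.exclude._name and then stops
+    // a data node; the cluster can only turn green again because the exclude filter is skipped for this
+    // existing replica while the cluster is in mixed-mode migration.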
+ public void testFilterAllocationSkipsReplica() throws IOException { + addRemote = false; + List<String> docRepNodes = internalCluster().startNodes(3); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0") + .build() + ); + ensureGreen("test"); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + assertTrue( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.routing.allocation.exclude._name", String.join(",", docRepNodes))) + .execute() + .actionGet() + .isAcknowledged() + ); + internalCluster().stopRandomDataNode(); + ensureGreen("test"); + } + + // When the primary is on remote node, new replica copy shouldn't get allocated on an excluded docrep node. + public void testFilterAllocationSkipsReplicaOnExcludedNode() throws IOException { + addRemote = false; + List<String> nodes = internalCluster().startNodes(2); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0") + .build() + ); + ensureGreen("test"); + addRemote = true; + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + String remoteNode = internalCluster().startNode(); + + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand("test", 0, primaryNodeName("test"), remoteNode)) + .execute() + .actionGet(); + client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(60)) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .execute() + .actionGet(); + assertEquals(remoteNode, primaryNodeName("test")); + + assertTrue( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.routing.allocation.exclude._name", String.join(",", nodes))) + .execute() + .actionGet() + .isAcknowledged() + ); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName("test"))); + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setTimeout(TimeValue.timeValueSeconds(2)) + .execute() + .actionGet(); + assertTrue(clusterHealthResponse.isTimedOut()); + ensureYellow("test"); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java index a31d203058565..640b83f194c1c 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -19,7 +19,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index af4b2c61a95b1..d3200c1bc9d75 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -38,11 +38,13 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import java.util.Map; @@ -102,14 +104,32 @@ public class FilterAllocationDecider extends AllocationDecider { private volatile DiscoveryNodeFilters clusterRequireFilters; private volatile DiscoveryNodeFilters clusterIncludeFilters; private volatile DiscoveryNodeFilters clusterExcludeFilters; + private volatile RemoteStoreNodeService.Direction migrationDirection; + private volatile RemoteStoreNodeService.CompatibilityMode compatibilityMode; public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) { setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings)); setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings)); setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings)); + this.migrationDirection = RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING.get(settings); + this.compatibilityMode = RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(settings); + clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters, (a, b) -> {}); clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters, (a, b) -> {}); clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters, (a, b) -> {}); + clusterSettings.addSettingsUpdateConsumer(RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, this::setMigrationDirection); + clusterSettings.addSettingsUpdateConsumer( + RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, + this::setCompatibilityMode + ); + } + + private void setMigrationDirection(RemoteStoreNodeService.Direction migrationDirection) { + this.migrationDirection = 
migrationDirection; + } + + private void setCompatibilityMode(RemoteStoreNodeService.CompatibilityMode compatibilityMode) { + this.compatibilityMode = compatibilityMode; } @Override @@ -127,10 +147,28 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing "initial allocation of the shrunken index is only allowed on nodes [%s] that hold a copy of every shard in the index"; return allocation.decision(Decision.NO, NAME, explanation, initialRecoveryFilters); } + + Decision decision = isRemoteStoreMigrationReplicaDecision(shardRouting, allocation); + if (decision != null) return decision; } return shouldFilter(shardRouting, node.node(), allocation); } + public Decision isRemoteStoreMigrationReplicaDecision(ShardRouting shardRouting, RoutingAllocation allocation) { + assert shardRouting.unassigned(); + boolean primaryOnRemote = RemoteStoreMigrationAllocationDecider.isPrimaryOnRemote(shardRouting.shardId(), allocation); + if (shardRouting.primary() == false + && shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED + && (compatibilityMode.equals(RemoteStoreNodeService.CompatibilityMode.MIXED)) + && (migrationDirection.equals(RemoteStoreNodeService.Direction.REMOTE_STORE)) + && primaryOnRemote == false) { + String explanation = + "in remote store migration, allocation filters are not applicable for replica copies whose primary is on doc rep node"; + return allocation.decision(Decision.YES, NAME, explanation); + } + return null; + } + @Override public Decision canAllocate(IndexMetadata indexMetadata, RoutingNode node, RoutingAllocation allocation) { return shouldFilter(indexMetadata, node.node(), allocation); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java index 27ebe5390ea6d..7d40aacb71e25 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; import org.opensearch.node.remotestore.RemoteStoreNodeService.Direction; @@ -60,9 +61,8 @@ public class RemoteStoreMigrationAllocationDecider extends AllocationDecider { public static final String NAME = "remote_store_migration"; - private Direction migrationDirection; - private CompatibilityMode compatibilityMode; - private boolean remoteStoreBackedIndex; + volatile private Direction migrationDirection; + volatile private CompatibilityMode compatibilityMode; public RemoteStoreMigrationAllocationDecider(Settings settings, ClusterSettings clusterSettings) { this.migrationDirection = RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING.get(settings); @@ -106,9 +106,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing // check for remote store backed indices IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); - if 
(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.exists(indexMetadata.getSettings())) { - remoteStoreBackedIndex = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings()); - } + boolean remoteStoreBackedIndex = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings()); if (remoteStoreBackedIndex && targetNode.isRemoteStoreNode() == false) { // allocations and relocations must be to a remote node String reason = String.format( @@ -133,15 +131,20 @@ private Decision primaryShardDecision(ShardRouting primaryShardRouting, Discover return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, primaryShardRouting, targetNode, "")); } + // Checks if primary shard is on a remote node. + static boolean isPrimaryOnRemote(ShardId shardId, RoutingAllocation allocation) { + ShardRouting primaryShardRouting = allocation.routingNodes().activePrimary(shardId); + if (primaryShardRouting != null) { + DiscoveryNode primaryShardNode = allocation.nodes().getNodes().get(primaryShardRouting.currentNodeId()); + return primaryShardNode.isRemoteStoreNode(); + } + return false; + } + private Decision replicaShardDecision(ShardRouting replicaShardRouting, DiscoveryNode targetNode, RoutingAllocation allocation) { if (targetNode.isRemoteStoreNode()) { - ShardRouting primaryShardRouting = allocation.routingNodes().activePrimary(replicaShardRouting.shardId()); - boolean primaryHasMigratedToRemote = false; - if (primaryShardRouting != null) { - DiscoveryNode primaryShardNode = allocation.nodes().getNodes().get(primaryShardRouting.currentNodeId()); - primaryHasMigratedToRemote = primaryShardNode.isRemoteStoreNode(); - } - if (primaryHasMigratedToRemote == false) { + boolean primaryOnRemote = RemoteStoreMigrationAllocationDecider.isPrimaryOnRemote(replicaShardRouting.shardId(), allocation); + if (primaryOnRemote == false) { return allocation.decision( Decision.NO, NAME, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index a8282faaddced..e8273d294f24f 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -50,6 +51,8 @@ import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -61,6 +64,9 @@ import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; +import static 
org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; public class FilterAllocationDeciderTests extends OpenSearchAllocationTestCase { @@ -406,4 +412,57 @@ public void testWildcardIPFilter() { "test ip validation" ); } + + public void testMixedModeRemoteStoreAllocation() { + // For mixed mode remote store direction cluster's existing indices replica creation , + // we don't consider filter allocation decider for replica of existing indices + FeatureFlags.initializeFeatureFlags(Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build()); + ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + Settings initialSettings = Settings.builder() + .put("cluster.routing.allocation.exclude._id", "node2") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED) + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .build(); + + FilterAllocationDecider filterAllocationDecider = new FilterAllocationDecider(initialSettings, clusterSettings); + AllocationDeciders allocationDeciders = new AllocationDeciders( + Arrays.asList( + filterAllocationDecider, + new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), + new ReplicaAfterPrimaryActiveAllocationDecider() + ) + ); + AllocationService service = new AllocationService( + allocationDeciders, + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + ClusterState state = createInitialClusterState(service, Settings.EMPTY, Settings.EMPTY); + RoutingTable routingTable = state.routingTable(); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + allocation.debugDecision(true); + ShardRouting sr = ShardRouting.newUnassigned( + routingTable.index("sourceIndex").shard(0).shardId(), + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "") + ); + Decision.Single decision = (Decision.Single) filterAllocationDecider.canAllocate( + sr, + state.getRoutingNodes().node("node2"), + allocation + ); + assertEquals(decision.toString(), Type.YES, decision.type()); + + sr = ShardRouting.newUnassigned( + routingTable.index("sourceIndex").shard(0).shardId(), + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "") + ); + decision = (Decision.Single) filterAllocationDecider.canAllocate(sr, state.getRoutingNodes().node("node2"), allocation); + assertEquals(decision.toString(), Type.NO, decision.type()); + } } From c0b18999fb755cc2cf60b39619fb5bb107307e13 Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Tue, 9 Apr 2024 11:33:02 -0700 Subject: [PATCH 04/65] [Remote Store] Add support for randomizing Remote Store enabled testing. (#12488) * Add support for randomizing remote store enabled testing. Signed-off-by: Rishikesh1159 * fix spotless check Signed-off-by: Rishikesh1159 * Updating logic to randomly use remote store once per cluster instead of once per node. 
Signed-off-by: Rishikesh1159 * Add Remote Store Randomization within Replication Type Randomization. Signed-off-by: Rishikesh1159 --------- Signed-off-by: Rishikesh1159 --- .../indices/IndicesRequestCacheIT.java | 1 + .../recovery/ReplicaToPrimaryPromotionIT.java | 7 +++- .../test/OpenSearchIntegTestCase.java | 32 ++++++++++++++++--- 3 files changed, 35 insertions(+), 5 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 52b4dad553180..ec5637cec6485 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -650,6 +650,7 @@ public void testCacheWithInvalidation() throws Exception { .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", -1) ) .get() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java index 3df4ecff5250c..a2543f0592145 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java @@ -56,6 +56,11 @@ protected int numberOfReplicas() { return 1; } + @Override + public boolean useRandomReplicationStrategy() { + return true; + } + public void testPromoteReplicaToPrimary() throws Exception { final String indexName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); createIndex(indexName); @@ -65,7 +70,7 @@ public void testPromoteReplicaToPrimary() throws Exception { try (BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), numOfDocs)) { waitForDocs(numOfDocs, indexer); } - refresh(indexName); + refreshAndWaitForReplication(indexName); } assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 43bd8a4582547..c26c3f8d21380 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -379,6 +379,14 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { */ public static final String TESTS_CLUSTER_NAME = "tests.clustername"; + protected static final String REMOTE_BACKED_STORAGE_REPOSITORY_NAME = "test-remote-store-repo"; + + private Path remoteStoreRepositoryPath; + + private ReplicationType randomReplicationType; + + private String randomStorageType; + @BeforeClass public static void beforeClass() throws Exception { testClusterRule.beforeClass(); @@ -1896,11 +1904,19 @@ protected Settings nodeSettings(int nodeOrdinal) { builder.put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true); } - // Randomly set a replication strategy for the node. Replication Strategy can still be manually overridden by subclass if needed. + // Randomly set a Replication Strategy and storage type for the node. 
Both Replication Strategy and Storage Type can still be + // manually overridden by subclass if needed. if (useRandomReplicationStrategy()) { - ReplicationType replicationType = randomBoolean() ? ReplicationType.DOCUMENT : ReplicationType.SEGMENT; - logger.info("Randomly using Replication Strategy as {}.", replicationType.toString()); - builder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), replicationType); + if (randomReplicationType.equals(ReplicationType.SEGMENT) && randomStorageType.equals("REMOTE_STORE")) { + logger.info("Randomly using Replication Strategy as {} and Storage Type as {}.", randomReplicationType, randomStorageType); + if (remoteStoreRepositoryPath == null) { + remoteStoreRepositoryPath = randomRepoPath().toAbsolutePath(); + } + builder.put(remoteStoreClusterSettings(REMOTE_BACKED_STORAGE_REPOSITORY_NAME, remoteStoreRepositoryPath)); + } else { + logger.info("Randomly using Replication Strategy as {} and Storage Type as {}.", randomReplicationType, randomStorageType); + builder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), randomReplicationType); + } } return builder.build(); } @@ -1953,6 +1969,14 @@ protected boolean ignoreExternalCluster() { } protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException { + if (useRandomReplicationStrategy()) { + randomReplicationType = randomBoolean() ? ReplicationType.DOCUMENT : ReplicationType.SEGMENT; + if (randomReplicationType.equals(ReplicationType.SEGMENT)) { + randomStorageType = randomBoolean() ? "REMOTE_STORE" : "LOCAL"; + } else { + randomStorageType = "LOCAL"; + } + } String clusterAddresses = System.getProperty(TESTS_CLUSTER); if (Strings.hasLength(clusterAddresses) && ignoreExternalCluster() == false) { if (scope == Scope.TEST) { From e3dc6ae5e508b0b0a8a4abcd5e9d1241acd0075c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Apr 2024 11:58:02 -0700 Subject: [PATCH 05/65] Bump com.gradle.enterprise from 3.16.2 to 3.17 (#13116) * Bump com.gradle.enterprise from 3.16.2 to 3.17 Bumps com.gradle.enterprise from 3.16.2 to 3.17. --- updated-dependencies: - dependency-name: com.gradle.enterprise dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + settings.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 62cf5e45e80f9..c8cfc0fa6b33c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -126,6 +126,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.commons:commonscodec` from 1.15 to 1.16.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) - Bump `org.apache.commons:commonslang` from 3.13.0 to 3.14.0 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) - Bump Apache Tika from 2.6.0 to 2.9.2 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) +- Bump `com.gradle.enterprise` from 3.16.2 to 3.17 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/settings.gradle b/settings.gradle index 8fbf32504215b..ccc239ff17062 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.16.2" + id "com.gradle.enterprise" version "3.17" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') From 032b81bff1e5c8064506b083c6b0e964d7ec4b19 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Tue, 9 Apr 2024 15:04:27 -0400 Subject: [PATCH 06/65] Skip Pull Request Checks / Verify Description Checklist for dependabot (#13127) Signed-off-by: Craig Perkins --- .github/workflows/pull-request-checks.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml index 7efcf529588ed..a62ea9cfa179b 100644 --- a/.github/workflows/pull-request-checks.yml +++ b/.github/workflows/pull-request-checks.yml @@ -18,6 +18,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: peternied/check-pull-request-description-checklist@v1.1 + if: github.actor != 'dependabot[bot]' with: checklist-items: | New functionality includes testing. From 3aaada643a5c6daf08c942feebd4e7ff31d97594 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Tue, 9 Apr 2024 14:36:06 -0500 Subject: [PATCH 07/65] Create separate file for unreleased 3.0 changes (#13040) Contributors and maintainers frequently put changelog entries in the wrong section. It's an easy mistake to make! Given that making changes intended for the next minor release is the norm, this change optimizes that process by making `CHANGELOG.md` contain _only_ entries for the next minor. Truly breaking changes intended for the next major are kept in a separate file. Credit to @msfroh for the idea. 
Signed-off-by: Andrew Ross Signed-off-by: Kunal Kotwani Co-authored-by: Kunal Kotwani --- .github/workflows/changelog_verifier.yml | 16 +++- CHANGELOG-3.0.md | 104 +++++++++++++++++++++++ CHANGELOG.md | 99 --------------------- CONTRIBUTING.md | 6 +- 4 files changed, 121 insertions(+), 104 deletions(-) create mode 100644 CHANGELOG-3.0.md diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index 9456fbf8b4ca0..04e2ed5006269 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -1,7 +1,7 @@ name: "Changelog Verifier" on: pull_request: - types: [opened, edited, review_requested, synchronize, reopened, ready_for_review, labeled, unlabeled] + types: [opened, synchronize, reopened, ready_for_review, labeled, unlabeled] jobs: # Enforces the update of a changelog file on every pull request @@ -13,7 +13,19 @@ jobs: with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} - - uses: dangoslen/changelog-enforcer@v3 + id: verify-changelog-3x + with: + skipLabels: "autocut, skip-changelog" + changeLogPath: 'CHANGELOG-3.0.md' + continue-on-error: true + - uses: dangoslen/changelog-enforcer@v3 + id: verify-changelog with: skipLabels: "autocut, skip-changelog" + changeLogPath: 'CHANGELOG.md' + continue-on-error: true + - run: | + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'failure' && ${{ steps.verify-changelog.outcome }} == 'failure' ]]; then + exit 1 + fi diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md new file mode 100644 index 0000000000000..0715c6de49ca4 --- /dev/null +++ b/CHANGELOG-3.0.md @@ -0,0 +1,104 @@ +# CHANGELOG +All notable changes to this project are documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). See the [CONTRIBUTING guide](./CONTRIBUTING.md#Changelog) for instructions on how to add changelog entries. + +## [Unreleased 3.0] +### Added +- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) +- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) +- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) +- Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) +- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) +- Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) +- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028)) +- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) +- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) +- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) +- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) +- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) +- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807)) +- Replace configureEach with all for publication iteration ([#12876](https://github.com/opensearch-project/OpenSearch/pull/12876)) + +### Dependencies +- Bump `log4j-core` from 2.18.0 to 2.19.0 +- Bump `forbiddenapis` from 3.3 to 3.4 +- Bump `avro` from 1.11.1 to 1.11.2 +- Bump `woodstox-core` from 6.3.0 to 6.3.1 +- Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) +- Bump `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488)) +- Bump `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) +- Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) +- Update Apache Lucene to 9.5.0-snapshot-d5cef1c ([#5570](https://github.com/opensearch-project/OpenSearch/pull/5570)) +- Bump `maven-model` from 3.6.2 to 3.8.6 ([#5599](https://github.com/opensearch-project/OpenSearch/pull/5599)) +- Bump `maxmind-db` from 2.1.0 to 3.0.0 ([#5601](https://github.com/opensearch-project/OpenSearch/pull/5601)) +- Bump `wiremock-jre8-standalone` from 2.33.2 to 2.35.0 +- Bump `gson` from 2.10 to 2.10.1 +- Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 +- Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 +- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 +- Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 +- Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 +- Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) +- OpenJDK Update (April 2023 Patch releases) ([#7344](https://github.com/opensearch-project/OpenSearch/pull/7344) +- Bump `com.google.http-client:google-http-client:1.43.2` from 1.42.0 to 1.43.2 ([7928](https://github.com/opensearch-project/OpenSearch/pull/7928))) +- Add Opentelemetry dependencies ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543)) +- Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Bump `org.bouncycastle:bcpkix-jdk15on` to 
`org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) +- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) +- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) +- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) +- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) + +### Changed +- [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) +- Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) +- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) +- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) +- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) +- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) +- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) +- Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) +- Switched to more reliable OpenSearch Lucene snapshot location([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728)) +- Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) + +### Deprecated + +### Removed +- Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) +- Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) +- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) +- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants 
([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) +- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) +- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) +- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) +- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) +- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) +- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) +- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants ([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837)) +- Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) +- Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) +- Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) + +### Fixed +- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) +- Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) +- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) +- Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) +- Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836)) +- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872)) +- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) +- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) +- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) + +### Security + +[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD diff --git a/CHANGELOG.md b/CHANGELOG.md index c8cfc0fa6b33c..f801cbcd7d416 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,104 +3,6 @@ All notable changes to this project are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). See the [CONTRIBUTING guide](./CONTRIBUTING.md#Changelog) for instructions on how to add changelog entries. 
-## [Unreleased 3.0] -### Added -- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) -- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) -- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) -- Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) -- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) -- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) -- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) -- Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) -- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028)) -- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) -- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) -- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) -- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) -- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) -- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807)) -- Replace configureEach with all for publication iteration ([#12876](https://github.com/opensearch-project/OpenSearch/pull/12876)) - -### Dependencies -- Bump `log4j-core` from 2.18.0 to 2.19.0 -- Bump `forbiddenapis` from 3.3 to 3.4 -- Bump `avro` from 1.11.1 to 1.11.2 -- Bump `woodstox-core` from 6.3.0 to 6.3.1 -- Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) -- Bump `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488)) -- Bump `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) -- Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) -- Update Apache Lucene to 9.5.0-snapshot-d5cef1c ([#5570](https://github.com/opensearch-project/OpenSearch/pull/5570)) -- Bump `maven-model` from 3.6.2 to 3.8.6 ([#5599](https://github.com/opensearch-project/OpenSearch/pull/5599)) -- Bump `maxmind-db` from 2.1.0 to 3.0.0 ([#5601](https://github.com/opensearch-project/OpenSearch/pull/5601)) -- Bump `wiremock-jre8-standalone` from 2.33.2 to 2.35.0 -- Bump `gson` from 2.10 to 2.10.1 -- Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 -- Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 -- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 
-- Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 -- Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 -- Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) -- OpenJDK Update (April 2023 Patch releases) ([#7344](https://github.com/opensearch-project/OpenSearch/pull/7344) -- Bump `com.google.http-client:google-http-client:1.43.2` from 1.42.0 to 1.43.2 ([7928](https://github.com/opensearch-project/OpenSearch/pull/7928))) -- Add Opentelemetry dependencies ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543)) -- Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) -- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) -- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) -- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) -- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) -- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) - -### Changed -- [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) -- Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) -- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) -- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) -- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) -- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) -- Add task completion count in search backpressure stats API 
([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) -- Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) -- Switched to more reliable OpenSearch Lucene snapshot location([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728)) -- Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) - -### Deprecated - -### Removed -- Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) -- Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) -- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) -- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) -- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) -- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) -- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) -- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) -- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) -- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) -- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants ([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837)) -- Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) -- Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) -- Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) - -### Fixed -- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) -- Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) -- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) -- Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) -- Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836)) -- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872)) -- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) -- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) -- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled 
([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) - -### Security - ## [Unreleased 2.x] ### Added - Constant Keyword Field ([#12285](https://github.com/opensearch-project/OpenSearch/pull/12285)) @@ -146,5 +48,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security -[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD [Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.13...2.x diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f5494925dcf50..bce6ca0d49294 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -146,12 +146,12 @@ Adding in the change is two step process: 2. Update the entry for your change in [`CHANGELOG.md`](CHANGELOG.md) and make sure that you reference the pull request there. ### Where should I put my CHANGELOG entry? -Please review the [branching strategy](https://github.com/opensearch-project/.github/blob/main/RELEASING.md#opensearch-branching) document. The changelog on the `main` branch will contain sections for the _next major_ and _next minor_ releases. Your entry should go into the section it is intended to be released in. In practice, most changes to `main` will be backported to the next minor release so most entries will likely be in that section. +Please review the [branching strategy](https://github.com/opensearch-project/.github/blob/main/RELEASING.md#opensearch-branching) document. The changelog on the `main` branch will contain **two files**: `CHANGELOG.md` which corresponds to unreleased changes intended for the _next minor_ release and `CHANGELOG-3.0.md` which corresponds to unreleased changes intended for the _next major_ release. Your entry should go into the file corresponding to the version it is intended to be released in. In practice, most changes to `main` will be backported to the next minor release so most entries will be in the `CHANGELOG.md` file. The following examples assume the _next major_ release on main is 3.0, then _next minor_ release is 2.5, and the _current_ release is 2.4. - **Add a new feature to release in next minor:** Add a changelog entry to `[Unreleased 2.x]` on main, then backport to 2.x (including the changelog entry). - **Introduce a breaking API change to release in next major:** Add a changelog entry to `[Unreleased 3.0]` on main, do not backport. +- **Add a new feature to release in next minor:** Add a changelog entry to `[Unreleased 2.x]` in CHANGELOG.md on main, then backport to 2.x (including the changelog entry). +- **Introduce a breaking API change to release in next major:** Add a changelog entry to `[Unreleased 3.0]` in CHANGELOG-3.0.md on main, do not backport. - **Upgrade a dependency to fix a CVE:** Add a changelog entry to `[Unreleased 2.x]` on main, then backport to 2.x (including the changelog entry), then backport to 2.4 and ensure the changelog entry is added to `[Unreleased 2.4.1]`. 
## Review Process From 8779e5297a502d4bfc1c0b199813babb7bc62726 Mon Sep 17 00:00:00 2001 From: kkewwei Date: Wed, 10 Apr 2024 03:47:17 +0800 Subject: [PATCH 08/65] implement mark() in class FilterStreamInput (#13098) * implement mark() in class FilterStreamInput Signed-off-by: kkewwei * implement markSupported() in class FilterStreamInput Signed-off-by: kkewwei * add CHANGELOG Signed-off-by: kkewwei --------- Signed-off-by: kkewwei --- CHANGELOG.md | 3 ++- .../common/io/stream/FilterStreamInput.java | 10 ++++++++ .../io/stream/FilterStreamInputTests.java | 23 +++++++++++++++++++ 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f801cbcd7d416..94d47182ede45 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,7 +44,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix bulk API ignores ingest pipeline for upsert ([#12883](https://github.com/opensearch-project/OpenSearch/pull/12883)) - Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) - Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) -- Client with Java 8 runtime and Apache HttpClient 5 Transport fails with java.lang.NoSuchMethodError: java.nio.ByteBuffer.flip()Ljava/nio/ByteBuffer ([#13100](https://github.com/opensearch-project/opensearch-java/pull/13100)) +- Client with Java 8 runtime and Apache HttpClient 5 Transport fails with java.lang.NoSuchMethodError: java.nio.ByteBuffer.flip()Ljava/nio/ByteBuffer ([#13100](https://github.com/opensearch-project/opensearch-java/pull/13100)) +- Fix implement mark() and markSupported() in class FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) ### Security diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java index a6e49567ac7d5..ee67fd4f271a2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java @@ -80,6 +80,16 @@ public void reset() throws IOException { delegate.reset(); } + @Override + public void mark(int readlimit) { + delegate.mark(readlimit); + } + + @Override + public boolean markSupported() { + return delegate.markSupported(); + } + @Override public int read() throws IOException { return delegate.read(); diff --git a/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java b/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java index a044586e095e3..ab6dfbc2feb25 100644 --- a/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java +++ b/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java @@ -12,6 +12,9 @@ import org.opensearch.core.common.bytes.BytesReference; import java.io.IOException; +import java.nio.ByteBuffer; + +import static org.hamcrest.Matchers.is; /** test the FilterStreamInput using the same BaseStreamTests */ public class FilterStreamInputTests extends BaseStreamTests { @@ -21,4 +24,24 @@ protected StreamInput getStreamInput(BytesReference bytesReference) throws IOExc return new FilterStreamInput(StreamInput.wrap(br.bytes, br.offset, br.length)) { }; } + + public void testMarkAndReset() 
throws IOException { + FilterStreamInputTests filterStreamInputTests = new FilterStreamInputTests(); + + ByteBuffer buffer = ByteBuffer.wrap(new byte[20]); + for (int i = 0; i < buffer.limit(); i++) { + buffer.put((byte) i); + } + buffer.rewind(); + BytesReference bytesReference = BytesReference.fromByteBuffer(buffer); + StreamInput streamInput = filterStreamInputTests.getStreamInput(bytesReference); + streamInput.read(); + assertThat(streamInput.markSupported(), is(true)); + streamInput.mark(-1); + int int1 = streamInput.read(); + int int2 = streamInput.read(); + streamInput.reset(); + assertEquals(int1, streamInput.read()); + assertEquals(int2, streamInput.read()); + } } From ba20bdae4db6700e0bc530d339197dd2942a401f Mon Sep 17 00:00:00 2001 From: Zing Huang Yang <166090836+frameflare@users.noreply.github.com> Date: Wed, 10 Apr 2024 06:41:07 +0800 Subject: [PATCH 09/65] chore: remove repetitive words (#13090) Signed-off-by: frameflare --- .../org/opensearch/index/shard/IndexShardTestCase.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 38900ce51c3fa..4dd4c734a1701 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -389,7 +389,7 @@ protected IndexShard newShard(ShardId shardId, boolean primary, IndexingOperatio } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * supplied node id. * * @param shardId the shard id to use @@ -407,7 +407,7 @@ protected IndexShard newShard( } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * supplied node id. * * @param shardId the shard id to use @@ -441,7 +441,7 @@ protected IndexShard newShard( } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * current node id the shard is assigned to. * * @param routing shard routing to use @@ -459,7 +459,7 @@ protected IndexShard newShard( } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * current node id the shard is assigned to. * @param routing shard routing to use * @param indexMetadata indexMetadata for the shard, including any mapping @@ -498,7 +498,7 @@ protected IndexShard newShard( } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * current node id the shard is assigned to. 
* @param routing shard routing to use * @param shardPath path to use for shard data From 74232c7144819eb0a1017e4c3a1854818ba8f0bb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 09:04:00 -0400 Subject: [PATCH 10/65] Bump net.minidev:json-smart from 2.5.0 to 2.5.1 in /plugins/repository-hdfs (#13117) * Bump net.minidev:json-smart in /plugins/repository-hdfs Bumps [net.minidev:json-smart](https://github.com/netplex/json-smart-v2) from 2.5.0 to 2.5.1. - [Release notes](https://github.com/netplex/json-smart-v2/releases) - [Commits](https://github.com/netplex/json-smart-v2/compare/2.5.0...2.5.1) --- updated-dependencies: - dependency-name: net.minidev:json-smart dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Signed-off-by: Andriy Redko --------- Signed-off-by: dependabot[bot] Signed-off-by: Andriy Redko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/json-smart-2.5.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/json-smart-2.5.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 94d47182ede45..77f3e3a5dcc04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,7 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) -- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893)) +- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893), [#13117](https://github.com/opensearch-project/OpenSearch/pull/13117)) - Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924)) - Bump `commons-io:commons-io` from 2.15.1 to 2.16.0 ([#12996](https://github.com/opensearch-project/OpenSearch/pull/12996), [#12998](https://github.com/opensearch-project/OpenSearch/pull/12998), [#12999](https://github.com/opensearch-project/OpenSearch/pull/12999)) - Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.26.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 2c51bb4cbea53..cd7175e70e607 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -81,7 +81,7 @@ dependencies { api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" - api 'net.minidev:json-smart:2.5.0' + api 'net.minidev:json-smart:2.5.1' api "io.netty:netty-all:${versions.netty}" implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" implementation 'org.codehaus.woodstox:stax2-api:4.2.2' diff --git 
a/plugins/repository-hdfs/licenses/json-smart-2.5.0.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.5.0.jar.sha1
deleted file mode 100644
index 3ec055efa1255..0000000000000
--- a/plugins/repository-hdfs/licenses/json-smart-2.5.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-57a64f421b472849c40e77d2e7cce3a141b41e99
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1
new file mode 100644
index 0000000000000..fe23968afce1e
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1
@@ -0,0 +1 @@
+4c11d2808d009132dfbbf947ebf37de6bf266c8e
\ No newline at end of file

From 4fc3a02db464021556e25e7a02a90e95c8b65d41 Mon Sep 17 00:00:00 2001
From: Harish Bhakuni
Date: Wed, 10 Apr 2024 21:52:42 -0700
Subject: [PATCH 11/65] [Snapshot Interop] Add changes for overriding remote store and replic… (#11868)

Signed-off-by: Harish Bhakuni
---
 CHANGELOG.md                                  |   1 +
 .../remotestore/RemoteRestoreSnapshotIT.java  | 233 ++++++++----
 .../SegmentReplicationSnapshotIT.java         |  42 +++-
 .../metadata/MetadataCreateIndexService.java  |  14 +-
 .../opensearch/snapshots/RestoreService.java  | 103 ++++++--
 5 files changed, 229 insertions(+), 164 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 77f3e3a5dcc04..492da15def01b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - [Remote Store] Make translog transfer timeout configurable ([#12704](https://github.com/opensearch-project/OpenSearch/pull/12704))
 - Reject Resize index requests (i.e, split, shrink and clone), While DocRep to SegRep migration is in progress.([#12686](https://github.com/opensearch-project/OpenSearch/pull/12686))
 - Add support for more than one protocol for transport ([#12967](https://github.com/opensearch-project/OpenSearch/pull/12967))
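What the changelog entry below means in practice: a restore request can no longer strip or rewrite the remote-store wiring of an index. An illustrative sketch of the new failure mode, modeled on the integration test added later in this patch (the repository and snapshot names are made up):

    // Asking the restore to drop index.remote_store.enabled is now rejected up front.
    SnapshotRestoreException e = expectThrows(
        SnapshotRestoreException.class,
        () -> client.admin()
            .cluster()
            .prepareRestoreSnapshot("snapshot-repo", "snapshot-1")
            .setWaitForCompletion(false)
            .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED)
            .get()
    );
    // The exception message names the offending setting:
    // "cannot remove setting [index.remote_store.enabled] on restore"

+- Add changes for overriding remote store and replication settings during snapshot restore.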
([#11868](https://github.com/opensearch-project/OpenSearch/pull/11868)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index f5f9d515f2712..d34a5f4edbaec 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -12,8 +12,6 @@ import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; @@ -36,6 +34,7 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotRestoreException; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -55,7 +54,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; @@ -118,7 +116,7 @@ private void assertDocsPresentInIndex(Client client, String indexName, int numOf } } - public void testRestoreOperationsShallowCopyEnabled() throws IOException, ExecutionException, InterruptedException { + public void testRestoreOperationsShallowCopyEnabled() throws Exception { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); String primary = internalCluster().startDataOnlyNode(); String indexName1 = "testindex1"; @@ -129,8 +127,6 @@ public void testRestoreOperationsShallowCopyEnabled() throws IOException, Execut Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); String restoredIndexName1 = indexName1 + "-restored"; - String restoredIndexName1Seg = indexName1 + "-restored-seg"; - String restoredIndexName1Doc = indexName1 + "-restored-doc"; String restoredIndexName2 = indexName2 + "-restored"; createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); @@ -212,60 +208,6 @@ public void testRestoreOperationsShallowCopyEnabled() throws IOException, Execut indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); ensureGreen(restoredIndexName1); assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); - - // restore index as seg rep enabled with remote store and remote translog disabled - RestoreSnapshotResponse restoreSnapshotResponse3 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, 
snapshotName1) - .setWaitForCompletion(false) - .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1Seg) - .get(); - assertEquals(restoreSnapshotResponse3.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1Seg); - - GetIndexResponse getIndexResponse = client.admin() - .indices() - .getIndex(new GetIndexRequest().indices(restoredIndexName1Seg).includeDefaults(true)) - .get(); - indexSettings = getIndexResponse.settings().get(restoredIndexName1Seg); - assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); - assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); - assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); - assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1Seg, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1Seg); - assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1 + 2); - - // restore index as doc rep based from shallow copy snapshot - RestoreSnapshotResponse restoreSnapshotResponse4 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, IndexMetadata.SETTING_REPLICATION_TYPE) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1Doc) - .get(); - assertEquals(restoreSnapshotResponse4.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1Doc); - - getIndexResponse = client.admin() - .indices() - .getIndex(new GetIndexRequest().indices(restoredIndexName1Doc).includeDefaults(true)) - .get(); - indexSettings = getIndexResponse.settings().get(restoredIndexName1Doc); - assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); - assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); - assertNull(indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); - assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1Doc, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1Doc); - assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1 + 2); } /** @@ -579,83 +521,6 @@ protected IndexShard getIndexShard(String node, String indexName) { return shardId.map(indexService::getShard).orElse(null); } - public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { - String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - String primary = internalCluster().startDataOnlyNode(); - String indexName1 = "testindex1"; - String indexName2 = "testindex2"; - String snapshotRepoName = "test-restore-snapshot-repo"; - String remoteStoreRepo2Name = "test-rs-repo-2" + TEST_REMOTE_STORE_REPO_SUFFIX; - String snapshotName1 = "test-restore-snapshot1"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - Path absolutePath3 = randomRepoPath().toAbsolutePath(); - String restoredIndexName1 = indexName1 + "-restored"; - - createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); - createRepository(remoteStoreRepo2Name, "fs", absolutePath3); - - Client client = client(); - Settings indexSettings = getIndexSettings(1, 0).build(); - 
createIndex(indexName1, indexSettings); - - Settings indexSettings2 = getIndexSettings(1, 0).build(); - createIndex(indexName2, indexSettings2); - - final int numDocsInIndex1 = 5; - final int numDocsInIndex2 = 6; - indexDocuments(client, indexName1, numDocsInIndex1); - indexDocuments(client, indexName2, numDocsInIndex2); - ensureGreen(indexName1, indexName2); - - internalCluster().startDataOnlyNode(); - - logger.info("--> snapshot"); - SnapshotInfo snapshotInfo1 = createSnapshot( - snapshotRepoName, - snapshotName1, - new ArrayList<>(Arrays.asList(indexName1, indexName2)) - ); - assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); - assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); - assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); - - Settings remoteStoreIndexSettings = Settings.builder() - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo2Name) - .build(); - // restore index as a remote store index with different remote store repo - RestoreSnapshotResponse restoreSnapshotResponse = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIndexSettings(remoteStoreIndexSettings) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .get(); - assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); - - // deleting data for restoredIndexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); - // Re-initialize client to make sure we are not using client from stopped node. 
- client = client(clusterManagerNode); - assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); - client.admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true), - PlainActionFuture.newFuture() - ); - ensureYellowAndNoInitializingShards(restoredIndexName1); - ensureGreen(restoredIndexName1); - // indexing some new docs and validating - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); - indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); - } - public void testRestoreShallowSnapshotRepository() throws ExecutionException, InterruptedException { String indexName1 = "testindex1"; String snapshotRepoName = "test-restore-snapshot-repo"; @@ -787,4 +652,98 @@ public void testRestoreShallowSnapshotIndexAfterSnapshot() throws ExecutionExcep assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); } + public void testInvalidRestoreRequestScenarios() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + String index = "test-index"; + String snapshotRepo = "test-restore-snapshot-repo"; + String newRemoteStoreRepo = "test-new-rs-repo"; + String snapshotName1 = "test-restore-snapshot1"; + String snapshotName2 = "test-restore-snapshot2"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + String restoredIndex = index + "-restored"; + + createRepository(snapshotRepo, "fs", getRepositorySettings(absolutePath1, true)); + + Client client = client(); + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(index, indexSettings); + + final int numDocsInIndex = 5; + indexDocuments(client, index, numDocsInIndex); + ensureGreen(index); + + internalCluster().startDataOnlyNode(); + logger.info("--> snapshot"); + + SnapshotInfo snapshotInfo = createSnapshot(snapshotRepo, snapshotName1, new ArrayList<>(List.of(index))); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + updateRepository(snapshotRepo, "fs", getRepositorySettings(absolutePath1, false)); + SnapshotInfo snapshotInfo2 = createSnapshot(snapshotRepo, snapshotName2, new ArrayList<>(List.of(index))); + assertThat(snapshotInfo2.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo2.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo2.successfulShards(), equalTo(snapshotInfo2.totalShards())); + + DeleteResponse deleteResponse = client().prepareDelete(index, "0").execute().actionGet(); + assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); + indexDocuments(client, index, numDocsInIndex, numDocsInIndex + randomIntBetween(2, 5)); + ensureGreen(index); + + // try index restore with remote store disabled + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(SETTING_REMOTE_STORE_ENABLED) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove setting 
[index.remote_store.enabled] on restore")); + + // try index restore with remote store repository modified + Settings remoteStoreIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, newRemoteStoreRepo) + .build(); + + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreIndexSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.segment.repository]" + " on restore")); + + // try index restore with remote store repository and translog store repository disabled + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings( + IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, + IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY + ) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.segment.repository]" + " on restore")); + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java index b019bb57743c9..df1fc9b833171 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java @@ -112,12 +112,16 @@ public void createSnapshot() { } public RestoreSnapshotResponse restoreSnapshotWithSettings(Settings indexSettings) { + return restoreSnapshotWithSettings(indexSettings, RESTORED_INDEX_NAME); + } + + public RestoreSnapshotResponse restoreSnapshotWithSettings(Settings indexSettings, String restoredIndexName) { RestoreSnapshotRequestBuilder builder = client().admin() .cluster() .prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) .setWaitForCompletion(false) .setRenamePattern(INDEX_NAME) - .setRenameReplacement(RESTORED_INDEX_NAME); + .setRenameReplacement(restoredIndexName); if (indexSettings != null) { builder.setIndexSettings(indexSettings); } @@ -311,7 +315,8 @@ public void testSnapshotRestoreOnIndexWithSegRepClusterSetting() throws Exceptio * 2. Snapshot index * 3. Add new set of nodes with `cluster.indices.replication.strategy` set to SEGMENT and `cluster.index.restrict.replication.type` * set to true. - * 4. Perform restore on new set of nodes to validate restored index has `DOCUMENT` replication. + * 4. Perform restore on new set of nodes to validate restored index has `SEGMENT` replication. + * 5. 
Validate that if replication type is passed as DOCUMENT as request parameter, restore operation fails */ public void testSnapshotRestoreOnRestrictReplicationSetting() throws Exception { final int documentCount = scaledRandomIntBetween(1, 10); @@ -337,9 +342,20 @@ public void testSnapshotRestoreOnRestrictReplicationSetting() throws Exception { createSnapshot(); - // Delete index + RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotWithSettings(restoreIndexSegRepSettings(), RESTORED_INDEX_NAME); + assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); + ensureGreen(RESTORED_INDEX_NAME); + GetSettingsResponse settingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(RESTORED_INDEX_NAME).includeDefaults(true)) + .get(); + assertEquals(settingsResponse.getSetting(RESTORED_INDEX_NAME, SETTING_REPLICATION_TYPE), ReplicationType.SEGMENT.toString()); + + // Delete indices assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); assertFalse("index [" + INDEX_NAME + "] should have been deleted", indexExists(INDEX_NAME)); + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(RESTORED_INDEX_NAME)).get()); + assertFalse("index [" + RESTORED_INDEX_NAME + "] should have been deleted", indexExists(RESTORED_INDEX_NAME)); // Start new set of nodes with cluster level replication type setting and restrict replication type setting. Settings settings = Settings.builder() @@ -361,7 +377,25 @@ public void testSnapshotRestoreOnRestrictReplicationSetting() throws Exception { // Perform snapshot restore logger.info("--> Performing snapshot restore to target index"); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> restoreSnapshotWithSettings(null)); + restoreSnapshotResponse = restoreSnapshotWithSettings(null); + + // Assertions + assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); + ensureGreen(RESTORED_INDEX_NAME); + settingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(RESTORED_INDEX_NAME).includeDefaults(true)) + .get(); + assertEquals(settingsResponse.getSetting(RESTORED_INDEX_NAME, SETTING_REPLICATION_TYPE), ReplicationType.SEGMENT.toString()); + + // restore index with cluster default setting + restoreSnapshotWithSettings(restoreIndexSegRepSettings(), RESTORED_INDEX_NAME + "1"); + + // Perform Snapshot Restore with different index name + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> restoreSnapshotWithSettings(restoreIndexDocRepSettings(), RESTORED_INDEX_NAME + "2") + ); assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage()); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 64bea79c9e47b..a180876685f9c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -993,7 +993,7 @@ static Settings aggregateIndexSettings( * @param clusterSettings cluster level settings * @param combinedTemplateSettings combined template settings which satisfy the index */ - private static void updateReplicationStrategy( + public static void updateReplicationStrategy( Settings.Builder settingsBuilder, Settings requestSettings, Settings clusterSettings, @@ -1008,7 +1008,7 @@ 
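(A gloss on the hunks above and below, not part of the diff: updateReplicationStrategy and updateRemoteStoreSettings are widened from private to public, presumably so the reworked restore path in RestoreService further down can reuse them, and combinedTemplateSettings gains a null guard because it can legitimately be null when no template contributed settings to the index.)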
private static void updateReplicationStrategy( final ReplicationType indexReplicationType; if (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings)) { indexReplicationType = INDEX_REPLICATION_TYPE_SETTING.get(requestSettings); - } else if (INDEX_REPLICATION_TYPE_SETTING.exists(combinedTemplateSettings)) { + } else if (combinedTemplateSettings != null && INDEX_REPLICATION_TYPE_SETTING.exists(combinedTemplateSettings)) { indexReplicationType = INDEX_REPLICATION_TYPE_SETTING.get(combinedTemplateSettings); } else if (CLUSTER_REPLICATION_TYPE_SETTING.exists(clusterSettings)) { indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings); @@ -1023,20 +1023,20 @@ private static void updateReplicationStrategy( /** * Updates index settings to enable remote store by default based on node attributes * @param settingsBuilder index settings builder to be updated with relevant settings - * @param clusterSettings cluster level settings + * @param nodeSettings node settings */ - private static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, Settings clusterSettings) { - if (isRemoteDataAttributePresent(clusterSettings)) { + public static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, Settings nodeSettings) { + if (RemoteStoreNodeAttribute.isRemoteStoreAttributePresent(nodeSettings)) { settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true) .put( SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, - clusterSettings.get( + nodeSettings.get( Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY ) ) .put( SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, - clusterSettings.get( + nodeSettings.get( Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY ) ); diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index ff393ecf19a99..980610d337a63 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -79,6 +79,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.ArrayUtils; import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; @@ -90,6 +91,7 @@ import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -116,7 +118,9 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static 
org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_UPGRADED; import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; @@ -153,20 +157,30 @@ public class RestoreService implements ClusterStateApplier { private static final Logger logger = LogManager.getLogger(RestoreService.class); - private static final Set UNMODIFIABLE_SETTINGS = unmodifiableSet( - newHashSet(SETTING_NUMBER_OF_SHARDS, SETTING_VERSION_CREATED, SETTING_INDEX_UUID, SETTING_CREATION_DATE, SETTING_HISTORY_UUID) + private static final Set USER_UNMODIFIABLE_SETTINGS = unmodifiableSet( + newHashSet( + SETTING_NUMBER_OF_SHARDS, + SETTING_VERSION_CREATED, + SETTING_INDEX_UUID, + SETTING_CREATION_DATE, + SETTING_HISTORY_UUID, + SETTING_REMOTE_STORE_ENABLED, + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, + SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY + ) ); // It's OK to change some settings, but we shouldn't allow simply removing them - private static final Set UNREMOVABLE_SETTINGS; + private static final Set USER_UNREMOVABLE_SETTINGS; + private static final String REMOTE_STORE_INDEX_SETTINGS_REGEX = "index.remote_store.*"; static { - Set unremovable = new HashSet<>(UNMODIFIABLE_SETTINGS.size() + 4); - unremovable.addAll(UNMODIFIABLE_SETTINGS); + Set unremovable = new HashSet<>(USER_UNMODIFIABLE_SETTINGS.size() + 4); + unremovable.addAll(USER_UNMODIFIABLE_SETTINGS); unremovable.add(SETTING_NUMBER_OF_REPLICAS); unremovable.add(SETTING_AUTO_EXPAND_REPLICAS); unremovable.add(SETTING_VERSION_UPGRADED); - UNREMOVABLE_SETTINGS = unmodifiableSet(unremovable); + USER_UNREMOVABLE_SETTINGS = unmodifiableSet(unremovable); } private final ClusterService clusterService; @@ -370,10 +384,16 @@ public ClusterState execute(ClusterState currentState) { boolean partial = checkPartial(index); IndexId snapshotIndexId = repositoryData.resolveIndexId(index); + + final Settings overrideSettingsInternal = getOverrideSettingsInternal(); + final String[] ignoreSettingsInternal = getIgnoreSettingsInternal(); + IndexMetadata snapshotIndexMetadata = updateIndexSettings( metadata.index(index), request.indexSettings(), - request.ignoreIndexSettings() + request.ignoreIndexSettings(), + overrideSettingsInternal, + ignoreSettingsInternal ); if (isRemoteSnapshot) { snapshotIndexMetadata = addSnapshotToIndexSettings(snapshotIndexMetadata, snapshot, snapshotIndexId); @@ -434,7 +454,7 @@ public ClusterState execute(ClusterState currentState) { final Index renamedIndex; if (currentIndexMetadata == null) { // Index doesn't exist - create it and start recovery - // Make sure that the index we are about to create has a validate name + // Make sure that the index we are about to create has valid name boolean isHidden = IndexMetadata.INDEX_HIDDEN_SETTING.get(snapshotIndexMetadata.getSettings()); createIndexService.validateIndexName(renamedIndexName, currentState); createIndexService.validateDotIndex(renamedIndexName, isHidden); @@ -640,6 +660,40 @@ private void checkAliasNameConflicts(Map renamedIndices, Set ignoreShards) { for (SnapshotShardFailure failure : snapshotInfo.shardFailures()) { if (index.equals(failure.index())) { @@ -712,16 +766,18 @@ private void validateExistingIndex( */ private IndexMetadata updateIndexSettings( IndexMetadata indexMetadata, - Settings changeSettings, - String[] ignoreSettings + Settings overrideSettings, + String[] ignoreSettings, + Settings overrideSettingsInternal, + String[] ignoreSettingsInternal ) { Settings normalizedChangeSettings = Settings.builder() - .put(changeSettings) + 
.put(overrideSettings) .normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX) .build(); if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(indexMetadata.getSettings()) - && IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(changeSettings) - && IndexSettings.INDEX_SOFT_DELETES_SETTING.get(changeSettings) == false) { + && IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(overrideSettings) + && IndexSettings.INDEX_SOFT_DELETES_SETTING.get(overrideSettings) == false) { throw new SnapshotRestoreException( snapshot, "cannot disable setting [" + IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey() + "] on restore" @@ -733,7 +789,7 @@ private IndexMetadata updateIndexSettings( List simpleMatchPatterns = new ArrayList<>(); for (String ignoredSetting : ignoreSettings) { if (!Regex.isSimpleMatchPattern(ignoredSetting)) { - if (UNREMOVABLE_SETTINGS.contains(ignoredSetting)) { + if (USER_UNREMOVABLE_SETTINGS.contains(ignoredSetting)) { throw new SnapshotRestoreException( snapshot, "cannot remove setting [" + ignoredSetting + "] on restore" @@ -745,8 +801,18 @@ private IndexMetadata updateIndexSettings( simpleMatchPatterns.add(ignoredSetting); } } + + // add internal settings to ignore settings list + for (String ignoredSetting : ignoreSettingsInternal) { + if (!Regex.isSimpleMatchPattern(ignoredSetting)) { + keyFilters.add(ignoredSetting); + } else { + simpleMatchPatterns.add(ignoredSetting); + } + } + Predicate settingsFilter = k -> { - if (UNREMOVABLE_SETTINGS.contains(k) == false) { + if (USER_UNREMOVABLE_SETTINGS.contains(k) == false) { for (String filterKey : keyFilters) { if (k.equals(filterKey)) { return false; @@ -763,12 +829,17 @@ private IndexMetadata updateIndexSettings( Settings.Builder settingsBuilder = Settings.builder() .put(settings.filter(settingsFilter)) .put(normalizedChangeSettings.filter(k -> { - if (UNMODIFIABLE_SETTINGS.contains(k)) { + if (USER_UNMODIFIABLE_SETTINGS.contains(k)) { throw new SnapshotRestoreException(snapshot, "cannot modify setting [" + k + "] on restore"); } else { return true; } })); + + // override internal settings + if (overrideSettingsInternal != null) { + settingsBuilder.put(overrideSettingsInternal).normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX); + } settingsBuilder.remove(MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()); return builder.settings(settingsBuilder).build(); } From 645b1f1b6c0738ef2b4a0de364b6bcd42af239b9 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Wed, 10 Apr 2024 22:55:18 -0700 Subject: [PATCH 12/65] [Derived Fields] PR4: Capability to define derived fields in search request (#12850) * Support derived fields definition in search request * adds support for fetch phase on derived fields * adds support for highlighting on derived fields --------- Signed-off-by: Rishabh Maurya --- .../java/org/opensearch/client/SearchIT.java | 229 +++++++++++ .../search/fields/SearchFieldsIT.java | 157 ++++++++ .../action/search/SearchRequestBuilder.java | 15 + .../opensearch/index/mapper/DerivedField.java | 90 +++++ .../index/mapper/DerivedFieldMapper.java | 37 +- .../mapper/DerivedFieldSupportedTypes.java | 40 +- .../index/mapper/DerivedFieldType.java | 197 +++------- .../mapper/DerivedFieldValueFetcher.java | 38 +- .../index/mapper/DocumentMapperParser.java | 2 +- .../index/query/DerivedFieldQuery.java | 29 +- .../index/query/QueryShardContext.java | 30 +- .../VectorGeoPointShapeQueryProcessor.java | 5 +- .../opensearch/script/DerivedFieldScript.java | 7 - .../org/opensearch/search/SearchService.java | 25 ++ 
.../search/builder/SearchSourceBuilder.java | 76 +++- .../subphase/highlight/HighlightPhase.java | 8 +- .../subphase/highlight/HighlightUtils.java | 4 + .../highlight/UnifiedHighlighter.java | 5 + .../mapper/DerivedFieldMapperQueryTests.java | 48 ++- .../index/mapper/DerivedFieldTypeTests.java | 7 +- .../index/query/DerivedFieldQueryTests.java | 8 +- .../index/query/QueryShardContextTests.java | 31 ++ .../opensearch/search/SearchServiceTests.java | 44 +++ .../builder/SearchSourceBuilderTests.java | 46 +++ .../DerivedFieldFetchAndHighlightTests.java | 366 ++++++++++++++++++ .../opensearch/script/MockScriptEngine.java | 44 ++- 26 files changed, 1373 insertions(+), 215 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/DerivedField.java create mode 100644 server/src/test/java/org/opensearch/search/fetch/subphase/highlight/DerivedFieldFetchAndHighlightTests.java diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index f2778a97c0c1a..c1f1cbf1d0e91 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -54,15 +54,19 @@ import org.opensearch.action.search.SearchScrollRequest; import org.opensearch.client.core.CountRequest; import org.opensearch.client.core.CountResponse; +import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.geometry.Rectangle; +import org.opensearch.index.query.GeoShapeQueryBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.RangeQueryBuilder; import org.opensearch.index.query.ScriptQueryBuilder; import org.opensearch.index.query.TermsQueryBuilder; import org.opensearch.join.aggregations.Children; @@ -102,6 +106,8 @@ import org.opensearch.search.suggest.Suggest; import org.opensearch.search.suggest.SuggestBuilder; import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; import org.hamcrest.Matchers; import org.junit.Before; @@ -116,6 +122,7 @@ import java.util.concurrent.TimeUnit; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.geoShapeQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.both; @@ -764,6 +771,228 @@ public void testSearchWithWeirdScriptFields() throws Exception { } } + public void testSearchWithDerivedFields() throws Exception { + // Just testing DerivedField definition from SearchSourceBuilder derivedField() + // We are not testing the full functionality here + Request doc = new Request("PUT", "test/_doc/1"); + doc.setJsonEntity("{\"field\":\"value\"}"); + client().performRequest(doc); + client().performRequest(new Request("POST", "/test/_refresh")); + // Keyword field + { + SearchRequest searchRequest = new SearchRequest("test").source( + 
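// Editorial gloss on the cases that follow (not part of the diff): derivedField(name, type, script)
// registers a field that exists only for this request; every emit() in the script must produce a
// value of the declared type, and calling emit() more than once makes the field multi-valued.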
SearchSourceBuilder.searchSource() + .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) + .fetchField("result") + .query(new TermsQueryBuilder("result", "value")) + ); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals("value", values.get(0)); + + // multi valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField( + "result", + "keyword", + new Script("emit(params._source[\"field\"]);emit(params._source[\"field\"] + \"_2\")") + ) + .query(new TermsQueryBuilder("result", "value_2")) + .fetchField("result") + ); + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals("value", values.get(0)); + assertEquals("value_2", values.get(1)); + } + // Boolean field + { + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "boolean", new Script("emit(((String)params._source[\"field\"]).equals(\"value\"))")) + .query(new TermsQueryBuilder("result", "true")) + .fetchField("result") + ); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals(true, values.get(0)); + } + // Long field + { + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "long", new Script("emit(Long.MAX_VALUE)")) + .query(new RangeQueryBuilder("result").from(Long.MAX_VALUE - 1).to(Long.MAX_VALUE)) + .fetchField("result") + ); + + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals(Long.MAX_VALUE, values.get(0)); + + // multi-valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "long", new Script("emit(Long.MAX_VALUE); emit(Long.MIN_VALUE);")) + .query(new RangeQueryBuilder("result").from(Long.MIN_VALUE).to(Long.MIN_VALUE + 1)) + .fetchField("result") + ); + + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals(Long.MAX_VALUE, values.get(0)); + assertEquals(Long.MIN_VALUE, values.get(1)); + } + // Double field + { + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "double", new Script("emit(Double.MAX_VALUE)")) + .query(new RangeQueryBuilder("result").from(Double.MAX_VALUE - 1).to(Double.MAX_VALUE)) + .fetchField("result") + ); + 
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals(Double.MAX_VALUE, values.get(0)); + + // multi-valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "double", new Script("emit(Double.MAX_VALUE); emit(Double.MIN_VALUE);")) + .query(new RangeQueryBuilder("result").from(Double.MIN_VALUE).to(Double.MIN_VALUE + 1)) + .fetchField("result") + ); + + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals(Double.MAX_VALUE, values.get(0)); + assertEquals(Double.MIN_VALUE, values.get(1)); + } + // Date field + { + DateTime date1 = new DateTime(1990, 12, 29, 0, 0, DateTimeZone.UTC); + DateTime date2 = new DateTime(1990, 12, 30, 0, 0, DateTimeZone.UTC); + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "date", new Script("emit(" + date1.getMillis() + "L)")) + .query(new RangeQueryBuilder("result").from(date1.toString()).to(date2.toString())) + .fetchField("result") + ); + + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals(date1.toString(), values.get(0)); + + // multi-valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "date", new Script("emit(" + date1.getMillis() + "L); " + "emit(" + date2.getMillis() + "L)")) + .query(new RangeQueryBuilder("result").from(date1.toString()).to(date2.toString())) + .fetchField("result") + ); + + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals(date1.toString(), values.get(0)); + assertEquals(date2.toString(), values.get(1)); + } + // Geo field + { + GeoShapeQueryBuilder qb = geoShapeQuery("result", new Rectangle(-35, 35, 35, -35)); + qb.relation(ShapeRelation.INTERSECTS); + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "geo_point", new Script("emit(10.0, 20.0)")) + .query(qb) + .fetchField("result") + ); + + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals(10.0, ((HashMap) values.get(0)).get("lat")); + assertEquals(20.0, ((HashMap) values.get(0)).get("lon")); + + // multi-valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "geo_point", new Script("emit(10.0, 20.0); emit(20.0, 
30.0);")) + .query(qb) + .fetchField("result") + ); + + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals(10.0, ((HashMap) values.get(0)).get("lat")); + assertEquals(20.0, ((HashMap) values.get(0)).get("lon")); + assertEquals(20.0, ((HashMap) values.get(1)).get("lat")); + assertEquals(30.0, ((HashMap) values.get(1)).get("lon")); + } + // IP field + { + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource().derivedField("result", "ip", new Script("emit(\"10.0.0.1\")")).fetchField("result") + ); + + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals("10.0.0.1", values.get(0)); + + // multi-valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "ip", new Script("emit(\"10.0.0.1\"); emit(\"10.0.0.2\");")) + .fetchField("result") + ); + + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals("10.0.0.1", values.get(0)); + assertEquals("10.0.0.2", values.get(1)); + + } + + } + public void testSearchScroll() throws Exception { for (int i = 0; i < 100; i++) { XContentBuilder builder = jsonBuilder().startObject().field("field", i).endObject(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index 906d45ef84b3f..2ce96092203e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -40,6 +40,7 @@ import org.opensearch.common.Numbers; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.document.DocumentField; +import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateUtils; @@ -51,6 +52,7 @@ import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.fielddata.ScriptDocValues; +import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; @@ -189,6 +191,20 @@ protected Map, Object>> pluginScripts() { scripts.put("doc['s']", vars -> docScript(vars, "s")); scripts.put("doc['ms']", vars -> docScript(vars, "ms")); + scripts.put("doc['keyword_field']", vars -> sourceScript(vars, "keyword_field")); + scripts.put("doc['multi_keyword_field']", vars -> sourceScript(vars, "multi_keyword_field")); + scripts.put("doc['long_field']", vars -> sourceScript(vars, "long_field")); + scripts.put("doc['multi_long_field']", vars -> sourceScript(vars, "multi_long_field")); + 
scripts.put("doc['double_field']", vars -> sourceScript(vars, "double_field")); + scripts.put("doc['multi_double_field']", vars -> sourceScript(vars, "multi_double_field")); + scripts.put("doc['date_field']", vars -> sourceScript(vars, "date_field")); + scripts.put("doc['multi_date_field']", vars -> sourceScript(vars, "multi_date_field")); + scripts.put("doc['ip_field']", vars -> sourceScript(vars, "ip_field")); + scripts.put("doc['multi_ip_field']", vars -> sourceScript(vars, "multi_ip_field")); + scripts.put("doc['boolean_field']", vars -> sourceScript(vars, "boolean_field")); + scripts.put("doc['geo_field']", vars -> sourceScript(vars, "geo_field")); + scripts.put("doc['multi_geo_field']", vars -> sourceScript(vars, "multi_geo_field")); + return scripts; } @@ -1299,6 +1315,147 @@ public void testScriptFields() throws Exception { } } + public void testDerivedFields() throws Exception { + assertAcked( + prepareCreate("index").setMapping( + "keyword_field", + "type=keyword", + "multi_keyword_field", + "type=keyword", + "long_field", + "type=long", + "multi_long_field", + "type=long", + "double_field", + "type=double", + "multi_double_field", + "type=double", + "date_field", + "type=date", + "multi_date_field", + "type=date", + "ip_field", + "type=ip", + "multi_ip_field", + "type=ip", + "boolean_field", + "type=boolean", + "geo_field", + "type=geo_point", + "multi_geo_field", + "type=geo_point" + ).get() + ); + final int numDocs = randomIntBetween(3, 8); + List reqs = new ArrayList<>(); + + DateTime date1 = new DateTime(1990, 12, 29, 0, 0, DateTimeZone.UTC); + DateTime date2 = new DateTime(1990, 12, 30, 0, 0, DateTimeZone.UTC); + + for (int i = 0; i < numDocs; ++i) { + reqs.add( + client().prepareIndex("index") + .setId(Integer.toString(i)) + .setSource( + "keyword_field", + Integer.toString(i), + "multi_keyword_field", + new String[] { Integer.toString(i), Integer.toString(i + 1) }, + "long_field", + (long) i, + "multi_long_field", + new long[] { i, i + 1 }, + "double_field", + (double) i, + "multi_double_field", + new double[] { i, i + 1 }, + "date_field", + date1.getMillis(), + "multi_date_field", + new Long[] { date1.getMillis(), date2.getMillis() }, + "ip_field", + "172.16.1.10", + "multi_ip_field", + new String[] { "172.16.1.10", "172.16.1.11" }, + "boolean_field", + true, + "geo_field", + new GeoPoint(12.0, 10.0), + "multi_geo_field", + new GeoPoint[] { new GeoPoint(12.0, 10.0), new GeoPoint(13.0, 10.0) } + ) + ); + } + indexRandom(true, reqs); + indexRandomForConcurrentSearch("index"); + ensureSearchable(); + SearchRequestBuilder req = client().prepareSearch("index"); + String[][] fieldLookup = new String[][] { + { "keyword_field", "keyword" }, + { "multi_keyword_field", "keyword" }, + { "long_field", "long" }, + { "multi_long_field", "long" }, + { "double_field", "double" }, + { "multi_double_field", "double" }, + { "date_field", "date" }, + { "multi_date_field", "date" }, + { "ip_field", "ip" }, + { "multi_ip_field", "ip" }, + { "boolean_field", "boolean" }, + { "geo_field", "geo_point" }, + { "multi_geo_field", "geo_point" } }; + for (String[] field : fieldLookup) { + req.addDerivedField( + "derived_" + field[0], + field[1], + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + field[0] + "']", Collections.emptyMap()) + ); + } + req.addFetchField("derived_*"); + SearchResponse resp = req.get(); + assertSearchResponse(resp); + for (SearchHit hit : resp.getHits().getHits()) { + final int id = Integer.parseInt(hit.getId()); + Map fields = hit.getFields(); + + 
assertEquals(fields.get("derived_keyword_field").getValues().get(0), Integer.toString(id)); + assertEquals(fields.get("derived_multi_keyword_field").getValues().get(0), Integer.toString(id)); + assertEquals(fields.get("derived_multi_keyword_field").getValues().get(1), Integer.toString(id + 1)); + + assertEquals(fields.get("derived_long_field").getValues().get(0), id); + assertEquals(fields.get("derived_multi_long_field").getValues().get(0), id); + assertEquals(fields.get("derived_multi_long_field").getValues().get(1), (id + 1)); + + assertEquals(fields.get("derived_double_field").getValues().get(0), (double) id); + assertEquals(fields.get("derived_multi_double_field").getValues().get(0), (double) id); + assertEquals(fields.get("derived_multi_double_field").getValues().get(1), (double) (id + 1)); + + assertEquals( + fields.get("derived_date_field").getValues().get(0), + DateFieldMapper.getDefaultDateTimeFormatter().formatJoda(date1) + ); + assertEquals( + fields.get("derived_multi_date_field").getValues().get(0), + DateFieldMapper.getDefaultDateTimeFormatter().formatJoda(date1) + ); + assertEquals( + fields.get("derived_multi_date_field").getValues().get(1), + DateFieldMapper.getDefaultDateTimeFormatter().formatJoda(date2) + ); + + assertEquals(fields.get("derived_ip_field").getValues().get(0), "172.16.1.10"); + assertEquals(fields.get("derived_multi_ip_field").getValues().get(0), "172.16.1.10"); + assertEquals(fields.get("derived_multi_ip_field").getValues().get(1), "172.16.1.11"); + + assertEquals(fields.get("derived_boolean_field").getValues().get(0), true); + + assertEquals(fields.get("derived_geo_field").getValues().get(0), new GeoPoint(12.0, 10.0)); + assertEquals(fields.get("derived_multi_geo_field").getValues().get(0), new GeoPoint(12.0, 10.0)); + assertEquals(fields.get("derived_multi_geo_field").getValues().get(1), new GeoPoint(13.0, 10.0)); + + } + } + public void testDocValueFieldsWithFieldAlias() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java index 9dac827e7d518..4a547ee2c82bd 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java @@ -363,6 +363,21 @@ public SearchRequestBuilder addScriptField(String name, Script script) { return this; } + /** + * Adds a derived field of a given type. The script provided will be used to derive the value + * of a given type. Thereafter, it can be treated as regular field of a given type to perform + * query on them. + * + * @param name The name of the field to be used in various parts of the query. The name will also represent + * the field value in the return hit. + * @param type The type of derived field. All values emitted by script must be of this type + * @param script The script to use + */ + public SearchRequestBuilder addDerivedField(String name, String type, Script script) { + sourceBuilder().derivedField(name, type, script); + return this; + } + /** * Adds a sort against the given field name and the sort ordering. 
* diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedField.java b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java new file mode 100644 index 0000000000000..7ebe4e5f0b0e8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.script.Script; + +import java.io.IOException; +import java.util.Objects; + +/** + * DerivedField representation: expects a name, type and script. + */ +@PublicApi(since = "2.14.0") +public class DerivedField implements Writeable, ToXContentFragment { + + private final String name; + private final String type; + private final Script script; + + public DerivedField(String name, String type, Script script) { + this.name = name; + this.type = type; + this.script = script; + } + + public DerivedField(StreamInput in) throws IOException { + name = in.readString(); + type = in.readString(); + script = new Script(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeString(type); + script.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(name); + builder.field("type", type); + builder.field("script", script); + builder.endObject(); + return builder; + } + + public String getName() { + return name; + } + + public String getType() { + return type; + } + + public Script getScript() { + return script; + } + + @Override + public int hashCode() { + return Objects.hash(name, type, script); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + DerivedField other = (DerivedField) obj; + return Objects.equals(name, other.name) && Objects.equals(type, other.type) && Objects.equals(script, other.script); + } + +} diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java index b448487a4f810..c6ae71320c35c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java @@ -14,7 +14,9 @@ import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.function.Function; /** @@ -37,7 +39,7 @@ private static DerivedFieldMapper toType(FieldMapper in) { */ public static class Builder extends ParametrizedFieldMapper.Builder { // TODO: The type of parameter may change here if the actual underlying FieldType object is needed - private final Parameter type = Parameter.stringParam("type", false, m -> toType(m).type, "text"); + private final Parameter type = Parameter.stringParam("type", false, m -> toType(m).type, 
""); private final Parameter