From 35605c696b2735f66ce7ae81f228a800df20c1d0 Mon Sep 17 00:00:00 2001 From: Ryan Bogan <10944539+ryanbogan@users.noreply.github.com> Date: Fri, 23 Dec 2022 13:42:10 -0600 Subject: [PATCH 01/13] Added TransportActions support and removed LocalNodeResponse for extensions (#5615) * Added getSettings() support, ActionListener onFailure(), and initial createComponents support for extensions Signed-off-by: Ryan Bogan * Update CHANGELOG Signed-off-by: Ryan Bogan * Rework EnvironmentSettings API Signed-off-by: Ryan Bogan * Addressed PR Comments Signed-off-by: Ryan Bogan * Removed ExtensionActionListener and changed exception types Signed-off-by: Ryan Bogan * Added TransportActions support and removed LocalNodeResponse for extensions Signed-off-by: Ryan Bogan * Update CHANGELOG Signed-off-by: Ryan Bogan * Update failure handling in UpdateSettingsResponseHandler Signed-off-by: Ryan Bogan * Added REST updates Signed-off-by: Ryan Bogan * Fix merge conflict Signed-off-by: Ryan Bogan Signed-off-by: Ryan Bogan --- CHANGELOG.md | 2 +- .../org/opensearch/action/ActionModule.java | 7 + .../opensearch/cluster/LocalNodeResponse.java | 60 ---- .../extensions/DiscoveryExtensionNode.java | 21 +- .../extensions/ExtensionDependency.java | 89 ++++++ .../extensions/ExtensionsManager.java | 114 ++++--- .../extensions/ExtensionsSettings.java | 6 + .../RegisterTransportActionsRequest.java | 36 ++- .../action/ExtensionActionRequest.java | 76 +++++ .../action/ExtensionActionResponse.java | 59 ++++ .../ExtensionHandleTransportRequest.java | 89 ++++++ .../action/ExtensionProxyAction.java | 25 ++ .../action/ExtensionTransportAction.java | 55 ++++ .../ExtensionTransportActionsHandler.java | 193 ++++++++++++ .../TransportActionRequestFromExtension.java | 102 ++++++ .../TransportActionResponseToExtension.java | 58 ++++ .../extensions/action/package-info.java | 10 + .../extensions/rest/ExtensionRestRequest.java | 292 ++++++++++++++++++ .../rest/ExtensionRestResponse.java | 113 +++++++ .../rest/RestExecuteOnExtensionRequest.java | 77 ----- .../rest/RestExecuteOnExtensionResponse.java | 56 +++- .../rest/RestSendToExtensionAction.java | 90 +++--- .../main/java/org/opensearch/node/Node.java | 6 +- .../extensions/ExtensionsManagerTests.java | 146 +++++++-- .../RegisterTransportActionsRequestTests.java | 8 +- .../action/ExtensionActionRequestTests.java | 37 +++ .../action/ExtensionActionResponseTests.java | 32 ++ .../ExtensionHandleTransportRequestTests.java | 35 +++ .../action/ExtensionProxyActionTests.java | 18 ++ ...ExtensionTransportActionsHandlerTests.java | 181 +++++++++++ ...nsportActionRequestFromExtensionTests.java | 42 +++ ...ansportActionResponseToExtensionTests.java | 43 +++ .../rest/ExtensionRestRequestTests.java | 262 ++++++++++++++++ .../rest/ExtensionRestResponseTests.java | 132 ++++++++ .../rest/RestExecuteOnExtensionTests.java | 94 ------ .../rest/RestSendToExtensionActionTests.java | 3 +- .../src/test/resources/config/extensions.yml | 5 + 37 files changed, 2297 insertions(+), 377 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java create mode 100644 server/src/main/java/org/opensearch/extensions/ExtensionDependency.java create mode 100644 server/src/main/java/org/opensearch/extensions/action/ExtensionActionRequest.java create mode 100644 server/src/main/java/org/opensearch/extensions/action/ExtensionActionResponse.java create mode 100644 server/src/main/java/org/opensearch/extensions/action/ExtensionHandleTransportRequest.java create mode 100644 
server/src/main/java/org/opensearch/extensions/action/ExtensionProxyAction.java create mode 100644 server/src/main/java/org/opensearch/extensions/action/ExtensionTransportAction.java create mode 100644 server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java create mode 100644 server/src/main/java/org/opensearch/extensions/action/TransportActionRequestFromExtension.java create mode 100644 server/src/main/java/org/opensearch/extensions/action/TransportActionResponseToExtension.java create mode 100644 server/src/main/java/org/opensearch/extensions/action/package-info.java create mode 100644 server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java create mode 100644 server/src/main/java/org/opensearch/extensions/rest/ExtensionRestResponse.java delete mode 100644 server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionRequest.java create mode 100644 server/src/test/java/org/opensearch/extensions/action/ExtensionActionRequestTests.java create mode 100644 server/src/test/java/org/opensearch/extensions/action/ExtensionActionResponseTests.java create mode 100644 server/src/test/java/org/opensearch/extensions/action/ExtensionHandleTransportRequestTests.java create mode 100644 server/src/test/java/org/opensearch/extensions/action/ExtensionProxyActionTests.java create mode 100644 server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java create mode 100644 server/src/test/java/org/opensearch/extensions/action/TransportActionRequestFromExtensionTests.java create mode 100644 server/src/test/java/org/opensearch/extensions/action/TransportActionResponseToExtensionTests.java create mode 100644 server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java create mode 100644 server/src/test/java/org/opensearch/extensions/rest/ExtensionRestResponseTests.java delete mode 100644 server/src/test/java/org/opensearch/extensions/rest/RestExecuteOnExtensionTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 07f70f9f4a7aa..42cd569c9fb49 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add feature flag for extensions ([#5211](https://github.com/opensearch-project/OpenSearch/pull/5211)) - Added jackson dependency to server ([#5366] (https://github.com/opensearch-project/OpenSearch/pull/5366)) - Adding support to register settings dynamically ([#5495](https://github.com/opensearch-project/OpenSearch/pull/5495)) -- Added experimental support for extensions ([#5347](https://github.com/opensearch-project/OpenSearch/pull/5347)), ([#5518](https://github.com/opensearch-project/OpenSearch/pull/5518), ([#5597](https://github.com/opensearch-project/OpenSearch/pull/5597))) +- Added experimental support for extensions ([#5347](https://github.com/opensearch-project/OpenSearch/pull/5347)), ([#5518](https://github.com/opensearch-project/OpenSearch/pull/5518), ([#5597](https://github.com/opensearch-project/OpenSearch/pull/5597)), ([#5615](https://github.com/opensearch-project/OpenSearch/pull/5615))) - Add CI bundle pattern to distribution download ([#5348](https://github.com/opensearch-project/OpenSearch/pull/5348)) - Add support for ppc64le architecture ([#5459](https://github.com/opensearch-project/OpenSearch/pull/5459)) diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 84bc9b395c5dc..bba3aabdd61f9 100644 --- 
a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -280,6 +280,8 @@ import org.opensearch.common.inject.TypeLiteral; import org.opensearch.common.inject.multibindings.MapBinder; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.extensions.action.ExtensionProxyAction; +import org.opensearch.extensions.action.ExtensionTransportAction; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; @@ -703,6 +705,11 @@ public void reg // Remote Store actions.register(RestoreRemoteStoreAction.INSTANCE, TransportRestoreRemoteStoreAction.class); + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + // ExtensionProxyAction + actions.register(ExtensionProxyAction.INSTANCE, ExtensionTransportAction.class); + } + // Decommission actions actions.register(DecommissionAction.INSTANCE, TransportDecommissionAction.class); actions.register(GetDecommissionStateAction.INSTANCE, TransportGetDecommissionStateAction.class); diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java b/server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java deleted file mode 100644 index ef1ef4a49ad62..0000000000000 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.cluster; - -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; - -import java.io.IOException; -import java.util.Objects; - -/** - * LocalNode Response for Extensibility - * - * @opensearch.internal - */ -public class LocalNodeResponse extends TransportResponse { - private final DiscoveryNode localNode; - - public LocalNodeResponse(ClusterService clusterService) { - this.localNode = clusterService.localNode(); - } - - public LocalNodeResponse(StreamInput in) throws IOException { - super(in); - this.localNode = new DiscoveryNode(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - this.localNode.writeTo(out); - } - - @Override - public String toString() { - return "LocalNodeResponse{" + "localNode=" + localNode + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - LocalNodeResponse that = (LocalNodeResponse) o; - return Objects.equals(localNode, that.localNode); - } - - @Override - public int hashCode() { - return Objects.hash(localNode); - } - -} diff --git a/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java index e4fa0d74f78f0..1d9e8b768be33 100644 --- a/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java +++ b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java @@ -20,6 +20,9 @@ import org.opensearch.plugins.PluginInfo; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import java.util.Map; /** @@ 
-30,6 +33,7 @@ public class DiscoveryExtensionNode extends DiscoveryNode implements Writeable, ToXContentFragment { private final PluginInfo pluginInfo; + private List<ExtensionDependency> dependencies = Collections.emptyList(); public DiscoveryExtensionNode( String name, @@ -40,16 +44,22 @@ public DiscoveryExtensionNode( TransportAddress address, Map<String, String> attributes, Version version, - PluginInfo pluginInfo + PluginInfo pluginInfo, + List<ExtensionDependency> dependencies ) { super(name, id, ephemeralId, hostName, hostAddress, address, attributes, DiscoveryNodeRole.BUILT_IN_ROLES, version); this.pluginInfo = pluginInfo; + this.dependencies = dependencies; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); pluginInfo.writeTo(out); + out.writeVInt(dependencies.size()); + for (ExtensionDependency dependency : dependencies) { + dependency.writeTo(out); + } } /** @@ -61,6 +71,15 @@ public void writeTo(StreamOutput out) throws IOException { public DiscoveryExtensionNode(final StreamInput in) throws IOException { super(in); this.pluginInfo = new PluginInfo(in); + int size = in.readVInt(); + dependencies = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + dependencies.add(new ExtensionDependency(in)); + } + } + + public List<ExtensionDependency> getDependencies() { + return dependencies; } @Override diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java b/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java new file mode 100644 index 0000000000000..5e7fd651edfac --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java @@ -0,0 +1,89 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions; + +import java.io.IOException; +import java.util.Objects; + +import org.opensearch.Version; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; + +/** + * This class handles the dependent extensions information + * + * @opensearch.internal + */ +public class ExtensionDependency implements Writeable { + private String uniqueId; + private Version version; + + public ExtensionDependency(String uniqueId, Version version) { + this.uniqueId = uniqueId; + this.version = version; + } + + /** + * Jackson requires a no-arg constructor.
+ * + */ + @SuppressWarnings("unused") + private ExtensionDependency() {} + + /** + * Reads the extension dependency information + * + * @throws IOException if an I/O exception occurred reading the extension dependency information + */ + public ExtensionDependency(StreamInput in) throws IOException { + uniqueId = in.readString(); + version = Version.readVersion(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(uniqueId); + Version.writeVersion(version, out); + } + + /** + * The uniqueId of the dependency extension + * + * @return the extension uniqueId + */ + public String getUniqueId() { + return uniqueId; + } + + /** + * The minimum version of the dependency extension + * + * @return the extension version + */ + public Version getVersion() { + return version; + } + + public String toString() { + return "ExtensionDependency:{uniqueId=" + uniqueId + ", version=" + version + "}"; + } + + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + ExtensionDependency that = (ExtensionDependency) obj; + return Objects.equals(uniqueId, that.uniqueId) && Objects.equals(version, that.version); + } + + public int hashCode() { + return Objects.hash(uniqueId, version); + } +} diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index e42b9d1e755a5..e638faab3a747 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -26,8 +26,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterSettingsResponse; -import org.opensearch.cluster.LocalNodeResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.io.FileSystemUtils; @@ -39,6 +39,10 @@ import org.opensearch.discovery.InitializeExtensionRequest; import org.opensearch.discovery.InitializeExtensionResponse; import org.opensearch.extensions.ExtensionsSettings.Extension; +import org.opensearch.extensions.action.ExtensionActionRequest; +import org.opensearch.extensions.action.ExtensionActionResponse; +import org.opensearch.extensions.action.ExtensionTransportActionsHandler; +import org.opensearch.extensions.action.TransportActionRequestFromExtension; import org.opensearch.extensions.rest.RegisterRestActionsRequest; import org.opensearch.extensions.rest.RestActionsRequestHandler; import org.opensearch.extensions.settings.CustomSettingsRequestHandler; @@ -71,7 +75,6 @@ public class ExtensionsManager { public static final String INDICES_EXTENSION_POINT_ACTION_NAME = "indices:internal/extensions"; public static final String INDICES_EXTENSION_NAME_ACTION_NAME = "indices:internal/name"; public static final String REQUEST_EXTENSION_CLUSTER_STATE = "internal:discovery/clusterstate"; - public static final String REQUEST_EXTENSION_LOCAL_NODE = "internal:discovery/localnode"; public static final String REQUEST_EXTENSION_CLUSTER_SETTINGS = "internal:discovery/clustersettings"; public static final String REQUEST_EXTENSION_ENVIRONMENT_SETTINGS = "internal:discovery/enviornmentsettings"; public static final String REQUEST_EXTENSION_ADD_SETTINGS_UPDATE_CONSUMER = 
"internal:discovery/addsettingsupdateconsumer"; @@ -81,6 +84,9 @@ public class ExtensionsManager { public static final String REQUEST_EXTENSION_REGISTER_TRANSPORT_ACTIONS = "internal:discovery/registertransportactions"; public static final String REQUEST_OPENSEARCH_PARSE_NAMED_WRITEABLE = "internal:discovery/parsenamedwriteable"; public static final String REQUEST_REST_EXECUTE_ON_EXTENSION_ACTION = "internal:extensions/restexecuteonextensiontaction"; + public static final String REQUEST_EXTENSION_HANDLE_TRANSPORT_ACTION = "internal:extensions/handle-transportaction"; + public static final String TRANSPORT_ACTION_REQUEST_FROM_EXTENSION = "internal:extensions/request-transportaction-from-extension"; + public static final int EXTENSION_REQUEST_WAIT_TIMEOUT = 10; private static final Logger logger = LogManager.getLogger(ExtensionsManager.class); @@ -91,7 +97,6 @@ public class ExtensionsManager { */ public static enum RequestType { REQUEST_EXTENSION_CLUSTER_STATE, - REQUEST_EXTENSION_LOCAL_NODE, REQUEST_EXTENSION_CLUSTER_SETTINGS, REQUEST_EXTENSION_REGISTER_REST_ACTIONS, REQUEST_EXTENSION_REGISTER_SETTINGS, @@ -111,6 +116,7 @@ public static enum OpenSearchRequestType { } private final Path extensionsPath; + private ExtensionTransportActionsHandler extensionTransportActionsHandler; // A list of initialized extensions, a subset of the values of map below which includes all extensions private List extensions; private Map extensionIdMap; @@ -120,6 +126,7 @@ public static enum OpenSearchRequestType { private ClusterService clusterService; private Settings environmentSettings; private AddSettingsUpdateConsumerRequestHandler addSettingsUpdateConsumerRequestHandler; + private NodeClient client; public ExtensionsManager() { this.extensionsPath = Path.of(""); @@ -135,10 +142,13 @@ public ExtensionsManager() { public ExtensionsManager(Settings settings, Path extensionsPath) throws IOException { logger.info("ExtensionsManager initialized"); this.extensionsPath = extensionsPath; - this.transportService = null; this.extensions = new ArrayList(); this.extensionIdMap = new HashMap(); + // will be initialized in initializeServicesAndRestHandler which is called after the Node is initialized + this.transportService = null; this.clusterService = null; + this.client = null; + this.extensionTransportActionsHandler = null; /* * Now Discover extensions @@ -156,13 +166,15 @@ public ExtensionsManager(Settings settings, Path extensionsPath) throws IOExcept * @param transportService The Node's transport service. * @param clusterService The Node's cluster service. 
* @param initialEnvironmentSettings The finalized view of settings for the Environment + * @param client The client used to make transport requests */ public void initializeServicesAndRestHandler( RestController restController, SettingsModule settingsModule, TransportService transportService, ClusterService clusterService, - Settings initialEnvironmentSettings + Settings initialEnvironmentSettings, + NodeClient client ) { this.restActionsRequestHandler = new RestActionsRequestHandler(restController, extensionIdMap, transportService); this.customSettingsRequestHandler = new CustomSettingsRequestHandler(settingsModule); @@ -174,9 +186,20 @@ public void initializeServicesAndRestHandler( transportService, REQUEST_EXTENSION_UPDATE_SETTINGS ); + this.client = client; + this.extensionTransportActionsHandler = new ExtensionTransportActionsHandler(extensionIdMap, transportService, client); registerRequestHandler(); } + /** + * Handles Transport Request from {@link org.opensearch.extensions.action.ExtensionTransportAction} which was invoked by an extension via {@link ExtensionTransportActionsHandler}. + * + * @param request which was sent by an extension. + */ + public ExtensionActionResponse handleTransportRequest(ExtensionActionRequest request) throws InterruptedException { + return extensionTransportActionsHandler.sendTransportRequestToExtension(request); + } + private void registerRequestHandler() { transportService.registerRequestHandler( REQUEST_EXTENSION_REGISTER_REST_ACTIONS, @@ -202,14 +225,6 @@ private void registerRequestHandler() { ExtensionRequest::new, ((request, channel, task) -> channel.sendResponse(handleExtensionRequest(request))) ); - transportService.registerRequestHandler( - REQUEST_EXTENSION_LOCAL_NODE, - ThreadPool.Names.GENERIC, - false, - false, - ExtensionRequest::new, - ((request, channel, task) -> channel.sendResponse(handleExtensionRequest(request))) - ); transportService.registerRequestHandler( REQUEST_EXTENSION_CLUSTER_SETTINGS, ThreadPool.Names.GENERIC, @@ -242,7 +257,19 @@ private void registerRequestHandler() { false, false, RegisterTransportActionsRequest::new, - ((request, channel, task) -> channel.sendResponse(handleRegisterTransportActionsRequest(request))) + ((request, channel, task) -> channel.sendResponse( + extensionTransportActionsHandler.handleRegisterTransportActionsRequest(request) + )) + ); + transportService.registerRequestHandler( + TRANSPORT_ACTION_REQUEST_FROM_EXTENSION, + ThreadPool.Names.GENERIC, + false, + false, + TransportActionRequestFromExtension::new, + ((request, channel, task) -> channel.sendResponse( + extensionTransportActionsHandler.handleTransportActionRequestFromExtension(request) + )) ); } @@ -301,7 +328,8 @@ private void loadExtension(Extension extension) throws IOException { extension.getClassName(), new ArrayList(), Boolean.parseBoolean(extension.hasNativeController()) - ) + ), + extension.getDependencies() ); extensionIdMap.put(extension.getUniqueId(), discoveryExtensionNode); logger.info("Loaded extension with uniqueId " + extension.getUniqueId() + ": " + extension); @@ -364,7 +392,7 @@ public String executor() { initializeExtensionResponseHandler ); // TODO: make asynchronous - inProgressFuture.get(100, TimeUnit.SECONDS); + inProgressFuture.get(EXTENSION_REQUEST_WAIT_TIMEOUT, TimeUnit.SECONDS); } catch (Exception e) { try { throw e; @@ -374,22 +402,6 @@ public String executor() { } } - /** - * Handles a {@link RegisterTransportActionsRequest}. - * - * @param transportActionsRequest The request to handle. 
- * @return A {@link AcknowledgedResponse} indicating success. - * @throws Exception if the request is not handled properly. - */ - TransportResponse handleRegisterTransportActionsRequest(RegisterTransportActionsRequest transportActionsRequest) throws Exception { - /* - * TODO: https://github.com/opensearch-project/opensearch-sdk-java/issues/107 - * Register these new Transport Actions with ActionModule - * and add support for NodeClient to recognise these actions when making transport calls. - */ - return new AcknowledgedResponse(true); - } - /** * Handles an {@link ExtensionRequest}. * @@ -401,8 +413,6 @@ TransportResponse handleExtensionRequest(ExtensionRequest extensionRequest) thro switch (extensionRequest.getRequestType()) { case REQUEST_EXTENSION_CLUSTER_STATE: return new ClusterStateResponse(clusterService.getClusterName(), clusterService.state(), false); - case REQUEST_EXTENSION_LOCAL_NODE: - return new LocalNodeResponse(clusterService); case REQUEST_EXTENSION_CLUSTER_SETTINGS: return new ClusterSettingsResponse(clusterService); case REQUEST_EXTENSION_ENVIRONMENT_SETTINGS: @@ -477,7 +487,7 @@ public void beforeIndexRemoved( acknowledgedResponseHandler ); // TODO: make asynchronous - inProgressIndexNameFuture.get(100, TimeUnit.SECONDS); + inProgressIndexNameFuture.get(EXTENSION_REQUEST_WAIT_TIMEOUT, TimeUnit.SECONDS); logger.info("Received ack response from Extension"); } catch (Exception e) { try { @@ -513,7 +523,7 @@ public String executor() { indicesModuleResponseHandler ); // TODO: make asynchronous - inProgressFuture.get(100, TimeUnit.SECONDS); + inProgressFuture.get(EXTENSION_REQUEST_WAIT_TIMEOUT, TimeUnit.SECONDS); logger.info("Received response from Extension"); } catch (Exception e) { try { @@ -547,10 +557,6 @@ public static String getRequestExtensionClusterState() { return REQUEST_EXTENSION_CLUSTER_STATE; } - public static String getRequestExtensionLocalNode() { - return REQUEST_EXTENSION_LOCAL_NODE; - } - public static String getRequestExtensionClusterSettings() { return REQUEST_EXTENSION_CLUSTER_SETTINGS; } @@ -661,4 +667,32 @@ public void setEnvironmentSettings(Settings environmentSettings) { this.environmentSettings = environmentSettings; } + public static String getRequestExtensionHandleTransportAction() { + return REQUEST_EXTENSION_HANDLE_TRANSPORT_ACTION; + } + + public static String getTransportActionRequestFromExtension() { + return TRANSPORT_ACTION_REQUEST_FROM_EXTENSION; + } + + public static int getExtensionRequestWaitTimeout() { + return EXTENSION_REQUEST_WAIT_TIMEOUT; + } + + public ExtensionTransportActionsHandler getExtensionTransportActionsHandler() { + return extensionTransportActionsHandler; + } + + public void setExtensionTransportActionsHandler(ExtensionTransportActionsHandler extensionTransportActionsHandler) { + this.extensionTransportActionsHandler = extensionTransportActionsHandler; + } + + public NodeClient getClient() { + return client; + } + + public void setClient(NodeClient client) { + this.client = client; + } + } diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java b/server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java index 8b6226e578ea3..61ab481bc0b76 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java @@ -9,6 +9,7 @@ package org.opensearch.extensions; import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -43,6 +44,7 @@ public static class Extension 
{ private String className; private String customFolderName; private String hasNativeController; + private List<ExtensionDependency> dependencies = Collections.emptyList(); public Extension() { name = ""; @@ -184,6 +186,10 @@ public void setHasNativeController(String hasNativeController) { this.hasNativeController = hasNativeController; } + public List<ExtensionDependency> getDependencies() { + return dependencies; + } + } public List<Extension> getExtensions() { diff --git a/server/src/main/java/org/opensearch/extensions/RegisterTransportActionsRequest.java b/server/src/main/java/org/opensearch/extensions/RegisterTransportActionsRequest.java index a3603aaf22dd0..47061f94dee83 100644 --- a/server/src/main/java/org/opensearch/extensions/RegisterTransportActionsRequest.java +++ b/server/src/main/java/org/opensearch/extensions/RegisterTransportActionsRequest.java @@ -8,6 +8,9 @@ package org.opensearch.extensions; +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionResponse; +import org.opensearch.action.support.TransportAction; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.transport.TransportRequest; @@ -16,6 +19,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.Map.Entry; /** * Request to register extension Transport actions @@ -23,20 +27,28 @@ * @opensearch.internal */ public class RegisterTransportActionsRequest extends TransportRequest { - private Map<String, Class> transportActions; + private String uniqueId; + private Map<String, Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>> transportActions; - public RegisterTransportActionsRequest(Map<String, Class> transportActions) { + public RegisterTransportActionsRequest( + String uniqueId, + Map<String, Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>> transportActions + ) { + this.uniqueId = uniqueId; this.transportActions = new HashMap<>(transportActions); } public RegisterTransportActionsRequest(StreamInput in) throws IOException { super(in); - Map<String, Class> actions = new HashMap<>(); + this.uniqueId = in.readString(); + Map<String, Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>> actions = new HashMap<>(); int actionCount = in.readVInt(); for (int i = 0; i < actionCount; i++) { try { String actionName = in.readString(); - Class transportAction = Class.forName(in.readString()); + @SuppressWarnings("unchecked") + Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>> transportAction = (Class<
+ ? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>) Class.forName(in.readString()); actions.put(actionName, transportAction); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Could not read transport action"); @@ -45,15 +57,21 @@ public RegisterTransportActionsRequest(StreamInput in) throws IOException { this.transportActions = actions; } - public Map<String, Class> getTransportActions() { + public String getUniqueId() { + return uniqueId; + } + + public Map<String, Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>> getTransportActions() { return transportActions; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); + out.writeString(uniqueId); out.writeVInt(this.transportActions.size()); - for (Map.Entry<String, Class> action : transportActions.entrySet()) { + for (Entry<String, Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>> action : transportActions + .entrySet()) { out.writeString(action.getKey()); out.writeString(action.getValue().getName()); } @@ -61,7 +79,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return "TransportActionsRequest{actions=" + transportActions + "}"; + return "TransportActionsRequest{uniqueId=" + uniqueId + ", actions=" + transportActions + "}"; } @Override @@ -69,11 +87,11 @@ public boolean equals(Object obj) { if (this == obj) return true; if (obj == null || getClass() != obj.getClass()) return false; RegisterTransportActionsRequest that = (RegisterTransportActionsRequest) obj; - return Objects.equals(transportActions, that.transportActions); + return Objects.equals(uniqueId, that.uniqueId) && Objects.equals(transportActions, that.transportActions); } @Override public int hashCode() { - return Objects.hash(transportActions); + return Objects.hash(uniqueId, transportActions); } } diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionActionRequest.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionActionRequest.java new file mode 100644 index 0000000000000..801b40e847d21 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionActionRequest.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.action; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * This class translates Extension transport request to ActionRequest + * which is internally used to make transport action call. + * + * @opensearch.internal + */ +public class ExtensionActionRequest extends ActionRequest { + /** + * action is the transport action intended to be invoked which is registered by an extension via {@link ExtensionTransportActionsHandler}. + */ + private final String action; + /** + * requestBytes is the raw bytes being transported between extensions. + */ + private final byte[] requestBytes; + + /** + * ExtensionActionRequest constructor. + * + * @param action is the transport action intended to be invoked which is registered by an extension via {@link ExtensionTransportActionsHandler}. + * @param requestBytes is the raw bytes being transported between extensions.
+ */ + public ExtensionActionRequest(String action, byte[] requestBytes) { + this.action = action; + this.requestBytes = requestBytes; + } + + /** + * ExtensionActionRequest constructor from {@link StreamInput}. + * + * @param in bytes stream input used to de-serialize the message. + * @throws IOException when message de-serialization fails. + */ + ExtensionActionRequest(StreamInput in) throws IOException { + super(in); + action = in.readString(); + requestBytes = in.readByteArray(); + } + + public String getAction() { + return action; + } + + public byte[] getRequestBytes() { + return requestBytes; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(action); + out.writeByteArray(requestBytes); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionActionResponse.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionActionResponse.java new file mode 100644 index 0000000000000..68729ada48c25 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionActionResponse.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.action; + +import org.opensearch.action.ActionResponse; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * This class encapsulates the transport response from extension + * + * @opensearch.internal + */ +public class ExtensionActionResponse extends ActionResponse { + /** + * responseBytes is the raw bytes being transported between extensions. + */ + private byte[] responseBytes; + + /** + * ExtensionActionResponse constructor. + * + * @param responseBytes is the raw bytes being transported between extensions. + */ + public ExtensionActionResponse(byte[] responseBytes) { + this.responseBytes = responseBytes; + } + + /** + * ExtensionActionResponse constructor from {@link StreamInput}. + * + * @param in bytes stream input used to de-serialize the message. + * @throws IOException when message de-serialization fails. + */ + public ExtensionActionResponse(StreamInput in) throws IOException { + responseBytes = in.readByteArray(); + } + + public byte[] getResponseBytes() { + return responseBytes; + } + + public void setResponseBytes(byte[] responseBytes) { + this.responseBytes = responseBytes; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByteArray(responseBytes); + } +} diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionHandleTransportRequest.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionHandleTransportRequest.java new file mode 100644 index 0000000000000..1b946d08f0459 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionHandleTransportRequest.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions.action; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.Objects; + +/** + * This class encapsulates a transport request to extension + * + * @opensearch.api + */ +public class ExtensionHandleTransportRequest extends TransportRequest { + /** + * action is the transport action intended to be invoked which is registered by an extension via {@link ExtensionTransportActionsHandler}. + */ + private final String action; + /** + * requestBytes is the raw bytes being transported between extensions. + */ + private final byte[] requestBytes; + + /** + * ExtensionHandleTransportRequest constructor. + * + * @param action is the transport action intended to be invoked which is registered by an extension via {@link ExtensionTransportActionsHandler}. + * @param requestBytes is the raw bytes being transported between extensions. + */ + public ExtensionHandleTransportRequest(String action, byte[] requestBytes) { + this.action = action; + this.requestBytes = requestBytes; + } + + /** + * ExtensionHandleTransportRequest constructor from {@link StreamInput}. + * + * @param in bytes stream input used to de-serialize the message. + * @throws IOException when message de-serialization fails. + */ + public ExtensionHandleTransportRequest(StreamInput in) throws IOException { + super(in); + this.action = in.readString(); + this.requestBytes = in.readByteArray(); + } + + public String getAction() { + return this.action; + } + + public byte[] getRequestBytes() { + return this.requestBytes; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(action); + out.writeByteArray(requestBytes); + } + + @Override + public String toString() { + return "ExtensionHandleTransportRequest{action=" + action + ", requestBytes=" + requestBytes + "}"; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + ExtensionHandleTransportRequest that = (ExtensionHandleTransportRequest) obj; + return Objects.equals(action, that.action) && Objects.equals(requestBytes, that.requestBytes); + } + + @Override + public int hashCode() { + return Objects.hash(action, requestBytes); + } + +} diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionProxyAction.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionProxyAction.java new file mode 100644 index 0000000000000..7345cf44e007f --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionProxyAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions.action; + +import org.opensearch.action.ActionType; + +/** + * The main proxy action for all extensions + * + * @opensearch.internal + */ +public class ExtensionProxyAction extends ActionType<ExtensionActionResponse> { + public static final String NAME = "cluster:internal/extensions"; + public static final ExtensionProxyAction INSTANCE = new ExtensionProxyAction(); + + public ExtensionProxyAction() { + super(NAME, ExtensionActionResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportAction.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportAction.java new file mode 100644 index 0000000000000..5976db78002eb --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportAction.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.action; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.extensions.ExtensionsManager; +import org.opensearch.node.Node; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +/** + * The main proxy transport action used to proxy a transport request from an extension to another extension + * + * @opensearch.internal + */ +public class ExtensionTransportAction extends HandledTransportAction<ExtensionActionRequest, ExtensionActionResponse> { + + private final String nodeName; + private final ClusterService clusterService; + private final ExtensionsManager extensionsManager; + + @Inject + public ExtensionTransportAction( + Settings settings, + TransportService transportService, + ActionFilters actionFilters, + ClusterService clusterService, + ExtensionsManager extensionsManager + ) { + super(ExtensionProxyAction.NAME, transportService, actionFilters, ExtensionActionRequest::new); + this.nodeName = Node.NODE_NAME_SETTING.get(settings); + this.clusterService = clusterService; + this.extensionsManager = extensionsManager; + } + + @Override + protected void doExecute(Task task, ExtensionActionRequest request, ActionListener<ExtensionActionResponse> listener) { + try { + listener.onResponse(extensionsManager.handleTransportRequest(request)); + } catch (Exception e) { + listener.onFailure(e); + } + } +} diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java new file mode 100644 index 0000000000000..ac3ec6630634a --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java @@ -0,0 +1,193 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.extensions.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.extensions.DiscoveryExtensionNode; +import org.opensearch.extensions.AcknowledgedResponse; +import org.opensearch.extensions.ExtensionsManager; +import org.opensearch.extensions.RegisterTransportActionsRequest; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ActionNotFoundTransportException; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponse; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * This class manages TransportActions for extensions + * + * @opensearch.internal + */ +public class ExtensionTransportActionsHandler { + private static final Logger logger = LogManager.getLogger(ExtensionTransportActionsHandler.class); + private Map<String, DiscoveryExtensionNode> actionsMap; + private final Map<String, DiscoveryExtensionNode> extensionIdMap; + private final TransportService transportService; + private final NodeClient client; + + public ExtensionTransportActionsHandler( + Map<String, DiscoveryExtensionNode> extensionIdMap, + TransportService transportService, + NodeClient client + ) { + this.actionsMap = new HashMap<>(); + this.extensionIdMap = extensionIdMap; + this.transportService = transportService; + this.client = client; + } + + /** + * Method to register actions for extensions. + * + * @param action to be registered. + * @param extension for which action is being registered. + * @throws IllegalArgumentException when the action being registered is already registered. + */ + void registerAction(String action, DiscoveryExtensionNode extension) throws IllegalArgumentException { + if (actionsMap.containsKey(action)) { + throw new IllegalArgumentException("The " + action + " you are trying to register is already registered"); + } + actionsMap.putIfAbsent(action, extension); + } + + /** + * Method to get extension for a given action. + * + * @param action for which to get the registered extension. + * @return the extension. + */ + public DiscoveryExtensionNode getExtension(String action) { + return actionsMap.get(action); + } + + /** + * Handles a {@link RegisterTransportActionsRequest}. + * + * @param transportActionsRequest The request to handle. + * @return An {@link AcknowledgedResponse} indicating success. + */ + public TransportResponse handleRegisterTransportActionsRequest(RegisterTransportActionsRequest transportActionsRequest) { + /* + * We are proxying the transport Actions through ExtensionProxyAction, so we really don't need to register dynamic actions for now.
+ */ + logger.debug("Register Transport Actions request received {}", transportActionsRequest); + DiscoveryExtensionNode extension = extensionIdMap.get(transportActionsRequest.getUniqueId()); + try { + for (String action : transportActionsRequest.getTransportActions().keySet()) { + registerAction(action, extension); + } + } catch (Exception e) { + logger.error("Could not register Transport Action " + e); + return new AcknowledgedResponse(false); + } + return new AcknowledgedResponse(true); + } + + /** + * Method which handles transport action request from an extension. + * + * @param request from extension. + * @return {@link TransportResponse} which is sent back to the transport action invoker. + * @throws InterruptedException when message transport fails. + */ + public TransportResponse handleTransportActionRequestFromExtension(TransportActionRequestFromExtension request) + throws InterruptedException { + DiscoveryExtensionNode extension = extensionIdMap.get(request.getUniqueId()); + final CountDownLatch inProgressLatch = new CountDownLatch(1); + final TransportActionResponseToExtension response = new TransportActionResponseToExtension(new byte[0]); + client.execute( + ExtensionProxyAction.INSTANCE, + new ExtensionActionRequest(request.getAction(), request.getRequestBytes()), + new ActionListener<ExtensionActionResponse>() { + @Override + public void onResponse(ExtensionActionResponse actionResponse) { + response.setResponseBytes(actionResponse.getResponseBytes()); + inProgressLatch.countDown(); + } + + @Override + public void onFailure(Exception exp) { + logger.debug("Transport request failed", exp); + byte[] responseBytes = ("Request failed: " + exp.getMessage()).getBytes(StandardCharsets.UTF_8); + response.setResponseBytes(responseBytes); + inProgressLatch.countDown(); + } + } + ); + inProgressLatch.await(ExtensionsManager.EXTENSION_REQUEST_WAIT_TIMEOUT, TimeUnit.SECONDS); + return response; + } + + /** + * Method to send transport action request to an extension to handle. + * + * @param request to extension to handle transport request. + * @return {@link ExtensionActionResponse} which encapsulates the transport response from the extension. + * @throws InterruptedException when message transport fails.
+ */ + public ExtensionActionResponse sendTransportRequestToExtension(ExtensionActionRequest request) throws InterruptedException { + DiscoveryExtensionNode extension = actionsMap.get(request.getAction()); + if (extension == null) { + throw new ActionNotFoundTransportException(request.getAction()); + } + final CountDownLatch inProgressLatch = new CountDownLatch(1); + final ExtensionActionResponse extensionActionResponse = new ExtensionActionResponse(new byte[0]); + final TransportResponseHandler<ExtensionActionResponse> extensionActionResponseTransportResponseHandler = + new TransportResponseHandler<ExtensionActionResponse>() { + + @Override + public ExtensionActionResponse read(StreamInput in) throws IOException { + return new ExtensionActionResponse(in); + } + + @Override + public void handleResponse(ExtensionActionResponse response) { + extensionActionResponse.setResponseBytes(response.getResponseBytes()); + inProgressLatch.countDown(); + } + + @Override + public void handleException(TransportException exp) { + logger.debug("Transport request failed", exp); + byte[] responseBytes = ("Request failed: " + exp.getMessage()).getBytes(StandardCharsets.UTF_8); + extensionActionResponse.setResponseBytes(responseBytes); + inProgressLatch.countDown(); + } + + @Override + public String executor() { + return ThreadPool.Names.GENERIC; + } + }; + try { + transportService.sendRequest( + extension, + ExtensionsManager.REQUEST_EXTENSION_HANDLE_TRANSPORT_ACTION, + new ExtensionHandleTransportRequest(request.getAction(), request.getRequestBytes()), + extensionActionResponseTransportResponseHandler + ); + } catch (Exception e) { + logger.info("Failed to send transport action to extension " + extension.getName(), e); + } + inProgressLatch.await(ExtensionsManager.EXTENSION_REQUEST_WAIT_TIMEOUT, TimeUnit.SECONDS); + return extensionActionResponse; + } +} diff --git a/server/src/main/java/org/opensearch/extensions/action/TransportActionRequestFromExtension.java b/server/src/main/java/org/opensearch/extensions/action/TransportActionRequestFromExtension.java new file mode 100644 index 0000000000000..df494297559b3 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/action/TransportActionRequestFromExtension.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.action; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.Objects; + +/** + * Transport Action Request from Extension + * + * @opensearch.api + */ +public class TransportActionRequestFromExtension extends TransportRequest { + /** + * action is the transport action intended to be invoked which is registered by an extension via {@link ExtensionTransportActionsHandler}. + */ + private final String action; + /** + * requestBytes is the raw bytes being transported between extensions. + */ + private final byte[] requestBytes; + /** + * uniqueId to identify which extension is making a transport request call. + */ + private final String uniqueId; + + /** + * TransportActionRequestFromExtension constructor. + * + * @param action is the transport action intended to be invoked which is registered by an extension via {@link ExtensionTransportActionsHandler}.
+ * @param requestBytes is the raw bytes being transported between extensions. + * @param uniqueId to identify which extension is making a transport request call. + */ + public TransportActionRequestFromExtension(String action, byte[] requestBytes, String uniqueId) { + this.action = action; + this.requestBytes = requestBytes; + this.uniqueId = uniqueId; + } + + /** + * TransportActionRequestFromExtension constructor from {@link StreamInput}. + * + * @param in bytes stream input used to de-serialize the message. + * @throws IOException when message de-serialization fails. + */ + public TransportActionRequestFromExtension(StreamInput in) throws IOException { + super(in); + this.action = in.readString(); + this.requestBytes = in.readByteArray(); + this.uniqueId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(action); + out.writeByteArray(requestBytes); + out.writeString(uniqueId); + } + + public String getAction() { + return this.action; + } + + public byte[] getRequestBytes() { + return this.requestBytes; + } + + public String getUniqueId() { + return this.uniqueId; + } + + @Override + public String toString() { + return "TransportActionRequestFromExtension{action=" + action + ", requestBytes=" + requestBytes + ", uniqueId=" + uniqueId + "}"; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + TransportActionRequestFromExtension that = (TransportActionRequestFromExtension) obj; + return Objects.equals(action, that.action) + && Objects.equals(requestBytes, that.requestBytes) + && Objects.equals(uniqueId, that.uniqueId); + } + + @Override + public int hashCode() { + return Objects.hash(action, requestBytes, uniqueId); + } +} diff --git a/server/src/main/java/org/opensearch/extensions/action/TransportActionResponseToExtension.java b/server/src/main/java/org/opensearch/extensions/action/TransportActionResponseToExtension.java new file mode 100644 index 0000000000000..2913402bcd5e1 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/action/TransportActionResponseToExtension.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.action; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; + +/** + * This class encapsulates transport response to extension. + * + * @opensearch.api + */ +public class TransportActionResponseToExtension extends TransportResponse { + /** + * responseBytes is the raw bytes being transported between extensions. + */ + private byte[] responseBytes; + + /** + * TransportActionResponseToExtension constructor. + * + * @param responseBytes is the raw bytes being transported between extensions. + */ + public TransportActionResponseToExtension(byte[] responseBytes) { + this.responseBytes = responseBytes; + } + + /** + * TransportActionResponseToExtension constructor from {@link StreamInput} + * @param in bytes stream input used to de-serialize the message. + * @throws IOException when message de-serialization fails. 
+ */ + public TransportActionResponseToExtension(StreamInput in) throws IOException { + this.responseBytes = in.readByteArray(); + } + + public void setResponseBytes(byte[] responseBytes) { + this.responseBytes = responseBytes; + } + + public byte[] getResponseBytes() { + return responseBytes; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByteArray(responseBytes); + } +} diff --git a/server/src/main/java/org/opensearch/extensions/action/package-info.java b/server/src/main/java/org/opensearch/extensions/action/package-info.java new file mode 100644 index 0000000000000..9bad08eaeb921 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/action/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Actions classes for the extensions package. OpenSearch extensions provide extensibility to OpenSearch.*/ +package org.opensearch.extensions.action; diff --git a/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java new file mode 100644 index 0000000000000..da59578b4917e --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java @@ -0,0 +1,292 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.rest; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.NamedXContentRegistry; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestRequest.Method; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Request to execute REST actions on extension node. + * This contains necessary portions of a {@link RestRequest} object, but does not pass the full request for security concerns. 
+ * + * @opensearch.api + */ +public class ExtensionRestRequest extends TransportRequest { + + private Method method; + private String path; + private Map<String, String> params; + private XContentType xContentType = null; + private BytesReference content; + // The owner of this request object + // Will be replaced with PrincipalIdentifierToken class from feature/identity + private String principalIdentifierToken; + + // Tracks consumed parameters and content + private final Set<String> consumedParams = new HashSet<>(); + private boolean contentConsumed = false; + + /** + * This object can be instantiated given method, uri, params, content and identifier + * + * @param method of type {@link Method} + * @param path the REST path string (excluding the query) + * @param params the REST params + * @param xContentType the content type, or null for plain text or no content + * @param content the REST request content + * @param principalIdentifier the owner of this request + */ + public ExtensionRestRequest( + Method method, + String path, + Map<String, String> params, + XContentType xContentType, + BytesReference content, + String principalIdentifier + ) { + this.method = method; + this.path = path; + this.params = params; + this.xContentType = xContentType; + this.content = content; + this.principalIdentifierToken = principalIdentifier; + } + + /** + * Instantiate this request from input stream + * + * @param in Input stream + * @throws IOException on failure to read the stream + */ + public ExtensionRestRequest(StreamInput in) throws IOException { + super(in); + method = in.readEnum(RestRequest.Method.class); + path = in.readString(); + params = in.readMap(StreamInput::readString, StreamInput::readString); + if (in.readBoolean()) { + xContentType = in.readEnum(XContentType.class); + } + content = in.readBytesReference(); + principalIdentifierToken = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeEnum(method); + out.writeString(path); + out.writeMap(params, StreamOutput::writeString, StreamOutput::writeString); + out.writeBoolean(xContentType != null); + if (xContentType != null) { + out.writeEnum(xContentType); + } + out.writeBytesReference(content); + out.writeString(principalIdentifierToken); + } + + /** + * Gets the REST method + * + * @return This REST request {@link Method} type + */ + public Method method() { + return method; + } + + /** + * Gets the REST path + * + * @return This REST request's path + */ + public String path() { + return path; + } + + /** + * Gets the full map of params without consuming them. Rest Handlers should use {@link #param(String)} or {@link #param(String, String)} + * to get parameter values. + * + * @return This REST request's params + */ + public Map<String, String> params() { + return params; + } + + /** + * Tests whether a parameter named {@code key} exists. + * + * @param key The parameter to test. + * @return True if there is a value for this parameter. + */ + public boolean hasParam(String key) { + return params.containsKey(key); + } + + /** + * Gets the value of a parameter, consuming it in the process. + * + * @param key The parameter key + * @return The parameter value if it exists, or null. + */ + public String param(String key) { + consumedParams.add(key); + return params.get(key); + } + + /** + * Gets the value of a parameter, consuming it in the process. + * + * @param key The parameter key + * @param defaultValue A value to return if the parameter value doesn't exist.
+     * @return The parameter value if it exists, or the default value.
+     */
+    public String param(String key, String defaultValue) {
+        consumedParams.add(key);
+        return params.getOrDefault(key, defaultValue);
+    }
+
+    /**
+     * Gets the value of a parameter as a long, consuming it in the process.
+     *
+     * @param key The parameter key
+     * @param defaultValue A value to return if the parameter value doesn't exist.
+     * @return The parameter value if it exists, or the default value.
+     */
+    public long paramAsLong(String key, long defaultValue) {
+        String value = param(key);
+        if (value == null) {
+            return defaultValue;
+        }
+        try {
+            return Long.parseLong(value);
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException("Unable to parse param '" + key + "' value '" + value + "' to a long.", e);
+        }
+    }
+
+    /**
+     * Returns parameters consumed by {@link #param(String)} or {@link #param(String, String)}.
+     *
+     * @return a list of consumed parameters.
+     */
+    public List<String> consumedParams() {
+        return new ArrayList<>(consumedParams);
+    }
+
+    /**
+     * Gets the content type, if any.
+     *
+     * @return the content type of the {@link #content()}, or null if the content is plain text or if there is no content.
+     */
+    public XContentType getXContentType() {
+        return xContentType;
+    }
+
+    /**
+     * Gets the content.
+     *
+     * @return This REST request's content
+     */
+    public BytesReference content() {
+        contentConsumed = true;
+        return content;
+    }
+
+    /**
+     * Tests whether content exists.
+     *
+     * @return True if there is non-empty content.
+     */
+    public boolean hasContent() {
+        return content.length() > 0;
+    }
+
+    /**
+     * Tests whether content has been consumed.
+     *
+     * @return True if the content was consumed.
+     */
+    public boolean isContentConsumed() {
+        return contentConsumed;
+    }
+
+    /**
+     * Gets a parser for the contents of this request if there is content and an xContentType.
+     *
+     * @param xContentRegistry The extension's xContentRegistry
+     * @return A parser for the given content and content type.
+     * @throws OpenSearchParseException on missing body or xContentType.
+     * @throws IOException on a failure creating the parser.
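+     * <p>
+     * Note that, as written, this method reads the content bytes directly and does not by itself mark
+     * the content as consumed; a handler that fully processes the body through this parser may also
+     * want to call {@link #content()} so that consumption is reported back via {@link ExtensionRestResponse}.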
+ */ + public final XContentParser contentParser(NamedXContentRegistry xContentRegistry) throws IOException { + if (!hasContent() || getXContentType() == null) { + throw new OpenSearchParseException("There is no request body or the ContentType is invalid."); + } + return getXContentType().xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, content.streamInput()); + } + + /** + * @return This REST request issuer's identity token + */ + public String getRequestIssuerIdentity() { + return principalIdentifierToken; + } + + @Override + public String toString() { + return "ExtensionRestRequest{method=" + + method + + ", path=" + + path + + ", params=" + + params + + ", xContentType=" + + xContentType + + ", contentLength=" + + content.length() + + ", requester=" + + principalIdentifierToken + + "}"; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + ExtensionRestRequest that = (ExtensionRestRequest) obj; + return Objects.equals(method, that.method) + && Objects.equals(path, that.path) + && Objects.equals(params, that.params) + && Objects.equals(xContentType, that.xContentType) + && Objects.equals(content, that.content) + && Objects.equals(principalIdentifierToken, that.principalIdentifierToken); + } + + @Override + public int hashCode() { + return Objects.hash(method, path, params, xContentType, content, principalIdentifierToken); + } +} diff --git a/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestResponse.java b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestResponse.java new file mode 100644 index 0000000000000..0eb59823bee93 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestResponse.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.rest; + +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestStatus; + +import java.util.List; + +/** + * A subclass of {@link BytesRestResponse} which also tracks consumed parameters and content. + * + * @opensearch.api + */ +public class ExtensionRestResponse extends BytesRestResponse { + + private final List consumedParams; + private final boolean contentConsumed; + + /** + * Creates a new response based on {@link XContentBuilder}. + * + * @param request the REST request being responded to. + * @param status The REST status. + * @param builder The builder for the response. + */ + public ExtensionRestResponse(ExtensionRestRequest request, RestStatus status, XContentBuilder builder) { + super(status, builder); + this.consumedParams = request.consumedParams(); + this.contentConsumed = request.isContentConsumed(); + } + + /** + * Creates a new plain text response. + * + * @param request the REST request being responded to. + * @param status The REST status. + * @param content A plain text response string. + */ + public ExtensionRestResponse(ExtensionRestRequest request, RestStatus status, String content) { + super(status, content); + this.consumedParams = request.consumedParams(); + this.contentConsumed = request.isContentConsumed(); + } + + /** + * Creates a new plain text response. 
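+     * (The content type of the response string is supplied explicitly here rather than defaulting to
+     * plain text.)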
+ * + * @param request the REST request being responded to. + * @param status The REST status. + * @param contentType The content type of the response string. + * @param content A response string. + */ + public ExtensionRestResponse(ExtensionRestRequest request, RestStatus status, String contentType, String content) { + super(status, contentType, content); + this.consumedParams = request.consumedParams(); + this.contentConsumed = request.isContentConsumed(); + } + + /** + * Creates a binary response. + * + * @param request the REST request being responded to. + * @param status The REST status. + * @param contentType The content type of the response bytes. + * @param content Response bytes. + */ + public ExtensionRestResponse(ExtensionRestRequest request, RestStatus status, String contentType, byte[] content) { + super(status, contentType, content); + this.consumedParams = request.consumedParams(); + this.contentConsumed = request.isContentConsumed(); + } + + /** + * Creates a binary response. + * + * @param request the REST request being responded to. + * @param status The REST status. + * @param contentType The content type of the response bytes. + * @param content Response bytes. + */ + public ExtensionRestResponse(ExtensionRestRequest request, RestStatus status, String contentType, BytesReference content) { + super(status, contentType, content); + this.consumedParams = request.consumedParams(); + this.contentConsumed = request.isContentConsumed(); + } + + /** + * Gets the list of consumed parameters. These are needed to consume the parameters of the original request. + * + * @return the list of consumed params. + */ + public List getConsumedParams() { + return consumedParams; + } + + /** + * Reports whether content was consumed. + * + * @return true if the content was consumed, false otherwise. + */ + public boolean isContentConsumed() { + return contentConsumed; + } +} diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionRequest.java b/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionRequest.java deleted file mode 100644 index 128dad2645b42..0000000000000 --- a/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionRequest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.extensions.rest; - -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestRequest.Method; -import org.opensearch.transport.TransportRequest; - -import java.io.IOException; -import java.util.Objects; - -/** - * Request to execute REST actions on extension node - * - * @opensearch.internal - */ -public class RestExecuteOnExtensionRequest extends TransportRequest { - - private Method method; - private String uri; - - public RestExecuteOnExtensionRequest(Method method, String uri) { - this.method = method; - this.uri = uri; - } - - public RestExecuteOnExtensionRequest(StreamInput in) throws IOException { - super(in); - try { - method = RestRequest.Method.valueOf(in.readString()); - } catch (IllegalArgumentException e) { - throw new IOException(e); - } - uri = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(method.name()); - out.writeString(uri); - } - - public Method getMethod() { - return method; - } - - public String getUri() { - return uri; - } - - @Override - public String toString() { - return "RestExecuteOnExtensionRequest{method=" + method + ", uri=" + uri + "}"; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null || getClass() != obj.getClass()) return false; - RestExecuteOnExtensionRequest that = (RestExecuteOnExtensionRequest) obj; - return Objects.equals(method, that.method) && Objects.equals(uri, that.uri); - } - - @Override - public int hashCode() { - return Objects.hash(method, uri); - } -} diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java b/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java index b7d7aae3faaab..e2625105e705c 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java @@ -10,14 +10,11 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestResponse; import org.opensearch.rest.RestStatus; import org.opensearch.transport.TransportResponse; import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -27,20 +24,13 @@ * @opensearch.internal */ public class RestExecuteOnExtensionResponse extends TransportResponse { + private RestStatus status; private String contentType; private byte[] content; private Map> headers; - - /** - * Instantiate this object with a status and response string. - * - * @param status The REST status. - * @param responseString The response content as a String. - */ - public RestExecuteOnExtensionResponse(RestStatus status, String responseString) { - this(status, BytesRestResponse.TEXT_CONTENT_TYPE, responseString.getBytes(StandardCharsets.UTF_8), Collections.emptyMap()); - } + private List consumedParams; + private boolean contentConsumed; /** * Instantiate this object with the components of a {@link RestResponse}. @@ -49,33 +39,49 @@ public RestExecuteOnExtensionResponse(RestStatus status, String responseString) * @param contentType The type of the content. * @param content The content. * @param headers The headers. 
+     * @param consumedParams The consumed params.
+     * @param contentConsumed Whether content was consumed.
      */
-    public RestExecuteOnExtensionResponse(RestStatus status, String contentType, byte[] content, Map<String, List<String>> headers) {
+    public RestExecuteOnExtensionResponse(
+        RestStatus status,
+        String contentType,
+        byte[] content,
+        Map<String, List<String>> headers,
+        List<String> consumedParams,
+        boolean contentConsumed
+    ) {
+        super();
         setStatus(status);
         setContentType(contentType);
         setContent(content);
         setHeaders(headers);
+        setConsumedParams(consumedParams);
+        setContentConsumed(contentConsumed);
     }
 
     /**
-     * Instantiate this object from a Transport Stream
+     * Instantiate this object from a Transport Stream.
      *
      * @param in The stream input.
      * @throws IOException on transport failure.
      */
     public RestExecuteOnExtensionResponse(StreamInput in) throws IOException {
-        setStatus(RestStatus.readFrom(in));
+        setStatus(in.readEnum(RestStatus.class));
         setContentType(in.readString());
         setContent(in.readByteArray());
         setHeaders(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
+        setConsumedParams(in.readStringList());
+        setContentConsumed(in.readBoolean());
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        RestStatus.writeTo(out, status);
+        out.writeEnum(status);
         out.writeString(contentType);
         out.writeByteArray(content);
         out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
+        out.writeStringCollection(consumedParams);
+        out.writeBoolean(contentConsumed);
     }
 
     public RestStatus getStatus() {
@@ -109,4 +115,20 @@ public Map<String, List<String>> getHeaders() {
     public void setHeaders(Map<String, List<String>> headers) {
         this.headers = Map.copyOf(headers);
     }
+
+    public List<String> getConsumedParams() {
+        return consumedParams;
+    }
+
+    public void setConsumedParams(List<String> consumedParams) {
+        this.consumedParams = consumedParams;
+    }
+
+    public boolean isContentConsumed() {
+        return contentConsumed;
+    }
+
+    public void setContentConsumed(boolean contentConsumed) {
+        this.contentConsumed = contentConsumed;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java
index d08a74c0ba314..38e92ed604a09 100644
--- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java
+++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java
@@ -11,7 +11,9 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.opensearch.client.node.NodeClient;
+import org.opensearch.common.bytes.BytesReference;
 import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.extensions.DiscoveryExtensionNode;
 import org.opensearch.extensions.ExtensionsManager;
 import org.opensearch.rest.BaseRestHandler;
@@ -26,14 +28,13 @@
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.security.Principal;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
+import static java.util.Collections.emptyList;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.unmodifiableList;
 
@@ -44,17 +45,23 @@ public class RestSendToExtensionAction extends BaseRestHandler {
 
     private static final String SEND_TO_EXTENSION_ACTION = "send_to_extension_action";
     private static final Logger logger = LogManager.getLogger(RestSendToExtensionAction.class);
-    private static final String CONSUMED_PARAMS_KEY = "extension.consumed.parameters";
+    // To replace with user identity see https://github.com/opensearch-project/OpenSearch/pull/4247
+    private static final Principal DEFAULT_PRINCIPAL = new Principal() {
+        @Override
+        public String getName() {
+            return "OpenSearchUser";
+        }
+    };
 
     private final List<Route> routes;
-    private final String uriPrefix;
+    private final String pathPrefix;
     private final DiscoveryExtensionNode discoveryExtensionNode;
     private final TransportService transportService;
 
     /**
      * Instantiates this object using a {@link RegisterRestActionsRequest} to populate the routes.
      *
-     * @param restActionsRequest A request encapsulating a list of Strings with the API methods and URIs.
+     * @param restActionsRequest A request encapsulating a list of Strings with the API methods and paths.
      * @param transportService The OpenSearch transport service
      * @param discoveryExtensionNode The extension node to which to send actions
      */
@@ -63,20 +70,20 @@ public RestSendToExtensionAction(
         DiscoveryExtensionNode discoveryExtensionNode,
         TransportService transportService
     ) {
-        this.uriPrefix = "/_extensions/_" + restActionsRequest.getUniqueId();
+        this.pathPrefix = "/_extensions/_" + restActionsRequest.getUniqueId();
         List<Route> restActionsAsRoutes = new ArrayList<>();
         for (String restAction : restActionsRequest.getRestActions()) {
             RestRequest.Method method;
-            String uri;
+            String path;
             try {
                 int delim = restAction.indexOf(' ');
                 method = RestRequest.Method.valueOf(restAction.substring(0, delim));
-                uri = uriPrefix + restAction.substring(delim).trim();
+                path = pathPrefix + restAction.substring(delim).trim();
             } catch (IndexOutOfBoundsException | IllegalArgumentException e) {
                 throw new IllegalArgumentException(restAction + " does not begin with a valid REST method");
             }
-            logger.info("Registering: " + method + " " + uri);
-            restActionsAsRoutes.add(new Route(method, uri));
+            logger.info("Registering: " + method + " " + path);
+            restActionsAsRoutes.add(new Route(method, path));
         }
         this.routes = unmodifiableList(restActionsAsRoutes);
         this.discoveryExtensionNode = discoveryExtensionNode;
@@ -95,21 +102,27 @@ public List<Route> routes() {
 
     @Override
     public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
-        Method method = request.getHttpRequest().method();
-        String uri = request.getHttpRequest().uri();
-        if (uri.startsWith(uriPrefix)) {
-            uri = uri.substring(uriPrefix.length());
+        Method method = request.method();
+        String path = request.path();
+        Map<String, String> params = request.params();
+        XContentType contentType = request.getXContentType();
+        BytesReference content = request.content();
+
+        if (path.startsWith(pathPrefix)) {
+            path = path.substring(pathPrefix.length());
         }
-        String message = "Forwarding the request " + method + " " + uri + " to " + discoveryExtensionNode;
+        String message = "Forwarding the request " + method + " " + path + " to " + discoveryExtensionNode;
         logger.info(message);
         // Initialize response. Values will be changed in the handler.
final RestExecuteOnExtensionResponse restExecuteOnExtensionResponse = new RestExecuteOnExtensionResponse( RestStatus.INTERNAL_SERVER_ERROR, BytesRestResponse.TEXT_CONTENT_TYPE, message.getBytes(StandardCharsets.UTF_8), - emptyMap() + emptyMap(), + emptyList(), + false ); - final CompletableFuture inProgressFuture = new CompletableFuture<>(); + final CountDownLatch inProgressLatch = new CountDownLatch(1); final TransportResponseHandler restExecuteOnExtensionResponseHandler = new TransportResponseHandler< RestExecuteOnExtensionResponse>() { @@ -124,27 +137,21 @@ public void handleResponse(RestExecuteOnExtensionResponse response) { restExecuteOnExtensionResponse.setStatus(response.getStatus()); restExecuteOnExtensionResponse.setContentType(response.getContentType()); restExecuteOnExtensionResponse.setContent(response.getContent()); - // Extract the consumed parameters from the header - Map> headers = response.getHeaders(); - List consumedParams = headers.get(CONSUMED_PARAMS_KEY); - if (consumedParams != null) { - consumedParams.stream().forEach(p -> request.param(p)); + restExecuteOnExtensionResponse.setHeaders(response.getHeaders()); + // Consume parameters and content + response.getConsumedParams().stream().forEach(p -> request.param(p)); + if (response.isContentConsumed()) { + request.content(); } - Map> headersWithoutConsumedParams = headers.entrySet() - .stream() - .filter(e -> !e.getKey().equals(CONSUMED_PARAMS_KEY)) - .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue())); - restExecuteOnExtensionResponse.setHeaders(headersWithoutConsumedParams); - inProgressFuture.complete(response); } @Override public void handleException(TransportException exp) { - logger.error("REST request failed", exp); + logger.debug("REST request failed", exp); // Status is already defaulted to 500 (INTERNAL_SERVER_ERROR) byte[] responseBytes = ("Request failed: " + exp.getMessage()).getBytes(StandardCharsets.UTF_8); restExecuteOnExtensionResponse.setContent(responseBytes); - inProgressFuture.completeExceptionally(exp); + inProgressLatch.countDown(); } @Override @@ -153,17 +160,20 @@ public String executor() { } }; try { + // Will be replaced with ExtensionTokenProcessor and PrincipalIdentifierToken classes from feature/identity + final String extensionTokenProcessor = "placeholder_token_processor"; + final String requestIssuerIdentity = "placeholder_request_issuer_identity"; + transportService.sendRequest( discoveryExtensionNode, ExtensionsManager.REQUEST_REST_EXECUTE_ON_EXTENSION_ACTION, // HERE BE DRAGONS - DO NOT INCLUDE HEADERS // SEE https://github.com/opensearch-project/OpenSearch/issues/4429 - new RestExecuteOnExtensionRequest(method, uri), + new ExtensionRestRequest(method, path, params, contentType, content, requestIssuerIdentity), restExecuteOnExtensionResponseHandler ); try { - // TODO: make asynchronous - inProgressFuture.get(5, TimeUnit.SECONDS); + inProgressLatch.await(5, TimeUnit.SECONDS); } catch (InterruptedException e) { return channel -> channel.sendResponse( new BytesRestResponse(RestStatus.REQUEST_TIMEOUT, "No response from extension to request.") @@ -177,11 +187,11 @@ public String executor() { restExecuteOnExtensionResponse.getContentType(), restExecuteOnExtensionResponse.getContent() ); - for (Entry> headerEntry : restExecuteOnExtensionResponse.getHeaders().entrySet()) { - for (String value : headerEntry.getValue()) { - restResponse.addHeader(headerEntry.getKey(), value); - } - } + // No constructor that includes headers so we roll our own + 
restExecuteOnExtensionResponse.getHeaders() + .entrySet() + .stream() + .forEach(e -> { e.getValue().stream().forEach(v -> restResponse.addHeader(e.getKey(), v)); }); return channel -> channel.sendResponse(restResponse); } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index e127826921fe9..46270230ccf27 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -833,7 +833,8 @@ protected Node( settingsModule, transportService, clusterService, - environment.settings() + environment.settings(), + client ); } final GatewayMetaState gatewayMetaState = new GatewayMetaState(); @@ -1001,6 +1002,9 @@ protected Node( b.bind(Client.class).toInstance(client); b.bind(NodeClient.class).toInstance(client); b.bind(Environment.class).toInstance(this.environment); + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + b.bind(ExtensionsManager.class).toInstance(this.extensionsManager); + } b.bind(ThreadPool.class).toInstance(threadPool); b.bind(NodeEnvironment.class).toInstance(nodeEnvironment); b.bind(ResourceWatcherService.class).toInstance(resourceWatcherService); diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index 472899db7dad6..47820ee739c49 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -42,7 +42,6 @@ import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterSettingsResponse; -import org.opensearch.cluster.LocalNodeResponse; import org.opensearch.env.EnvironmentSettingsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; @@ -79,6 +78,7 @@ import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -94,6 +94,7 @@ public class ExtensionsManagerTests extends OpenSearchTestCase { private RestController restController; private SettingsModule settingsModule; private ClusterService clusterService; + private NodeClient client; private MockNioTransport transport; private Path extensionDir; private final ThreadPool threadPool = new TestThreadPool(ExtensionsManagerTests.class.getSimpleName()); @@ -126,7 +127,10 @@ public class ExtensionsManagerTests extends OpenSearchTestCase { " javaVersion: '17'", " className: fakeClass2", " customFolderName: fakeFolder2", - " hasNativeController: true" + " hasNativeController: true", + " dependencies:", + " - uniqueId: 'uniqueid0'", + " - version: '2.0.0'" ); private DiscoveryExtensionNode extensionNode; @@ -190,8 +194,10 @@ public void setup() throws Exception { "fakeClass1", new ArrayList(), false - ) + ), + Collections.emptyList() ); + client = new NoOpNodeClient(this.getTestName()); } @Override @@ -199,6 +205,7 @@ public void setup() throws Exception { public void tearDown() throws Exception { super.tearDown(); transportService.close(); + client.close(); ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } @@ -209,6 +216,10 @@ public void 
testDiscover() throws Exception { List expectedUninitializedExtensions = new ArrayList(); + String expectedUniqueId = "uniqueid0"; + Version expectedVersion = Version.fromString("2.0.0"); + ExtensionDependency expectedDependency = new ExtensionDependency(expectedUniqueId, expectedVersion); + expectedUninitializedExtensions.add( new DiscoveryExtensionNode( "firstExtension", @@ -228,7 +239,8 @@ public void testDiscover() throws Exception { "fakeClass1", new ArrayList(), false - ) + ), + Collections.emptyList() ) ); @@ -251,10 +263,12 @@ public void testDiscover() throws Exception { "fakeClass2", new ArrayList(), true - ) + ), + List.of(expectedDependency) ) ); assertEquals(expectedUninitializedExtensions.size(), extensionsManager.getExtensionIdMap().values().size()); + assertEquals(List.of(expectedDependency), expectedUninitializedExtensions.get(1).getDependencies()); assertTrue(expectedUninitializedExtensions.containsAll(extensionsManager.getExtensionIdMap().values())); assertTrue(extensionsManager.getExtensionIdMap().values().containsAll(expectedUninitializedExtensions)); } @@ -289,12 +303,74 @@ public void testNonUniqueExtensionsDiscovery() throws Exception { "fakeClass1", new ArrayList(), false - ) + ), + Collections.emptyList() ) ); assertEquals(expectedUninitializedExtensions.size(), extensionsManager.getExtensionIdMap().values().size()); assertTrue(expectedUninitializedExtensions.containsAll(extensionsManager.getExtensionIdMap().values())); assertTrue(extensionsManager.getExtensionIdMap().values().containsAll(expectedUninitializedExtensions)); + assertTrue(expectedUninitializedExtensions.containsAll(emptyList())); + } + + public void testDiscoveryExtension() throws Exception { + String expectedId = "test id"; + Version expectedVersion = Version.fromString("2.0.0"); + ExtensionDependency expectedDependency = new ExtensionDependency(expectedId, expectedVersion); + + DiscoveryExtensionNode discoveryExtensionNode = new DiscoveryExtensionNode( + "firstExtension", + "uniqueid1", + "uniqueid1", + "myIndependentPluginHost1", + "127.0.0.0", + new TransportAddress(InetAddress.getByName("127.0.0.0"), 9300), + new HashMap(), + Version.fromString("3.0.0"), + new PluginInfo( + "firstExtension", + "Fake description 1", + "0.0.7", + Version.fromString("3.0.0"), + "14", + "fakeClass1", + new ArrayList(), + false + ), + List.of(expectedDependency) + ); + + assertEquals(List.of(expectedDependency), discoveryExtensionNode.getDependencies()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + discoveryExtensionNode.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + discoveryExtensionNode = new DiscoveryExtensionNode(in); + + assertEquals(List.of(expectedDependency), discoveryExtensionNode.getDependencies()); + } + } + } + + public void testExtensionDependency() throws Exception { + String expectedUniqueId = "Test uniqueId"; + Version expectedVersion = Version.fromString("3.0.0"); + + ExtensionDependency dependency = new ExtensionDependency(expectedUniqueId, expectedVersion); + + assertEquals(expectedUniqueId, dependency.getUniqueId()); + assertEquals(expectedVersion, dependency.getVersion()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + dependency.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + dependency = new ExtensionDependency(in); + assertEquals(expectedUniqueId, dependency.getUniqueId()); + assertEquals(expectedVersion, 
dependency.getVersion()); + } + } } public void testNonAccessibleDirectory() throws Exception { @@ -339,12 +415,9 @@ public void testEmptyExtensionsFile() throws Exception { public void testInitialize() throws Exception { Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); - ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); - transportService.start(); - transportService.acceptIncomingRequests(); - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, transportService, clusterService, settings); + initialize(extensionsManager); try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ExtensionsManager.class))) { @@ -379,8 +452,8 @@ public void testHandleRegisterRestActionsRequest() throws Exception { Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + initialize(extensionsManager); - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, transportService, clusterService, settings); String uniqueIdStr = "uniqueid1"; List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList); @@ -392,10 +465,9 @@ public void testHandleRegisterRestActionsRequest() throws Exception { public void testHandleRegisterSettingsRequest() throws Exception { Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); - ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + initialize(extensionsManager); - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, transportService, clusterService, settings); String uniqueIdStr = "uniqueid1"; List> settingsList = List.of( Setting.boolSetting("index.falseSetting", false, Property.IndexScope, Property.Dynamic), @@ -410,8 +482,8 @@ public void testHandleRegisterSettingsRequest() throws Exception { public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Exception { ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + initialize(extensionsManager); - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, transportService, clusterService, settings); String uniqueIdStr = "uniqueid1"; List actionsList = List.of("FOO /foo", "PUT /bar", "POST /baz"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList); @@ -422,12 +494,8 @@ public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Excep } public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exception { - - Path extensionDir = createTempDir(); - ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); - - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, transportService, clusterService, settings); + initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; List actionsList = List.of("GET", "PUT /bar", "POST /baz"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList); @@ -438,19 +506,15 @@ public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exceptio } public void testHandleExtensionRequest() throws Exception { - 
ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + initialize(extensionsManager); - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, transportService, clusterService, settings); ExtensionRequest clusterStateRequest = new ExtensionRequest(ExtensionsManager.RequestType.REQUEST_EXTENSION_CLUSTER_STATE); assertEquals(ClusterStateResponse.class, extensionsManager.handleExtensionRequest(clusterStateRequest).getClass()); ExtensionRequest clusterSettingRequest = new ExtensionRequest(ExtensionsManager.RequestType.REQUEST_EXTENSION_CLUSTER_SETTINGS); assertEquals(ClusterSettingsResponse.class, extensionsManager.handleExtensionRequest(clusterSettingRequest).getClass()); - ExtensionRequest localNodeRequest = new ExtensionRequest(ExtensionsManager.RequestType.REQUEST_EXTENSION_LOCAL_NODE); - assertEquals(LocalNodeResponse.class, extensionsManager.handleExtensionRequest(localNodeRequest).getClass()); - ExtensionRequest environmentSettingsRequest = new ExtensionRequest( ExtensionsManager.RequestType.REQUEST_EXTENSION_ENVIRONMENT_SETTINGS ); @@ -529,7 +593,7 @@ public void testAddSettingsUpdateConsumerRequest() throws Exception { Path extensionDir = createTempDir(); Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, transportService, clusterService, settings); + initialize(extensionsManager); List> componentSettings = List.of( Setting.boolSetting("falseSetting", false, Property.IndexScope, Property.NodeScope), @@ -576,8 +640,7 @@ public void testHandleAddSettingsUpdateConsumerRequest() throws Exception { Path extensionDir = createTempDir(); Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); - - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, transportService, clusterService, settings); + initialize(extensionsManager); List> componentSettings = List.of( Setting.boolSetting("falseSetting", false, Property.Dynamic), @@ -599,7 +662,7 @@ public void testUpdateSettingsRequest() throws Exception { Path extensionDir = createTempDir(); Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, transportService, clusterService, settings); + initialize(extensionsManager); Setting componentSetting = Setting.boolSetting("falseSetting", false, Property.Dynamic); SettingType settingType = SettingType.Boolean; @@ -640,19 +703,22 @@ public void testRegisterHandler() throws Exception { Collections.emptySet() ) ); - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, mockTransportService, clusterService, settings); + extensionsManager.initializeServicesAndRestHandler( + restController, + settingsModule, + mockTransportService, + clusterService, + settings, + client + ); verify(mockTransportService, times(8)).registerRequestHandler(anyString(), anyString(), anyBoolean(), anyBoolean(), any(), any()); } public void testOnIndexModule() throws Exception { Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); 
- ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); - - transportService.start(); - transportService.acceptIncomingRequests(); - extensionsManager.initializeServicesAndRestHandler(restController, settingsModule, transportService, clusterService, settings); + initialize(extensionsManager); Environment environment = TestEnvironment.newEnvironment(settings); AnalysisRegistry emptyAnalysisRegistry = new AnalysisRegistry( @@ -696,4 +762,16 @@ public void testOnIndexModule() throws Exception { } } + private void initialize(ExtensionsManager extensionsManager) { + transportService.start(); + transportService.acceptIncomingRequests(); + extensionsManager.initializeServicesAndRestHandler( + restController, + settingsModule, + transportService, + clusterService, + settings, + client + ); + } } diff --git a/server/src/test/java/org/opensearch/extensions/RegisterTransportActionsRequestTests.java b/server/src/test/java/org/opensearch/extensions/RegisterTransportActionsRequestTests.java index ed36cc5290bb1..eb59c80ac6461 100644 --- a/server/src/test/java/org/opensearch/extensions/RegisterTransportActionsRequestTests.java +++ b/server/src/test/java/org/opensearch/extensions/RegisterTransportActionsRequestTests.java @@ -9,6 +9,7 @@ package org.opensearch.extensions; import org.junit.Before; +import org.opensearch.action.admin.indices.create.AutoCreateAction.TransportAction; import org.opensearch.common.collect.Map; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; @@ -21,7 +22,7 @@ public class RegisterTransportActionsRequestTests extends OpenSearchTestCase { @Before public void setup() { - this.originalRequest = new RegisterTransportActionsRequest(Map.of("testAction", Map.class)); + this.originalRequest = new RegisterTransportActionsRequest("extension-uniqueId", Map.of("testAction", TransportAction.class)); } public void testRegisterTransportActionsRequest() throws IOException { @@ -37,6 +38,9 @@ public void testRegisterTransportActionsRequest() throws IOException { } public void testToString() { - assertEquals(originalRequest.toString(), "TransportActionsRequest{actions={testAction=class org.opensearch.common.collect.Map}}"); + assertEquals( + originalRequest.toString(), + "TransportActionsRequest{uniqueId=extension-uniqueId, actions={testAction=class org.opensearch.action.admin.indices.create.AutoCreateAction$TransportAction}}" + ); } } diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionActionRequestTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionRequestTests.java new file mode 100644 index 0000000000000..2d4f2b5d8aa66 --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionRequestTests.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions.action; + +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamInput; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.charset.StandardCharsets; + +public class ExtensionActionRequestTests extends OpenSearchTestCase { + + public void testExtensionActionRequest() throws Exception { + String expectedAction = "test-action"; + byte[] expectedRequestBytes = "request-bytes".getBytes(StandardCharsets.UTF_8); + ExtensionActionRequest request = new ExtensionActionRequest(expectedAction, expectedRequestBytes); + + assertEquals(expectedAction, request.getAction()); + assertEquals(expectedRequestBytes, request.getRequestBytes()); + + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes())); + request = new ExtensionActionRequest(in); + + assertEquals(expectedAction, request.getAction()); + assertArrayEquals(expectedRequestBytes, request.getRequestBytes()); + assertNull(request.validate()); + } +} diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionActionResponseTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionResponseTests.java new file mode 100644 index 0000000000000..5ec8c16027da2 --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionResponseTests.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.action; + +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamInput; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.charset.StandardCharsets; + +public class ExtensionActionResponseTests extends OpenSearchTestCase { + + public void testExtensionActionResponse() throws Exception { + byte[] expectedResponseBytes = "response-bytes".getBytes(StandardCharsets.UTF_8); + ExtensionActionResponse response = new ExtensionActionResponse(expectedResponseBytes); + + assertEquals(expectedResponseBytes, response.getResponseBytes()); + + BytesStreamOutput out = new BytesStreamOutput(); + response.writeTo(out); + BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes())); + response = new ExtensionActionResponse(in); + assertArrayEquals(expectedResponseBytes, response.getResponseBytes()); + } +} diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionHandleTransportRequestTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionHandleTransportRequestTests.java new file mode 100644 index 0000000000000..15e7320ba7556 --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionHandleTransportRequestTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions.action; + +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamInput; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.charset.StandardCharsets; + +public class ExtensionHandleTransportRequestTests extends OpenSearchTestCase { + public void testExtensionHandleTransportRequest() throws Exception { + String expectedAction = "test-action"; + byte[] expectedRequestBytes = "request-bytes".getBytes(StandardCharsets.UTF_8); + ExtensionHandleTransportRequest request = new ExtensionHandleTransportRequest(expectedAction, expectedRequestBytes); + + assertEquals(expectedAction, request.getAction()); + assertEquals(expectedRequestBytes, request.getRequestBytes()); + + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes())); + request = new ExtensionHandleTransportRequest(in); + + assertEquals(expectedAction, request.getAction()); + assertArrayEquals(expectedRequestBytes, request.getRequestBytes()); + } +} diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionProxyActionTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionProxyActionTests.java new file mode 100644 index 0000000000000..3719c29090287 --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionProxyActionTests.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.action; + +import org.opensearch.test.OpenSearchTestCase; + +public class ExtensionProxyActionTests extends OpenSearchTestCase { + public void testExtensionProxyAction() { + assertEquals("cluster:internal/extensions", ExtensionProxyAction.NAME); + assertEquals(ExtensionProxyAction.class, ExtensionProxyAction.INSTANCE.getClass()); + } +} diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java new file mode 100644 index 0000000000000..c3d6372a4f6b8 --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java @@ -0,0 +1,181 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions.action; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.admin.indices.create.AutoCreateAction.TransportAction; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.extensions.DiscoveryExtensionNode; +import org.opensearch.extensions.AcknowledgedResponse; +import org.opensearch.extensions.RegisterTransportActionsRequest; +import org.opensearch.extensions.rest.RestSendToExtensionActionTests; +import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ActionNotFoundTransportException; +import org.opensearch.transport.TransportService; +import org.opensearch.transport.nio.MockNioTransport; + +import java.net.InetAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; + +public class ExtensionTransportActionsHandlerTests extends OpenSearchTestCase { + private TransportService transportService; + private MockNioTransport transport; + private DiscoveryExtensionNode discoveryExtensionNode; + private ExtensionTransportActionsHandler extensionTransportActionsHandler; + private NodeClient client; + private final ThreadPool threadPool = new TestThreadPool(RestSendToExtensionActionTests.class.getSimpleName()); + + @Before + public void setup() throws Exception { + Settings settings = Settings.builder().put("cluster.name", "test").build(); + transport = new MockNioTransport( + settings, + Version.CURRENT, + threadPool, + new NetworkService(Collections.emptyList()), + PageCacheRecycler.NON_RECYCLING_INSTANCE, + new NamedWriteableRegistry(Collections.emptyList()), + new NoneCircuitBreakerService() + ); + transportService = new MockTransportService( + settings, + transport, + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + (boundAddress) -> new DiscoveryNode( + "test_node", + "test_node", + boundAddress.publishAddress(), + emptyMap(), + emptySet(), + Version.CURRENT + ), + null, + Collections.emptySet() + ); + discoveryExtensionNode = new DiscoveryExtensionNode( + "firstExtension", + "uniqueid1", + "uniqueid1", + "myIndependentPluginHost1", + "127.0.0.0", + new TransportAddress(InetAddress.getByName("127.0.0.0"), 9300), + new HashMap(), + Version.fromString("3.0.0"), + new PluginInfo( + "firstExtension", + "Fake description 1", + "0.0.7", + Version.fromString("3.0.0"), + "14", + "fakeClass1", + new ArrayList(), + false + ), + Collections.emptyList() + ); + client = new NoOpNodeClient(this.getTestName()); + extensionTransportActionsHandler = new ExtensionTransportActionsHandler( + Map.of("uniqueid1", discoveryExtensionNode), + transportService, + client + ); + } + + @Override + 
@After + public void tearDown() throws Exception { + super.tearDown(); + transportService.close(); + client.close(); + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + } + + public void testRegisterAction() { + String action = "test-action"; + extensionTransportActionsHandler.registerAction(action, discoveryExtensionNode); + assertEquals(discoveryExtensionNode, extensionTransportActionsHandler.getExtension(action)); + + // Test duplicate action registration + expectThrows(IllegalArgumentException.class, () -> extensionTransportActionsHandler.registerAction(action, discoveryExtensionNode)); + assertEquals(discoveryExtensionNode, extensionTransportActionsHandler.getExtension(action)); + } + + public void testRegisterTransportActionsRequest() { + String action = "test-action"; + RegisterTransportActionsRequest request = new RegisterTransportActionsRequest("uniqueid1", Map.of(action, TransportAction.class)); + AcknowledgedResponse response = (AcknowledgedResponse) extensionTransportActionsHandler.handleRegisterTransportActionsRequest( + request + ); + assertTrue(response.getStatus()); + assertEquals(discoveryExtensionNode, extensionTransportActionsHandler.getExtension(action)); + + // Test duplicate action registration + response = (AcknowledgedResponse) extensionTransportActionsHandler.handleRegisterTransportActionsRequest(request); + assertFalse(response.getStatus()); + } + + public void testTransportActionRequestFromExtension() throws InterruptedException { + String action = "test-action"; + byte[] requestBytes = "requestBytes".getBytes(StandardCharsets.UTF_8); + TransportActionRequestFromExtension request = new TransportActionRequestFromExtension(action, requestBytes, "uniqueid1"); + // NoOpNodeClient returns null as response + expectThrows(NullPointerException.class, () -> extensionTransportActionsHandler.handleTransportActionRequestFromExtension(request)); + } + + public void testSendTransportRequestToExtension() throws InterruptedException { + String action = "test-action"; + byte[] requestBytes = "request-bytes".getBytes(StandardCharsets.UTF_8); + ExtensionActionRequest request = new ExtensionActionRequest(action, requestBytes); + + // Action not registered, expect exception + expectThrows( + ActionNotFoundTransportException.class, + () -> extensionTransportActionsHandler.sendTransportRequestToExtension(request) + ); + + // Register Action + RegisterTransportActionsRequest registerRequest = new RegisterTransportActionsRequest( + "uniqueid1", + Map.of(action, TransportAction.class) + ); + AcknowledgedResponse response = (AcknowledgedResponse) extensionTransportActionsHandler.handleRegisterTransportActionsRequest( + registerRequest + ); + assertTrue(response.getStatus()); + + ExtensionActionResponse extensionResponse = extensionTransportActionsHandler.sendTransportRequestToExtension(request); + assertEquals( + "Request failed: [firstExtension][127.0.0.0:9300] Node not connected", + new String(extensionResponse.getResponseBytes(), StandardCharsets.UTF_8) + ); + } +} diff --git a/server/src/test/java/org/opensearch/extensions/action/TransportActionRequestFromExtensionTests.java b/server/src/test/java/org/opensearch/extensions/action/TransportActionRequestFromExtensionTests.java new file mode 100644 index 0000000000000..a8ef5372800d9 --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/action/TransportActionRequestFromExtensionTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * 
this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions.action; + +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamInput; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.charset.StandardCharsets; + +public class TransportActionRequestFromExtensionTests extends OpenSearchTestCase { + public void testTransportActionRequestFromExtension() throws Exception { + String expectedAction = "test-action"; + byte[] expectedRequestBytes = "request-bytes".getBytes(StandardCharsets.UTF_8); + String uniqueId = "test-uniqueId"; + TransportActionRequestFromExtension request = new TransportActionRequestFromExtension( + expectedAction, + expectedRequestBytes, + uniqueId + ); + + assertEquals(expectedAction, request.getAction()); + assertEquals(expectedRequestBytes, request.getRequestBytes()); + assertEquals(uniqueId, request.getUniqueId()); + + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes())); + request = new TransportActionRequestFromExtension(in); + + assertEquals(expectedAction, request.getAction()); + assertArrayEquals(expectedRequestBytes, request.getRequestBytes()); + assertEquals(uniqueId, request.getUniqueId()); + } +} diff --git a/server/src/test/java/org/opensearch/extensions/action/TransportActionResponseToExtensionTests.java b/server/src/test/java/org/opensearch/extensions/action/TransportActionResponseToExtensionTests.java new file mode 100644 index 0000000000000..070feaa240d98 --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/action/TransportActionResponseToExtensionTests.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.extensions.action;
+
+import org.opensearch.common.bytes.BytesReference;
+import org.opensearch.common.io.stream.BytesStreamInput;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+
+public class TransportActionResponseToExtensionTests extends OpenSearchTestCase {
+    public void testTransportActionResponseToExtension() throws IOException {
+        byte[] expectedResponseBytes = "response-bytes".getBytes(StandardCharsets.UTF_8);
+        TransportActionResponseToExtension response = new TransportActionResponseToExtension(expectedResponseBytes);
+
+        assertArrayEquals(expectedResponseBytes, response.getResponseBytes());
+
+        BytesStreamOutput out = new BytesStreamOutput();
+        response.writeTo(out);
+        BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()));
+        response = new TransportActionResponseToExtension(in);
+
+        assertArrayEquals(expectedResponseBytes, response.getResponseBytes());
+    }
+
+    public void testSetBytes() {
+        byte[] expectedResponseBytes = "response-bytes".getBytes(StandardCharsets.UTF_8);
+        byte[] expectedEmptyBytes = new byte[0];
+        TransportActionResponseToExtension response = new TransportActionResponseToExtension(expectedEmptyBytes);
+        assertArrayEquals(expectedEmptyBytes, response.getResponseBytes());
+
+        response.setResponseBytes(expectedResponseBytes);
+        assertArrayEquals(expectedResponseBytes, response.getResponseBytes());
+    }
+}
diff --git a/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java b/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java
new file mode 100644
index 0000000000000..9f09735dbe38d
--- /dev/null
+++ b/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java
@@ -0,0 +1,262 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.extensions.rest; + +import org.opensearch.rest.RestStatus; +import org.opensearch.OpenSearchParseException; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamInput; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.xcontent.NamedXContentRegistry; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestRequest.Method; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.charset.StandardCharsets; +import java.security.Principal; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import static java.util.Map.entry; + +public class ExtensionRestRequestTests extends OpenSearchTestCase { + + private Method expectedMethod; + private String expectedPath; + Map expectedParams; + XContentType expectedContentType; + BytesReference expectedContent; + String extensionUniqueId1; + Principal userPrincipal; + // Will be replaced with ExtensionTokenProcessor and PrincipalIdentifierToken classes from feature/identity + String extensionTokenProcessor; + String expectedRequestIssuerIdentity; + NamedWriteableRegistry registry; + + public void setUp() throws Exception { + super.setUp(); + expectedMethod = Method.GET; + expectedPath = "/test/uri"; + expectedParams = Map.ofEntries(entry("foo", "bar"), entry("baz", "42")); + expectedContentType = XContentType.JSON; + expectedContent = new BytesArray("{\"key\": \"value\"}".getBytes(StandardCharsets.UTF_8)); + extensionUniqueId1 = "ext_1"; + userPrincipal = () -> "user1"; + extensionTokenProcessor = "placeholder_extension_token_processor"; + expectedRequestIssuerIdentity = "placeholder_request_issuer_identity"; + } + + public void testExtensionRestRequest() throws Exception { + ExtensionRestRequest request = new ExtensionRestRequest( + expectedMethod, + expectedPath, + expectedParams, + expectedContentType, + expectedContent, + expectedRequestIssuerIdentity + ); + + assertEquals(expectedMethod, request.method()); + assertEquals(expectedPath, request.path()); + + assertEquals(expectedParams, request.params()); + assertEquals(Collections.emptyList(), request.consumedParams()); + assertTrue(request.hasParam("foo")); + assertFalse(request.hasParam("bar")); + assertEquals("bar", request.param("foo")); + assertEquals("baz", request.param("bar", "baz")); + assertEquals(42L, request.paramAsLong("baz", 0L)); + assertEquals(0L, request.paramAsLong("bar", 0L)); + assertTrue(request.consumedParams().contains("foo")); + assertTrue(request.consumedParams().contains("baz")); + + assertEquals(expectedContentType, request.getXContentType()); + assertTrue(request.hasContent()); + assertFalse(request.isContentConsumed()); + assertEquals(expectedContent, request.content()); + assertTrue(request.isContentConsumed()); + + XContentParser parser = request.contentParser(NamedXContentRegistry.EMPTY); + Map contentMap = parser.mapStrings(); + assertEquals("value", contentMap.get("key")); + + assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + out.flush(); + try (BytesStreamInput in = new 
BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + try (NamedWriteableAwareStreamInput nameWritableAwareIn = new NamedWriteableAwareStreamInput(in, registry)) { + request = new ExtensionRestRequest(nameWritableAwareIn); + assertEquals(expectedMethod, request.method()); + assertEquals(expectedPath, request.path()); + assertEquals(expectedParams, request.params()); + assertEquals(expectedContent, request.content()); + assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + } + } + } + } + + public void testExtensionRestRequestWithNoContent() throws Exception { + ExtensionRestRequest request = new ExtensionRestRequest( + expectedMethod, + expectedPath, + expectedParams, + null, + new BytesArray(new byte[0]), + expectedRequestIssuerIdentity + ); + + assertEquals(expectedMethod, request.method()); + assertEquals(expectedPath, request.path()); + assertEquals(expectedParams, request.params()); + assertNull(request.getXContentType()); + assertEquals(0, request.content().length()); + assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + + final ExtensionRestRequest requestWithNoContent = request; + assertThrows(OpenSearchParseException.class, () -> requestWithNoContent.contentParser(NamedXContentRegistry.EMPTY)); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + try (NamedWriteableAwareStreamInput nameWritableAwareIn = new NamedWriteableAwareStreamInput(in, registry)) { + request = new ExtensionRestRequest(nameWritableAwareIn); + assertEquals(expectedMethod, request.method()); + assertEquals(expectedPath, request.path()); + assertEquals(expectedParams, request.params()); + assertNull(request.getXContentType()); + assertEquals(0, request.content().length()); + assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + + final ExtensionRestRequest requestWithNoContentType = request; + assertThrows(OpenSearchParseException.class, () -> requestWithNoContentType.contentParser(NamedXContentRegistry.EMPTY)); + } + } + } + } + + public void testExtensionRestRequestWithPlainTextContent() throws Exception { + BytesReference expectedText = new BytesArray("Plain text"); + + ExtensionRestRequest request = new ExtensionRestRequest( + expectedMethod, + expectedPath, + expectedParams, + null, + expectedText, + expectedRequestIssuerIdentity + ); + + assertEquals(expectedMethod, request.method()); + assertEquals(expectedPath, request.path()); + assertEquals(expectedParams, request.params()); + assertNull(request.getXContentType()); + assertEquals(expectedText, request.content()); + assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + try (NamedWriteableAwareStreamInput nameWritableAwareIn = new NamedWriteableAwareStreamInput(in, registry)) { + request = new ExtensionRestRequest(nameWritableAwareIn); + assertEquals(expectedMethod, request.method()); + assertEquals(expectedPath, request.path()); + assertEquals(expectedParams, request.params()); + assertNull(request.getXContentType()); + assertEquals(expectedText, request.content()); + assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + } + } + } + } + + public void 
testRestExecuteOnExtensionResponse() throws Exception {
+        RestStatus expectedStatus = RestStatus.OK;
+        String expectedContentType = BytesRestResponse.TEXT_CONTENT_TYPE;
+        String expectedResponse = "Test response";
+        byte[] expectedResponseBytes = expectedResponse.getBytes(StandardCharsets.UTF_8);
+
+        RestExecuteOnExtensionResponse response = new RestExecuteOnExtensionResponse(
+            expectedStatus,
+            expectedContentType,
+            expectedResponseBytes,
+            Collections.emptyMap(),
+            Collections.emptyList(),
+            false
+        );
+
+        assertEquals(expectedStatus, response.getStatus());
+        assertEquals(expectedContentType, response.getContentType());
+        assertArrayEquals(expectedResponseBytes, response.getContent());
+        assertEquals(0, response.getHeaders().size());
+        assertEquals(0, response.getConsumedParams().size());
+        assertFalse(response.isContentConsumed());
+
+        String headerKey = "foo";
+        List<String> headerValueList = List.of("bar", "baz");
+        Map<String, List<String>> expectedHeaders = Map.of(headerKey, headerValueList);
+        List<String> expectedConsumedParams = List.of("foo", "bar");
+
+        response = new RestExecuteOnExtensionResponse(
+            expectedStatus,
+            expectedContentType,
+            expectedResponseBytes,
+            expectedHeaders,
+            expectedConsumedParams,
+            true
+        );
+
+        assertEquals(expectedStatus, response.getStatus());
+        assertEquals(expectedContentType, response.getContentType());
+        assertArrayEquals(expectedResponseBytes, response.getContent());
+
+        assertEquals(1, response.getHeaders().keySet().size());
+        assertTrue(response.getHeaders().containsKey(headerKey));
+
+        List<String> fooList = response.getHeaders().get(headerKey);
+        assertEquals(2, fooList.size());
+        assertTrue(fooList.containsAll(headerValueList));
+
+        assertEquals(2, response.getConsumedParams().size());
+        assertTrue(response.getConsumedParams().containsAll(expectedConsumedParams));
+        assertTrue(response.isContentConsumed());
+
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            response.writeTo(out);
+            out.flush();
+            try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) {
+                response = new RestExecuteOnExtensionResponse(in);
+
+                assertEquals(expectedStatus, response.getStatus());
+                assertEquals(expectedContentType, response.getContentType());
+                assertArrayEquals(expectedResponseBytes, response.getContent());
+
+                assertEquals(1, response.getHeaders().keySet().size());
+                assertTrue(response.getHeaders().containsKey(headerKey));
+
+                fooList = response.getHeaders().get(headerKey);
+                assertEquals(2, fooList.size());
+                assertTrue(fooList.containsAll(headerValueList));
+
+                assertEquals(2, response.getConsumedParams().size());
+                assertTrue(response.getConsumedParams().containsAll(expectedConsumedParams));
+                assertTrue(response.isContentConsumed());
+            }
+        }
+    }
+}
diff --git a/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestResponseTests.java b/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestResponseTests.java
new file mode 100644
index 0000000000000..82ae61b02cb32
--- /dev/null
+++ b/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestResponseTests.java
@@ -0,0 +1,132 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.extensions.rest; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collections; + +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.rest.RestRequest.Method; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.rest.BytesRestResponse.TEXT_CONTENT_TYPE; +import static org.opensearch.rest.RestStatus.ACCEPTED; +import static org.opensearch.rest.RestStatus.OK; + +public class ExtensionRestResponseTests extends OpenSearchTestCase { + + private static final String OCTET_CONTENT_TYPE = "application/octet-stream"; + private static final String JSON_CONTENT_TYPE = "application/json; charset=UTF-8"; + + private String testText; + private byte[] testBytes; + + @Override + public void setUp() throws Exception { + super.setUp(); + testText = "plain text"; + testBytes = new byte[] { 1, 2 }; + } + + private ExtensionRestRequest generateTestRequest() { + ExtensionRestRequest request = new ExtensionRestRequest( + Method.GET, + "/foo", + Collections.emptyMap(), + null, + new BytesArray("Text Content"), + null + ); + // consume params "foo" and "bar" + request.param("foo"); + request.param("bar"); + // consume content + request.content(); + return request; + } + + public void testConstructorWithBuilder() throws IOException { + XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + builder.startObject(); + builder.field("status", ACCEPTED); + builder.endObject(); + ExtensionRestRequest request = generateTestRequest(); + ExtensionRestResponse response = new ExtensionRestResponse(request, OK, builder); + + assertEquals(OK, response.status()); + assertEquals(JSON_CONTENT_TYPE, response.contentType()); + assertEquals("{\"status\":\"ACCEPTED\"}", response.content().utf8ToString()); + for (String param : response.getConsumedParams()) { + assertTrue(request.consumedParams().contains(param)); + } + assertTrue(request.isContentConsumed()); + } + + public void testConstructorWithPlainText() { + ExtensionRestRequest request = generateTestRequest(); + ExtensionRestResponse response = new ExtensionRestResponse(request, OK, testText); + + assertEquals(OK, response.status()); + assertEquals(TEXT_CONTENT_TYPE, response.contentType()); + assertEquals(testText, response.content().utf8ToString()); + for (String param : response.getConsumedParams()) { + assertTrue(request.consumedParams().contains(param)); + } + assertTrue(request.isContentConsumed()); + } + + public void testConstructorWithText() { + ExtensionRestRequest request = generateTestRequest(); + ExtensionRestResponse response = new ExtensionRestResponse(request, OK, TEXT_CONTENT_TYPE, testText); + + assertEquals(OK, response.status()); + assertEquals(TEXT_CONTENT_TYPE, response.contentType()); + assertEquals(testText, response.content().utf8ToString()); + + for (String param : response.getConsumedParams()) { + assertTrue(request.consumedParams().contains(param)); + } + assertTrue(request.isContentConsumed()); + } + + public void testConstructorWithByteArray() { + ExtensionRestRequest request = generateTestRequest(); + ExtensionRestResponse response = new ExtensionRestResponse(request, OK, OCTET_CONTENT_TYPE, testBytes); + + assertEquals(OK, response.status()); + assertEquals(OCTET_CONTENT_TYPE, response.contentType()); + assertArrayEquals(testBytes, 
BytesReference.toBytes(response.content()));
+        for (String param : response.getConsumedParams()) {
+            assertTrue(request.consumedParams().contains(param));
+        }
+        assertTrue(request.isContentConsumed());
+    }
+
+    public void testConstructorWithBytesReference() {
+        ExtensionRestRequest request = generateTestRequest();
+        ExtensionRestResponse response = new ExtensionRestResponse(
+            request,
+            OK,
+            OCTET_CONTENT_TYPE,
+            BytesReference.fromByteBuffer(ByteBuffer.wrap(testBytes, 0, 2))
+        );
+
+        assertEquals(OK, response.status());
+        assertEquals(OCTET_CONTENT_TYPE, response.contentType());
+        assertArrayEquals(testBytes, BytesReference.toBytes(response.content()));
+        for (String param : response.getConsumedParams()) {
+            assertTrue(request.consumedParams().contains(param));
+        }
+        assertTrue(request.isContentConsumed());
+    }
+}
diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestExecuteOnExtensionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestExecuteOnExtensionTests.java
deleted file mode 100644
index 98521ddcf1e26..0000000000000
--- a/server/src/test/java/org/opensearch/extensions/rest/RestExecuteOnExtensionTests.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.extensions.rest;
-
-import org.opensearch.rest.RestStatus;
-import org.opensearch.common.bytes.BytesReference;
-import org.opensearch.common.io.stream.BytesStreamInput;
-import org.opensearch.common.io.stream.BytesStreamOutput;
-import org.opensearch.rest.BytesRestResponse;
-import org.opensearch.rest.RestRequest.Method;
-import org.opensearch.test.OpenSearchTestCase;
-
-import java.nio.charset.StandardCharsets;
-import java.util.List;
-import java.util.Map;
-
-public class RestExecuteOnExtensionTests extends OpenSearchTestCase {
-
-    public void testRestExecuteOnExtensionRequest() throws Exception {
-        Method expectedMethod = Method.GET;
-        String expectedUri = "/test/uri";
-        RestExecuteOnExtensionRequest request = new RestExecuteOnExtensionRequest(expectedMethod, expectedUri);
-
-        assertEquals(expectedMethod, request.getMethod());
-        assertEquals(expectedUri, request.getUri());
-
-        try (BytesStreamOutput out = new BytesStreamOutput()) {
-            request.writeTo(out);
-            out.flush();
-            try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) {
-                request = new RestExecuteOnExtensionRequest(in);
-
-                assertEquals(expectedMethod, request.getMethod());
-                assertEquals(expectedUri, request.getUri());
-            }
-        }
-    }
-
-    public void testRestExecuteOnExtensionResponse() throws Exception {
-        RestStatus expectedStatus = RestStatus.OK;
-        String expectedContentType = BytesRestResponse.TEXT_CONTENT_TYPE;
-        String expectedResponse = "Test response";
-        byte[] expectedResponseBytes = expectedResponse.getBytes(StandardCharsets.UTF_8);
-
-        RestExecuteOnExtensionResponse response = new RestExecuteOnExtensionResponse(expectedStatus, expectedResponse);
-
-        assertEquals(expectedStatus, response.getStatus());
-        assertEquals(expectedContentType, response.getContentType());
-        assertArrayEquals(expectedResponseBytes, response.getContent());
-        assertEquals(0, response.getHeaders().size());
-
-        String headerKey = "foo";
-        List<String> headerValueList = List.of("bar", "baz");
-        Map<String, List<String>> expectedHeaders = Map.of(headerKey, headerValueList);
-
-        response = new RestExecuteOnExtensionResponse(expectedStatus,
expectedContentType, expectedResponseBytes, expectedHeaders);
-
-        assertEquals(expectedStatus, response.getStatus());
-        assertEquals(expectedContentType, response.getContentType());
-        assertArrayEquals(expectedResponseBytes, response.getContent());
-
-        assertEquals(1, expectedHeaders.keySet().size());
-        assertTrue(expectedHeaders.containsKey(headerKey));
-
-        List<String> fooList = expectedHeaders.get(headerKey);
-        assertEquals(2, fooList.size());
-        assertTrue(fooList.containsAll(headerValueList));
-
-        try (BytesStreamOutput out = new BytesStreamOutput()) {
-            response.writeTo(out);
-            out.flush();
-            try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) {
-                response = new RestExecuteOnExtensionResponse(in);
-
-                assertEquals(expectedStatus, response.getStatus());
-                assertEquals(expectedContentType, response.getContentType());
-                assertArrayEquals(expectedResponseBytes, response.getContent());
-
-                assertEquals(1, expectedHeaders.keySet().size());
-                assertTrue(expectedHeaders.containsKey(headerKey));
-
-                fooList = expectedHeaders.get(headerKey);
-                assertEquals(2, fooList.size());
-                assertTrue(fooList.containsAll(headerValueList));
-            }
-        }
-    }
-}
diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java
index 2a593a8d251e9..97eeae8fb95af 100644
--- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java
+++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java
@@ -93,7 +93,8 @@ public void setup() throws Exception {
                 "fakeClass1",
                 new ArrayList<String>(),
                 false
-            )
+            ),
+            Collections.emptyList()
         );
     }
 
diff --git a/server/src/test/resources/config/extensions.yml b/server/src/test/resources/config/extensions.yml
index 6264e9630ad60..e02a2913385d9 100644
--- a/server/src/test/resources/config/extensions.yml
+++ b/server/src/test/resources/config/extensions.yml
@@ -7,6 +7,11 @@ extensions:
     version: '3.0.0'
   - name: "secondExtension"
     uniqueId: 'uniqueid2'
+    dependencies:
+      - name: 'uniqueid0'
+        version: '2.0.0'
+      - name: 'uniqueid1'
+        version: '3.0.0'
     hostName: 'myIndependentPluginHost2'
     hostAddress: '127.0.0.1'
     port: '9301'

From b8c74bd4009a30b8b350328dc4d6d1bbe90a9993 Mon Sep 17 00:00:00 2001
From: Ashish
Date: Tue, 27 Dec 2022 00:08:50 +0530
Subject: [PATCH 02/13] Add transport action for primary term validation for
 remote-backed indices (#5616)

Add transport action for primary term validation for remote-backed
indices

Signed-off-by: Ashish Singh
---
 .../action/bulk/TransportShardBulkAction.java | 238 +++++++++++++++++-
 .../replication/FanoutReplicationProxy.java   |  22 +-
 .../ReplicationModeAwareProxy.java            |  39 ++-
 .../replication/ReplicationOperation.java     |  13 +-
 .../support/replication/ReplicationProxy.java |  54 +++-
 .../replication/ReplicationProxyFactory.java  |  29 ---
 .../replication/ReplicationProxyRequest.java  |  18 +-
 .../TransportReplicationAction.java           |  53 +++-
 .../replication/TransportWriteAction.java     |   2 +-
 .../checkpoint/PublishCheckpointAction.java   |   2 +-
 ...portVerifyShardBeforeCloseActionTests.java |  29 ++-
 .../flush/TransportShardFlushActionTests.java |  56 +++++
 ...sportVerifyShardIndexBlockActionTests.java |  56 +++++
 .../TransportShardRefreshActionTests.java     |  56 +++++
 .../bulk/TransportShardBulkActionTests.java   | 178 +++++++++++++
 ...TransportResyncReplicationActionTests.java |  34 +++
 .../ReplicationOperationTests.java            |  18 +-
 .../GlobalCheckpointSyncActionTests.java      |  28
+++ ...tentionLeaseBackgroundSyncActionTests.java | 27 ++ .../seqno/RetentionLeaseSyncActionTests.java | 29 +++ .../PublishCheckpointActionTests.java | 28 +++ ...enSearchIndexLevelReplicationTestCase.java | 2 +- 22 files changed, 935 insertions(+), 76 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index 59f9042ec4a85..0657fab55b220 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -38,6 +38,7 @@ import org.apache.logging.log4j.util.MessageSupplier; import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.ActionRunnable; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.DocWriteResponse; @@ -46,25 +47,36 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.ChannelActionListener; +import org.opensearch.action.support.replication.ReplicationMode; +import org.opensearch.action.support.replication.ReplicationOperation; +import org.opensearch.action.support.replication.ReplicationTask; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.action.support.replication.TransportWriteAction; import org.opensearch.action.update.UpdateHelper; import org.opensearch.action.update.UpdateRequest; import org.opensearch.action.update.UpdateResponse; +import org.opensearch.client.transport.NoNodeAvailableException; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.action.index.MappingUpdatedAction; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.AllocationId; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.inject.Inject; import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; @@ -78,24 +90,29 @@ import 
org.opensearch.index.seqno.SequenceNumbers;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.ShardId;
+import org.opensearch.index.shard.ShardNotFoundException;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.SystemIndices;
 import org.opensearch.node.NodeClosedException;
+import org.opensearch.tasks.Task;
+import org.opensearch.tasks.TaskId;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.threadpool.ThreadPool.Names;
+import org.opensearch.transport.TransportChannel;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportRequestOptions;
 import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
+import java.util.Locale;
 import java.util.Map;
+import java.util.Objects;
 import java.util.concurrent.Executor;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.LongSupplier;
 
-import org.opensearch.action.support.replication.ReplicationMode;
-
 /**
  * Performs shard-level bulk (index, delete or update) operations
  *
@@ -117,6 +134,15 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
+    protected void handlePrimaryTermValidationRequest(
+        final PrimaryTermValidationRequest request,
+        final TransportChannel channel,
+        final Task task
+    ) {
+        final ActionListener<ReplicaResponse> listener = new ChannelActionListener<>(channel, transportPrimaryTermValidationAction, request);
+        final ShardId shardId = request.getShardId();
+        assert shardId != null : "request shardId must be set";
+        IndexShard replica = getIndexShard(shardId);
+        try {
+            new PrimaryTermValidationReplicaAction(listener, replica, (ReplicationTask) task, request).run();
+        } catch (RuntimeException e) {
+            listener.onFailure(e);
+        }
+    }
+
+    /**
+     * This action is the primary term validation action which is used for doing primary term validation with replicas.
+     * It applies only to TransportShardBulkAction because all writes (delete/update/single write/bulk)
+     * ultimately boil down to TransportShardBulkAction, and an isolated primary could otherwise continue to
+     * acknowledge writes without being aware that the primary has changed. More details in the javadoc of
+     * {@link TransportShardBulkAction#transportPrimaryTermValidationAction}.
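+     * <p>
+     * For illustration only (the terms below are hypothetical, not part of this change): a replica whose current
+     * operation primary term is 3 fails a validation request carrying term 2, and acknowledges one carrying
+     * term 3 or higher with an empty {@link ReplicaResponse}:
+     * <pre>{@code
+     * // request term 2, replica term 3 -> onFailure(new IllegalStateException("... primary term [2] is too old ..."))
+     * // request term 3, replica term 3 -> onResponse(new ReplicaResponse(NO_OPS_PERFORMED, NO_OPS_PERFORMED))
+     * }</pre>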
+     *
+     * @opensearch.internal
+     */
+    private static final class PrimaryTermValidationReplicaAction extends AbstractRunnable implements ActionListener<Releasable> {
+
+        private final ActionListener<ReplicaResponse> onCompletionListener;
+        private final IndexShard replica;
+        private final ReplicationTask task;
+        private final PrimaryTermValidationRequest request;
+
+        public PrimaryTermValidationReplicaAction(
+            ActionListener<ReplicaResponse> onCompletionListener,
+            IndexShard replica,
+            ReplicationTask task,
+            PrimaryTermValidationRequest request
+        ) {
+            this.onCompletionListener = onCompletionListener;
+            this.replica = replica;
+            this.task = task;
+            this.request = request;
+        }
+
+        @Override
+        public void onResponse(Releasable releasable) {
+            setPhase(task, "finished");
+            onCompletionListener.onResponse(new ReplicaResponse(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED));
+        }
+
+        @Override
+        public void onFailure(Exception e) {
+            setPhase(task, "failed");
+            onCompletionListener.onFailure(e);
+        }
+
+        @Override
+        protected void doRun() throws Exception {
+            setPhase(task, "primary-term-validation");
+            final String actualAllocationId = this.replica.routingEntry().allocationId().getId();
+            if (actualAllocationId.equals(request.getTargetAllocationID()) == false) {
+                throw new ShardNotFoundException(
+                    this.replica.shardId(),
+                    "expected allocation id [{}] but found [{}]",
+                    request.getTargetAllocationID(),
+                    actualAllocationId
+                );
+            }
+            // Check operation primary term against the incoming primary term
+            // If the request primary term is lower, trigger the listener failure
+            if (request.getPrimaryTerm() < replica.getOperationPrimaryTerm()) {
+                final String message = String.format(
+                    Locale.ROOT,
+                    "%s operation primary term [%d] is too old (current [%d])",
+                    request.getShardId(),
+                    request.getPrimaryTerm(),
+                    replica.getOperationPrimaryTerm()
+                );
+                onFailure(new IllegalStateException(message));
+            } else {
+                onResponse(null);
+            }
+        }
+    }
+
+    /**
+     * Primary term validation request sent to a specific allocation id
+     *
+     * @opensearch.internal
+     */
+    protected static final class PrimaryTermValidationRequest extends TransportRequest {
+
+        /**
+         * {@link AllocationId#getId()} of the shard this request is sent to
+         **/
+        private final String targetAllocationID;
+        private final long primaryTerm;
+        private final ShardId shardId;
+
+        public PrimaryTermValidationRequest(String targetAllocationID, long primaryTerm, ShardId shardId) {
+            this.targetAllocationID = Objects.requireNonNull(targetAllocationID);
+            this.primaryTerm = primaryTerm;
+            this.shardId = Objects.requireNonNull(shardId);
+        }
+
+        public PrimaryTermValidationRequest(StreamInput in) throws IOException {
+            super(in);
+            targetAllocationID = in.readString();
+            primaryTerm = in.readVLong();
+            shardId = new ShardId(in);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeString(targetAllocationID);
+            out.writeVLong(primaryTerm);
+            shardId.writeTo(out);
+        }
+
+        @Override
+        public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+            return new ReplicationTask(id, type, action, getDescription(), parentTaskId, headers);
+        }
+
+        public String getTargetAllocationID() {
+            return targetAllocationID;
+        }
+
+        public long getPrimaryTerm() {
+            return primaryTerm;
+        }
+
+        public ShardId getShardId() {
+            return shardId;
+        }
+
+        @Override
+        public String getDescription() {
+            return toString();
+        }
+
+        @Override
+        public String toString() {
+            return "PrimaryTermValidationRequest [" + shardId +
"] for targetAllocationID [" + + targetAllocationID + + "] with primaryTerm [" + + primaryTerm + + "]"; + } + } + + @Override + protected ReplicationOperation.Replicas primaryTermValidationReplicasProxy() { + return new PrimaryTermValidationProxy(); + } + + /** + * This {@link org.opensearch.action.support.replication.TransportReplicationAction.ReplicasProxy} implementation is + * used for primary term validation and is only relevant for TransportShardBulkAction replication action. + * + * @opensearch.internal + */ + private final class PrimaryTermValidationProxy extends WriteActionReplicasProxy { + + @Override + public void performOn( + ShardRouting replica, + BulkShardRequest request, + long primaryTerm, + long globalCheckpoint, + long maxSeqNoOfUpdatesOrDeletes, + ActionListener listener + ) { + String nodeId = replica.currentNodeId(); + final DiscoveryNode node = clusterService.state().nodes().get(nodeId); + if (node == null) { + listener.onFailure(new NoNodeAvailableException("unknown node [" + nodeId + "]")); + return; + } + final PrimaryTermValidationRequest validationRequest = new PrimaryTermValidationRequest( + replica.allocationId().getId(), + primaryTerm, + replica.shardId() + ); + final ActionListenerResponseHandler handler = new ActionListenerResponseHandler<>( + listener, + ReplicaResponse::new + ); + transportService.sendRequest(node, transportPrimaryTermValidationAction, validationRequest, transportOptions, handler); + } } @Override @@ -196,7 +428,7 @@ protected long primaryOperationSize(BulkShardRequest request) { } @Override - protected ReplicationMode getReplicationMode(IndexShard indexShard) { + public ReplicationMode getReplicationMode(IndexShard indexShard) { if (indexShard.isRemoteTranslogEnabled()) { return ReplicationMode.PRIMARY_TERM_VALIDATION; } diff --git a/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java b/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java index 2980df4c1c0af..51b95468d6b25 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java +++ b/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java @@ -8,15 +8,35 @@ package org.opensearch.action.support.replication; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.replication.ReplicationOperation.ReplicaResponse; +import org.opensearch.action.support.replication.ReplicationOperation.Replicas; import org.opensearch.cluster.routing.ShardRouting; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + /** * This implementation of {@link ReplicationProxy} fans out the replication request to current shard routing if * it is not the primary and has replication mode as {@link ReplicationMode#FULL_REPLICATION}. 
 *
 * @opensearch.internal
 */
-public class FanoutReplicationProxy<ReplicaRequest> extends ReplicationProxy<ReplicaRequest> {
+public class FanoutReplicationProxy<ReplicaRequest extends ReplicationRequest<ReplicaRequest>> extends ReplicationProxy<ReplicaRequest> {
+
+    public FanoutReplicationProxy(Replicas<ReplicaRequest> replicasProxy) {
+        super(replicasProxy);
+    }
+
+    @Override
+    protected void performOnReplicaProxy(
+        ReplicationProxyRequest<ReplicaRequest> proxyRequest,
+        ReplicationMode replicationMode,
+        BiConsumer<Consumer<ActionListener<ReplicaResponse>>, ReplicationProxyRequest<ReplicaRequest>> performOnReplicaConsumer
+    ) {
+        assert replicationMode == ReplicationMode.FULL_REPLICATION : "FanoutReplicationProxy allows only full replication mode";
+        performOnReplicaConsumer.accept(getReplicasProxyConsumer(fullReplicationProxy, proxyRequest), proxyRequest);
+    }
 
     @Override
     ReplicationMode determineReplicationMode(ShardRouting shardRouting, ShardRouting primaryRouting) {
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java
index fa28e99d5696f..26d3b3c2f64ef 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java
@@ -8,9 +8,13 @@
 
 package org.opensearch.action.support.replication;
 
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.support.replication.ReplicationOperation.ReplicaResponse;
 import org.opensearch.cluster.routing.ShardRouting;
 
 import java.util.Objects;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
 
 /**
  * This implementation of {@link ReplicationProxy} fans out the replication request to current shard routing basis
@@ -18,13 +22,40 @@
 *
 * @opensearch.internal
 */
-public class ReplicationModeAwareProxy<ReplicaRequest> extends ReplicationProxy<ReplicaRequest> {
+public class ReplicationModeAwareProxy<ReplicaRequest extends ReplicationRequest<ReplicaRequest>> extends ReplicationProxy<ReplicaRequest> {
 
     private final ReplicationMode replicationModeOverride;
 
-    public ReplicationModeAwareProxy(ReplicationMode replicationModeOverride) {
-        assert Objects.nonNull(replicationModeOverride);
-        this.replicationModeOverride = replicationModeOverride;
+    /**
+     * This ReplicasProxy is used for performing primary term validation.
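+     * For remote-translog-backed indices the write itself is not fanned out to the replicas; only this
+     * lightweight primary term check is routed through this proxy.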
+     */
+    private final ReplicationOperation.Replicas<ReplicaRequest> primaryTermValidationProxy;
+
+    public ReplicationModeAwareProxy(
+        ReplicationMode replicationModeOverride,
+        ReplicationOperation.Replicas<ReplicaRequest> replicasProxy,
+        ReplicationOperation.Replicas<ReplicaRequest> primaryTermValidationProxy
+    ) {
+        super(replicasProxy);
+        this.replicationModeOverride = Objects.requireNonNull(replicationModeOverride);
+        this.primaryTermValidationProxy = Objects.requireNonNull(primaryTermValidationProxy);
+    }
+
+    @Override
+    protected void performOnReplicaProxy(
+        ReplicationProxyRequest<ReplicaRequest> proxyRequest,
+        ReplicationMode replicationMode,
+        BiConsumer<Consumer<ActionListener<ReplicaResponse>>, ReplicationProxyRequest<ReplicaRequest>> performOnReplicaConsumer
+    ) {
+        assert replicationMode == ReplicationMode.FULL_REPLICATION || replicationMode == ReplicationMode.PRIMARY_TERM_VALIDATION;
+
+        Consumer<ActionListener<ReplicaResponse>> replicasProxyConsumer;
+        if (replicationMode == ReplicationMode.FULL_REPLICATION) {
+            replicasProxyConsumer = getReplicasProxyConsumer(fullReplicationProxy, proxyRequest);
+        } else {
+            replicasProxyConsumer = getReplicasProxyConsumer(primaryTermValidationProxy, proxyRequest);
+        }
+        performOnReplicaConsumer.accept(replicasProxyConsumer, proxyRequest);
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
index 1a6a5a9245eb2..944729df2ab1e 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
@@ -66,6 +66,7 @@
 import java.util.Locale;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Consumer;
 import java.util.function.LongSupplier;
 
 /**
@@ -237,17 +238,19 @@ private void performOnReplicas(
                 globalCheckpoint,
                 maxSeqNoOfUpdatesOrDeletes,
                 pendingReplicationActions,
-                replicaRequest
+                replicaRequest,
+                primaryTerm
             ).build();
             replicationProxy.performOnReplicaProxy(proxyRequest, this::performOnReplica);
         }
     }
 
-    private void performOnReplica(final ReplicationProxyRequest<ReplicaRequest> replicationProxyRequest) {
+    private void performOnReplica(
+        final Consumer<ActionListener<ReplicaResponse>> replicasProxyConsumer,
+        final ReplicationProxyRequest<ReplicaRequest> replicationProxyRequest
+    ) {
         final ShardRouting shard = replicationProxyRequest.getShardRouting();
         final ReplicaRequest replicaRequest = replicationProxyRequest.getReplicaRequest();
-        final long globalCheckpoint = replicationProxyRequest.getGlobalCheckpoint();
-        final long maxSeqNoOfUpdatesOrDeletes = replicationProxyRequest.getMaxSeqNoOfUpdatesOrDeletes();
         final PendingReplicationActions pendingReplicationActions = replicationProxyRequest.getPendingReplicationActions();
 
         if (logger.isTraceEnabled()) {
@@ -319,7 +322,7 @@ public String toString() {
 
         @Override
         public void tryAction(ActionListener<ReplicaResponse> listener) {
-            replicasProxy.performOn(shard, replicaRequest, primaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, listener);
+            replicasProxyConsumer.accept(listener);
         }
 
         @Override
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java
index e098ea1aed960..20f7b5fc6a586 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java
@@ -8,8 +8,12 @@
 
 package org.opensearch.action.support.replication;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.support.replication.ReplicationOperation.ReplicaResponse;
+import org.opensearch.action.support.replication.ReplicationOperation.Replicas;
 import org.opensearch.cluster.routing.ShardRouting;
 
+import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 
 /**
@@ -18,27 +22,51 @@
 *
 * @opensearch.internal
 */
-public abstract class ReplicationProxy<ReplicaRequest> {
+public abstract class ReplicationProxy<ReplicaRequest extends ReplicationRequest<ReplicaRequest>> {
+
+    /**
+     * This is the replicas proxy which is used for full replication.
+     */
+    protected final Replicas<ReplicaRequest> fullReplicationProxy;
+
+    public ReplicationProxy(Replicas<ReplicaRequest> fullReplicationProxy) {
+        this.fullReplicationProxy = fullReplicationProxy;
+    }
 
     /**
      * Depending on the actual implementation and the passed {@link ReplicationMode}, the replication
      * mode is determined using which the replication request is performed on the replica or not.
      *
      * @param proxyRequest replication proxy request
-     * @param originalPerformOnReplicaConsumer original performOnReplica method passed as consumer
+     * @param performOnReplicaConsumer performOnReplicasProxy
      */
-    public void performOnReplicaProxy(
+    final void performOnReplicaProxy(
         ReplicationProxyRequest<ReplicaRequest> proxyRequest,
-        Consumer<ReplicationProxyRequest<ReplicaRequest>> originalPerformOnReplicaConsumer
+        BiConsumer<Consumer<ActionListener<ReplicaResponse>>, ReplicationProxyRequest<ReplicaRequest>> performOnReplicaConsumer
     ) {
         ReplicationMode replicationMode = determineReplicationMode(proxyRequest.getShardRouting(), proxyRequest.getPrimaryRouting());
         // If the replication modes are 1. Logical replication or 2. Primary term validation, we let the call get performed on the
         // replica shard.
-        if (replicationMode == ReplicationMode.FULL_REPLICATION || replicationMode == ReplicationMode.PRIMARY_TERM_VALIDATION) {
-            originalPerformOnReplicaConsumer.accept(proxyRequest);
+        if (replicationMode == ReplicationMode.NO_REPLICATION) {
+            return;
         }
+        performOnReplicaProxy(proxyRequest, replicationMode, performOnReplicaConsumer);
     }
 
+    /**
+     * The implementor can decide the {@code Consumer<ActionListener<ReplicaResponse>>} based on the
+     * proxyRequest and replicationMode. This will ultimately make the calls to the replica.
+     *
+     * @param proxyRequest replication proxy request
+     * @param replicationMode replication mode
+     * @param performOnReplicaConsumer performOnReplicasProxy
+     */
+    protected abstract void performOnReplicaProxy(
+        ReplicationProxyRequest<ReplicaRequest> proxyRequest,
+        ReplicationMode replicationMode,
+        BiConsumer<Consumer<ActionListener<ReplicaResponse>>, ReplicationProxyRequest<ReplicaRequest>> performOnReplicaConsumer
+    );
+
     /**
      * Determines the replication mode based on the constructor arguments of the implementation and the current
      * replication mode aware shard routing.
@@ -48,4 +76,18 @@ public void performOnReplicaProxy(
     * @return the determined replication mode.
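     * ({@link ReplicationMode#FULL_REPLICATION} fans the request out to the replica,
     * {@link ReplicationMode#PRIMARY_TERM_VALIDATION} only validates the primary term on it, and
     * {@link ReplicationMode#NO_REPLICATION} skips the replica entirely)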
     */
    abstract ReplicationMode determineReplicationMode(final ShardRouting shardRouting, final ShardRouting primaryRouting);
+
+    protected Consumer<ActionListener<ReplicaResponse>> getReplicasProxyConsumer(
+        Replicas<ReplicaRequest> proxy,
+        ReplicationProxyRequest<ReplicaRequest> proxyRequest
+    ) {
+        return (listener) -> proxy.performOn(
+            proxyRequest.getShardRouting(),
+            proxyRequest.getReplicaRequest(),
+            proxyRequest.getPrimaryTerm(),
+            proxyRequest.getGlobalCheckpoint(),
+            proxyRequest.getMaxSeqNoOfUpdatesOrDeletes(),
+            listener
+        );
+    }
 }
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java
deleted file mode 100644
index a2bbf58fb9100..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.action.support.replication;
-
-import org.opensearch.index.shard.IndexShard;
-
-/**
- * Factory that returns the {@link ReplicationProxy} instance basis the {@link ReplicationMode}.
- *
- * @opensearch.internal
- */
-public class ReplicationProxyFactory {
-
-    public static <ReplicaRequest> ReplicationProxy<ReplicaRequest> create(
-        final IndexShard indexShard,
-        final ReplicationMode replicationModeOverride
-    ) {
-        if (indexShard.isRemoteTranslogEnabled()) {
-            return new ReplicationModeAwareProxy<>(replicationModeOverride);
-        }
-        return new FanoutReplicationProxy<>();
-    }
-}
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java
index 180efd6f423c3..c65e55867f706 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java
@@ -31,13 +31,16 @@ public class ReplicationProxyRequest<ReplicaRequest> {
 
     private final ReplicaRequest replicaRequest;
 
+    private final long primaryTerm;
+
     private ReplicationProxyRequest(
         ShardRouting shardRouting,
         ShardRouting primaryRouting,
         long globalCheckpoint,
         long maxSeqNoOfUpdatesOrDeletes,
         PendingReplicationActions pendingReplicationActions,
-        ReplicaRequest replicaRequest
+        ReplicaRequest replicaRequest,
+        long primaryTerm
     ) {
         this.shardRouting = Objects.requireNonNull(shardRouting);
         this.primaryRouting = Objects.requireNonNull(primaryRouting);
@@ -45,6 +48,7 @@ private ReplicationProxyRequest(
         this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes;
         this.pendingReplicationActions = Objects.requireNonNull(pendingReplicationActions);
         this.replicaRequest = Objects.requireNonNull(replicaRequest);
+        this.primaryTerm = primaryTerm;
     }
 
     public ShardRouting getShardRouting() {
@@ -71,6 +75,10 @@ public ReplicaRequest getReplicaRequest() {
         return replicaRequest;
     }
 
+    public long getPrimaryTerm() {
+        return primaryTerm;
+    }
+
     /**
      * Builder of ReplicationProxyRequest.
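     * <p>
     * Callers populate every field up front and then call {@code build()}; a sketch (the argument names here are
     * illustrative):
     * <pre>{@code
     * ReplicationProxyRequest<ReplicaRequest> proxyRequest = new ReplicationProxyRequest.Builder<>(
     *     shardRouting, primaryRouting, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, pendingReplicationActions, replicaRequest, primaryTerm
     * ).build();
     * }</pre>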
     *
@@ -84,6 +92,7 @@ public static class Builder<ReplicaRequest> {
 
         private final long maxSeqNoOfUpdatesOrDeletes;
         private final PendingReplicationActions pendingReplicationActions;
         private final ReplicaRequest replicaRequest;
+        private final long primaryTerm;
 
         public Builder(
             ShardRouting shardRouting,
@@ -91,7 +100,8 @@ public Builder(
             long globalCheckpoint,
             long maxSeqNoOfUpdatesOrDeletes,
             PendingReplicationActions pendingReplicationActions,
-            ReplicaRequest replicaRequest
+            ReplicaRequest replicaRequest,
+            long primaryTerm
         ) {
             this.shardRouting = shardRouting;
             this.primaryRouting = primaryRouting;
@@ -99,6 +109,7 @@ public Builder(
             this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes;
             this.pendingReplicationActions = pendingReplicationActions;
             this.replicaRequest = replicaRequest;
+            this.primaryTerm = primaryTerm;
         }
 
         public ReplicationProxyRequest<ReplicaRequest> build() {
@@ -108,7 +119,8 @@ public ReplicationProxyRequest<ReplicaRequest> build() {
             globalCheckpoint,
             maxSeqNoOfUpdatesOrDeletes,
             pendingReplicationActions,
-            replicaRequest
+            replicaRequest,
+            primaryTerm
         );
     }
diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
index 0a0904a1b3aaa..e804aa31adb4e 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
@@ -46,6 +46,7 @@
 import org.opensearch.action.support.ChannelActionListener;
 import org.opensearch.action.support.TransportAction;
 import org.opensearch.action.support.TransportActions;
+import org.opensearch.action.support.replication.ReplicationOperation.Replicas;
 import org.opensearch.client.transport.NoNodeAvailableException;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterStateObserver;
@@ -254,17 +255,40 @@ private void runReroutePhase(Task task, Request request, ActionListener<Response> listener) {
 
-    protected ReplicationOperation.Replicas<ReplicaRequest> newReplicasProxy() {
+    protected Replicas<ReplicaRequest> newReplicasProxy() {
         return new ReplicasProxy();
     }
 
+    /**
+     * Returns a ReplicasProxy that is used for primary term validation. By default, control must never reach the
+     * {@code performOn} method of this proxy for replication actions. Implementations that support primary term
+     * validation can override this to return a proxy whose {@code performOn} method does make calls to the replica.
+     *
+     * @return Primary term validation replicas proxy.
+     */
+    protected Replicas<ReplicaRequest> primaryTermValidationReplicasProxy() {
+        return new ReplicasProxy() {
+            @Override
+            public void performOn(
+                ShardRouting replica,
+                ReplicaRequest request,
+                long primaryTerm,
+                long globalCheckpoint,
+                long maxSeqNoOfUpdatesOrDeletes,
+                ActionListener<ReplicationOperation.ReplicaResponse> listener
+            ) {
+                throw new UnsupportedOperationException("Primary term validation is not available for " + actionName);
+            }
+        };
+    }
+
     /**
      * This method is used for defining the {@link ReplicationMode} override per {@link TransportReplicationAction}.
     *
     * @param indexShard index shard used to determine the policy.
     * @return the overridden replication mode.
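     * <p>
     * A sketch of a typical override, mirroring the remote-translog check used elsewhere in this change
     * (illustrative only):
     * <pre>{@code
     * public ReplicationMode getReplicationMode(IndexShard indexShard) {
     *     if (indexShard.isRemoteTranslogEnabled()) {
     *         return ReplicationMode.PRIMARY_TERM_VALIDATION;
     *     }
     *     return super.getReplicationMode(indexShard);
     * }
     * }</pre>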
     */
-    protected ReplicationMode getReplicationMode(IndexShard indexShard) {
+    public ReplicationMode getReplicationMode(IndexShard indexShard) {
         if (indexShard.isRemoteTranslogEnabled()) {
             return ReplicationMode.NO_REPLICATION;
         }
@@ -536,21 +560,24 @@ public void handleException(TransportException exp) {
                     onCompletionListener.onResponse(response);
                 }, e -> handleException(primaryShardReference, e));
 
+                final Replicas<ReplicaRequest> replicasProxy = newReplicasProxy();
+                final IndexShard indexShard = primaryShardReference.indexShard;
+                final Replicas<ReplicaRequest> termValidationProxy = primaryTermValidationReplicasProxy();
+
                 new ReplicationOperation<>(
                     primaryRequest.getRequest(),
                     primaryShardReference,
                     ActionListener.map(responseListener, result -> result.finalResponseIfSuccessful),
-                    newReplicasProxy(),
+                    replicasProxy,
                     logger,
                     threadPool,
                     actionName,
                     primaryRequest.getPrimaryTerm(),
                     initialRetryBackoffBound,
                     retryTimeout,
-                    ReplicationProxyFactory.create(
-                        primaryShardReference.indexShard,
-                        getReplicationMode(primaryShardReference.indexShard)
-                    )
+                    indexShard.isRemoteTranslogEnabled()
+                        ? new ReplicationModeAwareProxy<>(getReplicationMode(indexShard), replicasProxy, termValidationProxy)
+                        : new FanoutReplicationProxy<>(replicasProxy)
                 ).execute();
             }
         } catch (Exception e) {
@@ -830,7 +857,7 @@ protected void doRun() throws Exception {
         }
     }
 
-    private IndexShard getIndexShard(final ShardId shardId) {
+    protected IndexShard getIndexShard(final ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
         return indexService.getShard(shardId.id());
     }
@@ -1283,7 +1310,7 @@ public static class ReplicaResponse extends ActionResponse implements Replicatio
         private long localCheckpoint;
         private long globalCheckpoint;
 
-        ReplicaResponse(StreamInput in) throws IOException {
+        public ReplicaResponse(StreamInput in) throws IOException {
             super(in);
             localCheckpoint = in.readZLong();
             globalCheckpoint = in.readZLong();
@@ -1338,7 +1365,7 @@ public int hashCode() {
     *
     * @opensearch.internal
     */
-    protected class ReplicasProxy implements ReplicationOperation.Replicas<ReplicaRequest> {
+    protected class ReplicasProxy implements Replicas<ReplicaRequest> {
 
         @Override
         public void performOn(
@@ -1401,7 +1428,9 @@ public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, l
     */
    public static class ConcreteShardRequest<R extends TransportRequest> extends TransportRequest {
 
-        /** {@link AllocationId#getId()} of the shard this request is sent to **/
+        /**
+         * {@link AllocationId#getId()} of the shard this request is sent to
+         **/
         private final String targetAllocationID;
         private final long primaryTerm;
         private final R request;
@@ -1568,7 +1597,7 @@ public String toString() {
     * Sets the current phase on the task if it isn't null. Pulled into its own
     * method because it's more convenient that way.
*/ - static void setPhase(ReplicationTask task, String phase) { + protected static void setPhase(ReplicationTask task, String phase) { if (task != null) { task.setPhase(phase); } diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index 7fc810808f560..26b15195cd8fc 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -502,7 +502,7 @@ void run() { * * @opensearch.internal */ - class WriteActionReplicasProxy extends ReplicasProxy { + protected class WriteActionReplicasProxy extends ReplicasProxy { @Override public void failShardIfNeeded( diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index d2fc354cf9298..e7b53874c9d1b 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -96,7 +96,7 @@ protected void doExecute(Task task, PublishCheckpointRequest request, ActionList } @Override - protected ReplicationMode getReplicationMode(IndexShard indexShard) { + public ReplicationMode getReplicationMode(IndexShard indexShard) { if (indexShard.isRemoteTranslogEnabled()) { return ReplicationMode.FULL_REPLICATION; } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index a7ffde04314c3..000dac92506f6 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -43,6 +43,7 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.replication.FanoutReplicationProxy; import org.opensearch.action.support.replication.PendingReplicationActions; +import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.action.support.replication.ReplicationOperation; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.action.support.replication.TransportReplicationAction; @@ -292,7 +293,7 @@ public void testUnavailableShardsMarkedAsStale() throws Exception { primaryTerm, TimeValue.timeValueMillis(20), TimeValue.timeValueSeconds(60), - new FanoutReplicationProxy<>() + new FanoutReplicationProxy<>(proxy) ); operation.execute(); @@ -325,6 +326,32 @@ public void testUnavailableShardsMarkedAsStale() throws Exception { assertThat(shardInfo.getSuccessful(), equalTo(1 + nbReplicas - unavailableShards.size())); } + public void testGetReplicationModeWithRemoteTranslog() { + TransportVerifyShardBeforeCloseAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); + } + + public void testGetReplicationModeWithLocalTranslog() { + TransportVerifyShardBeforeCloseAction action = createAction(); + final 
IndexShard indexShard = mock(IndexShard.class);
+        when(indexShard.isRemoteTranslogEnabled()).thenReturn(false);
+        assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard));
+    }
+
+    private TransportVerifyShardBeforeCloseAction createAction() {
+        return new TransportVerifyShardBeforeCloseAction(
+            Settings.EMPTY,
+            mock(TransportService.class),
+            clusterService,
+            mock(IndicesService.class),
+            mock(ThreadPool.class),
+            mock(ShardStateAction.class),
+            mock(ActionFilters.class)
+        );
+    }
+
     private static ReplicationOperation.Primary<
         TransportVerifyShardBeforeCloseAction.ShardRequest,
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java
new file mode 100644
index 0000000000000..09215088bd04b
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java
@@ -0,0 +1,56 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.flush;
+
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.replication.ReplicationMode;
+import org.opensearch.cluster.action.shard.ShardStateAction;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportService;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TransportShardFlushActionTests extends OpenSearchTestCase {
+
+    public void testGetReplicationModeWithRemoteTranslog() {
+        TransportShardFlushAction action = createAction();
+        final IndexShard indexShard = mock(IndexShard.class);
+        when(indexShard.isRemoteTranslogEnabled()).thenReturn(true);
+        assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard));
+    }
+
+    public void testGetReplicationModeWithLocalTranslog() {
+        TransportShardFlushAction action = createAction();
+        final IndexShard indexShard = mock(IndexShard.class);
+        when(indexShard.isRemoteTranslogEnabled()).thenReturn(false);
+        assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard));
+    }
+
+    private TransportShardFlushAction createAction() {
+        ClusterService clusterService = mock(ClusterService.class);
+        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
+        return new TransportShardFlushAction(
+            Settings.EMPTY,
+            mock(TransportService.class),
+            clusterService,
+            mock(IndicesService.class),
+            mock(ThreadPool.class),
+            mock(ShardStateAction.class),
+            mock(ActionFilters.class)
+        );
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java
new file mode 100644
index 0000000000000..8c4a6c023f9a5
--- /dev/null
+++
b/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.readonly; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.replication.ReplicationMode; +import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportVerifyShardIndexBlockActionTests extends OpenSearchTestCase { + + public void testGetReplicationModeWithRemoteTranslog() { + TransportVerifyShardIndexBlockAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); + } + + public void testGetReplicationModeWithLocalTranslog() { + TransportVerifyShardIndexBlockAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); + } + + private TransportVerifyShardIndexBlockAction createAction() { + ClusterService clusterService = mock(ClusterService.class); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + return new TransportVerifyShardIndexBlockAction( + Settings.EMPTY, + mock(TransportService.class), + clusterService, + mock(IndicesService.class), + mock(ThreadPool.class), + mock(ShardStateAction.class), + mock(ActionFilters.class) + ); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java new file mode 100644 index 0000000000000..b2eee904bad38 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.refresh; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.replication.ReplicationMode; +import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportShardRefreshActionTests extends OpenSearchTestCase { + + public void testGetReplicationModeWithRemoteTranslog() { + TransportShardRefreshAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); + } + + public void testGetReplicationModeWithLocalTranslog() { + TransportShardRefreshAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); + } + + private TransportShardRefreshAction createAction() { + ClusterService clusterService = mock(ClusterService.class); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + return new TransportShardRefreshAction( + Settings.EMPTY, + mock(TransportService.class), + clusterService, + mock(IndicesService.class), + mock(ThreadPool.class), + mock(ShardStateAction.class), + mock(ActionFilters.class) + ); + } +} diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java index b1fa20307a12b..2aff8f6bfc6ab 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java @@ -43,18 +43,32 @@ import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest.RefreshPolicy; +import org.opensearch.action.support.replication.ReplicationMode; +import org.opensearch.action.support.replication.ReplicationTask; +import org.opensearch.action.support.replication.TransportReplicationAction.ReplicaResponse; import org.opensearch.action.support.replication.TransportWriteAction.WritePrimaryResult; import org.opensearch.action.update.UpdateHelper; import org.opensearch.action.update.UpdateRequest; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; +import org.opensearch.cluster.action.index.MappingUpdatedAction; +import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.AllocationId; 
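+    /*
+     * The getReplicationMode tests in this class and in the sibling test classes added by
+     * this patch all exercise the same TransportReplicationAction hook. A minimal sketch of
+     * the override pattern the assertions imply (illustrative only; the production overrides
+     * live in the individual action classes, which are not part of this diff):
+     *
+     *   @Override
+     *   protected ReplicationMode getReplicationMode(IndexShard indexShard) {
+     *       if (indexShard.isRemoteTranslogEnabled()) {
+     *           // NO_REPLICATION for most sync actions; TransportShardBulkAction returns
+     *           // PRIMARY_TERM_VALIDATION, and PublishCheckpointAction keeps FULL_REPLICATION
+     *           return ReplicationMode.NO_REPLICATION;
+     *       }
+     *       return ReplicationMode.FULL_REPLICATION;
+     *   }
+     */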
+import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; +import org.opensearch.index.Index; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.IndexingPressureService; import org.opensearch.index.VersionType; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.VersionConflictEngineException; @@ -62,14 +76,22 @@ import org.opensearch.index.mapper.Mapping; import org.opensearch.index.mapper.MetadataFieldMapper; import org.opensearch.index.mapper.RootObjectMapper; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.shard.ShardId; +import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.SystemIndices; import org.opensearch.rest.RestStatus; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; +import org.opensearch.transport.TestTransportChannel; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportResponse; +import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.Collections; @@ -85,6 +107,7 @@ import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyLong; @@ -1030,6 +1053,161 @@ public void testForceExecutionOnRejectionAfterMappingUpdate() throws Exception { } } + public void testHandlePrimaryTermValidationRequestWithDifferentAllocationId() { + + final String aId = "test-allocation-id"; + final ShardId shardId = new ShardId("test", "_na_", 0); + final ReplicationTask task = createReplicationTask(); + PlainActionFuture<TransportResponse> listener = new PlainActionFuture<>(); + TransportShardBulkAction action = new TransportShardBulkAction( + Settings.EMPTY, + mock(TransportService.class), + mockClusterService(), + mockIndicesService(aId, 1L), + threadPool, + mock(ShardStateAction.class), + mock(MappingUpdatedAction.class), + mock(UpdateHelper.class), + mock(ActionFilters.class), + mock(IndexingPressureService.class), + mock(SystemIndices.class) + ); + action.handlePrimaryTermValidationRequest( + new TransportShardBulkAction.PrimaryTermValidationRequest(aId + "-1", 1, shardId), + createTransportChannel(listener), + task + ); + assertThrows(ShardNotFoundException.class, listener::actionGet); + assertNotNull(task.getPhase()); + assertEquals("failed", task.getPhase()); + } + + public void testHandlePrimaryTermValidationRequestWithOlderPrimaryTerm() { + + final String aId = "test-allocation-id"; + final ShardId shardId = new ShardId("test", "_na_", 0); + final ReplicationTask task = createReplicationTask(); + PlainActionFuture<TransportResponse> listener = new PlainActionFuture<>(); + TransportShardBulkAction action = new TransportShardBulkAction( + Settings.EMPTY, +
mock(TransportService.class), + mockClusterService(), + mockIndicesService(aId, 2L), + threadPool, + mock(ShardStateAction.class), + mock(MappingUpdatedAction.class), + mock(UpdateHelper.class), + mock(ActionFilters.class), + mock(IndexingPressureService.class), + mock(SystemIndices.class) + ); + action.handlePrimaryTermValidationRequest( + new TransportShardBulkAction.PrimaryTermValidationRequest(aId, 1, shardId), + createTransportChannel(listener), + task + ); + assertThrows(IllegalStateException.class, listener::actionGet); + assertNotNull(task.getPhase()); + assertEquals("failed", task.getPhase()); + } + + public void testHandlePrimaryTermValidationRequestSuccess() { + + final String aId = "test-allocation-id"; + final ShardId shardId = new ShardId("test", "_na_", 0); + final ReplicationTask task = createReplicationTask(); + PlainActionFuture<TransportResponse> listener = new PlainActionFuture<>(); + TransportShardBulkAction action = new TransportShardBulkAction( + Settings.EMPTY, + mock(TransportService.class), + mockClusterService(), + mockIndicesService(aId, 1L), + threadPool, + mock(ShardStateAction.class), + mock(MappingUpdatedAction.class), + mock(UpdateHelper.class), + mock(ActionFilters.class), + mock(IndexingPressureService.class), + mock(SystemIndices.class) + ); + action.handlePrimaryTermValidationRequest( + new TransportShardBulkAction.PrimaryTermValidationRequest(aId, 1, shardId), + createTransportChannel(listener), + task + ); + assertTrue(listener.actionGet() instanceof ReplicaResponse); + assertEquals(SequenceNumbers.NO_OPS_PERFORMED, ((ReplicaResponse) listener.actionGet()).localCheckpoint()); + assertEquals(SequenceNumbers.NO_OPS_PERFORMED, ((ReplicaResponse) listener.actionGet()).globalCheckpoint()); + assertNotNull(task.getPhase()); + assertEquals("finished", task.getPhase()); + } + + public void testGetReplicationModeWithRemoteTranslog() { + TransportShardBulkAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + assertEquals(ReplicationMode.PRIMARY_TERM_VALIDATION, action.getReplicationMode(indexShard)); + } + + public void testGetReplicationModeWithLocalTranslog() { + TransportShardBulkAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); + } + + private TransportShardBulkAction createAction() { + return new TransportShardBulkAction( + Settings.EMPTY, + mock(TransportService.class), + mockClusterService(), + mock(IndicesService.class), + threadPool, + mock(ShardStateAction.class), + mock(MappingUpdatedAction.class), + mock(UpdateHelper.class), + mock(ActionFilters.class), + mock(IndexingPressureService.class), + mock(SystemIndices.class) + ); + } + + private ClusterService mockClusterService() { + ClusterService clusterService = mock(ClusterService.class); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + return clusterService; + } + + private IndicesService mockIndicesService(String aId, long primaryTerm) { + // Mock a few of the required classes + IndicesService indicesService = mock(IndicesService.class); + IndexService indexService = mock(IndexService.class); + IndexShard indexShard = mock(IndexShard.class); + 
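+        // The stubbing chain below (indicesService -> indexService -> indexShard -> routingEntry
+        // -> allocationId) mirrors the lookup path handlePrimaryTermValidationRequest walks to
+        // resolve the shard, so each test above only has to vary the allocation id and primary term.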
when(indicesService.indexServiceSafe(any(Index.class))).thenReturn(indexService); + when(indexService.getShard(anyInt())).thenReturn(indexShard); + when(indexShard.getOperationPrimaryTerm()).thenReturn(primaryTerm); + + // Mock routing entry, allocation id + AllocationId allocationId = mock(AllocationId.class); + ShardRouting shardRouting = mock(ShardRouting.class); + when(indexShard.routingEntry()).thenReturn(shardRouting); + when(shardRouting.allocationId()).thenReturn(allocationId); + when(allocationId.getId()).thenReturn(aId); + return indicesService; + } + + private ReplicationTask createReplicationTask() { + return new ReplicationTask(0, null, null, null, null, null); + } + + /** + * Transport channel that is needed for replica operation testing. + */ + private TransportChannel createTransportChannel(final PlainActionFuture<TransportResponse> listener) { + return new TestTransportChannel(listener); + } + private void randomlySetIgnoredPrimaryResponse(BulkItemRequest primaryRequest) { if (randomBoolean()) { // add a response to the request and thereby check that it is ignored for the primary. diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index acf46e2a63333..d6d944b5b9b45 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -38,6 +38,7 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.replication.PendingReplicationActions; +import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.block.ClusterBlocks; @@ -50,6 +51,7 @@ import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.lease.Releasable; import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.index.Index; @@ -68,6 +70,7 @@ import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; import java.nio.charset.Charset; @@ -222,4 +225,35 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { } } } + + public void testGetReplicationModeWithRemoteTranslog() { + final TransportResyncReplicationAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); + } + + public void testGetReplicationModeWithLocalTranslog() { + final TransportResyncReplicationAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); + } + + private TransportResyncReplicationAction createAction() { + ClusterService clusterService = mock(ClusterService.class); + 
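+        // A real ClusterSettings instance is stubbed in below rather than a mock, presumably
+        // because the replication action registers settings-update consumers at construction time.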
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + return new TransportResyncReplicationAction( + Settings.EMPTY, + mock(TransportService.class), + clusterService, + mock(IndicesService.class), + threadPool, + mock(ShardStateAction.class), + new ActionFilters(new HashSet<>()), + mock(IndexingPressureService.class), + new SystemIndices(emptyMap()) + ); + } } diff --git a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java index 3a689e356bbdf..c5d4f3326746d 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java @@ -168,7 +168,7 @@ public void testReplication() throws Exception { listener, replicasProxy, primaryTerm, - new FanoutReplicationProxy<>() + new FanoutReplicationProxy<>(replicasProxy) ); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); @@ -239,7 +239,7 @@ public void testReplicationWithRemoteTranslogEnabled() throws Exception { listener, replicasProxy, 0, - new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION) + new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, replicasProxy, replicasProxy) ); op.execute(); assertTrue("request was not processed on primary", request.processedOnPrimary.get()); @@ -304,7 +304,7 @@ public void testPrimaryToPrimaryReplicationWithRemoteTranslogEnabled() throws Ex listener, replicasProxy, 0, - new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION) + new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, replicasProxy, replicasProxy) ); op.execute(); assertTrue("request was not processed on primary", request.processedOnPrimary.get()); @@ -366,7 +366,7 @@ public void testForceReplicationWithRemoteTranslogEnabled() throws Exception { listener, replicasProxy, 0, - new FanoutReplicationProxy<>() + new FanoutReplicationProxy<>(replicasProxy) ); op.execute(); assertTrue("request was not processed on primary", request.processedOnPrimary.get()); @@ -448,7 +448,7 @@ public void testRetryTransientReplicationFailure() throws Exception { primaryTerm, TimeValue.timeValueMillis(20), TimeValue.timeValueSeconds(60), - new FanoutReplicationProxy<>() + new FanoutReplicationProxy<>(replicasProxy) ); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); @@ -591,7 +591,7 @@ public void failShard(String message, Exception exception) { listener, replicasProxy, primaryTerm, - new FanoutReplicationProxy<>() + new FanoutReplicationProxy<>(replicasProxy) ); op.execute(); @@ -657,7 +657,7 @@ public void perform(Request request, ActionListener listener) { listener, new TestReplicaProxy(), primaryTerm, - new FanoutReplicationProxy<>() + new FanoutReplicationProxy<>(new TestReplicaProxy()) ); op.execute(); @@ -714,7 +714,7 @@ public void testWaitForActiveShards() throws Exception { threadPool, "test", primaryTerm, - new FanoutReplicationProxy<>() + new FanoutReplicationProxy<>(new TestReplicaProxy()) ); if (passesActiveShardCheck) { @@ -781,7 +781,7 @@ public void updateLocalCheckpointForShard(String allocationId, long checkpoint) listener, replicas, primaryTerm, - new FanoutReplicationProxy<>() + new 
FanoutReplicationProxy<>(replicas) ); operation.execute(); diff --git a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java index 1e20c2e948f6e..75063d76ff8dc 100644 --- a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; +import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; @@ -152,4 +153,31 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { } } + public void testGetReplicationModeWithRemoteTranslog() { + final GlobalCheckpointSyncAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); + } + + public void testGetReplicationModeWithLocalTranslog() { + final GlobalCheckpointSyncAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); + } + + private GlobalCheckpointSyncAction createAction() { + final IndicesService indicesService = mock(IndicesService.class); + return new GlobalCheckpointSyncAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()) + ); + } + } diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index 54a88d57b2b69..2e058b6dab560 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; @@ -209,4 +210,30 @@ public void testBlocks() { assertNull(action.indexBlockLevel()); } + public void testGetReplicationModeWithRemoteTranslog() { + final RetentionLeaseBackgroundSyncAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); + } + + public void testGetReplicationModeWithLocalTranslog() { + final RetentionLeaseBackgroundSyncAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + assertEquals(ReplicationMode.FULL_REPLICATION, 
action.getReplicationMode(indexShard)); + } + + private RetentionLeaseBackgroundSyncAction createAction() { + return new RetentionLeaseBackgroundSyncAction( + Settings.EMPTY, + transportService, + clusterService, + mock(IndicesService.class), + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()) + ); + } + } diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java index 60ee3360ff235..b07b740fe3744 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; @@ -206,4 +207,32 @@ public void testBlocks() { assertNull(action.indexBlockLevel()); } + public void testGetReplicationModeWithRemoteTranslog() { + final RetentionLeaseSyncAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); + } + + public void testGetReplicationModeWithLocalTranslog() { + final RetentionLeaseSyncAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); + } + + private RetentionLeaseSyncAction createAction() { + return new RetentionLeaseSyncAction( + Settings.EMPTY, + transportService, + clusterService, + mock(IndicesService.class), + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + new IndexingPressureService(Settings.EMPTY, clusterService), + new SystemIndices(emptyMap()) + ); + } + } diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java index 23fad53dd1201..728397e665a49 100644 --- a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -12,6 +12,7 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; @@ -158,4 +159,31 @@ public void testPublishCheckpointActionOnReplica() { } + public void testGetReplicationModeWithRemoteTranslog() { + final PublishCheckpointAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + assertEquals(ReplicationMode.FULL_REPLICATION, 
action.getReplicationMode(indexShard)); + } + + public void testGetReplicationModeWithLocalTranslog() { + final PublishCheckpointAction action = createAction(); + final IndexShard indexShard = mock(IndexShard.class); + when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); + } + + private PublishCheckpointAction createAction() { + return new PublishCheckpointAction( + Settings.EMPTY, + transportService, + clusterService, + mock(IndicesService.class), + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + mock(SegmentReplicationTargetService.class) + ); + } + } diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 92c80ac1799ef..f4babda725057 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -729,7 +729,7 @@ public void execute() { primaryTerm, TimeValue.timeValueMillis(20), TimeValue.timeValueSeconds(60), - new FanoutReplicationProxy<>() + new FanoutReplicationProxy<>(new ReplicasRef()) ).execute(); } catch (Exception e) { listener.onFailure(e); From 5cfae6e05ad3a686d32e0769a2b4c2749ed24e89 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Tue, 27 Dec 2022 11:34:14 +0530 Subject: [PATCH 03/13] [Remote Translog] Introduce remote translog with upload functionality (#5392) * Introduce remote translog with upload functionality Signed-off-by: Gaurav Bafna Co-authored-by: Bukhtawar Khan --- .../common/blobstore/fs/FsBlobStore.java | 2 +- .../common/io/stream/BytesStreamInput.java | 15 +- .../translog/InternalTranslogFactory.java | 2 +- .../index/translog/LocalTranslog.java | 160 +++ .../index/translog/RemoteFsTranslog.java | 245 ++++ .../opensearch/index/translog/Translog.java | 108 +- .../index/translog/TranslogWriter.java | 5 +- .../translog/TruncateTranslogAction.java | 2 +- .../transfer/BlobStoreTransferService.java | 11 + .../transfer/FileTransferTracker.java | 88 ++ .../translog/transfer/TransferService.java | 19 + .../TranslogCheckpointTransferSnapshot.java | 30 +- .../transfer/TranslogTransferManager.java | 17 +- .../org/opensearch/threadpool/ThreadPool.java | 6 + .../index/engine/InternalEngineTests.java | 5 +- ...slogTests.java => LocalTranslogTests.java} | 222 ++- .../index/translog/RemoteFSTranslogTests.java | 1262 +++++++++++++++++ .../index/translog/TestTranslog.java | 85 ++ .../translog/TranslogManagerTestCase.java | 2 +- .../BlobStoreTransferServiceTests.java | 10 + .../transfer/FileTransferTrackerTests.java | 77 + .../threadpool/ScalingThreadPoolTests.java | 1 + .../index/engine/EngineTestCase.java | 5 +- 23 files changed, 2128 insertions(+), 251 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/translog/LocalTranslog.java create mode 100644 server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java create mode 100644 server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java rename server/src/test/java/org/opensearch/index/translog/{TranslogTests.java => LocalTranslogTests.java} (96%) create mode 100644 server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java create mode 
100644 server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java diff --git a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobStore.java index f25a741b93c8d..6944e01d8ee24 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobStore.java @@ -90,7 +90,7 @@ public void close() { // nothing to do here... } - private synchronized Path buildAndCreate(BlobPath path) throws IOException { + protected synchronized Path buildAndCreate(BlobPath path) throws IOException { Path f = buildPath(path); if (readOnly == false) { Files.createDirectories(f); diff --git a/server/src/main/java/org/opensearch/common/io/stream/BytesStreamInput.java b/server/src/main/java/org/opensearch/common/io/stream/BytesStreamInput.java index 1572cd1f500f4..8bf1fe846cd8b 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/BytesStreamInput.java +++ b/server/src/main/java/org/opensearch/common/io/stream/BytesStreamInput.java @@ -80,15 +80,19 @@ public void skipBytes(long count) { pos += count; } - // NOTE: AIOOBE not EOF if you read too much @Override - public byte readByte() { + public byte readByte() throws EOFException { + if (eof()) { + throw new EOFException(); + } return bytes[pos++]; } - // NOTE: AIOOBE not EOF if you read too much @Override - public void readBytes(byte[] b, int offset, int len) { + public void readBytes(byte[] b, int offset, int len) throws EOFException { + if (available() < len) { + throw new EOFException(); + } System.arraycopy(bytes, pos, b, offset, len); pos += len; } @@ -111,6 +115,9 @@ protected void ensureCanReadBytes(int length) throws EOFException { @Override public int read() throws IOException { + if (eof()) { + throw new EOFException(); + } return bytes[pos++] & 0xFF; } diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java index 566eda4fe4a6e..a363992203721 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java @@ -29,7 +29,7 @@ public Translog newTranslog( LongConsumer persistedSequenceNumberConsumer ) throws IOException { - return new Translog( + return new LocalTranslog( translogConfig, translogUUID, translogDeletionPolicy, diff --git a/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java b/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java new file mode 100644 index 0000000000000..404132f45f7cb --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog; + +import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.core.internal.io.IOUtils; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; + +/** + * A {@link Translog} implementation that creates translog files in local filesystem. 
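+ * <p>
+ * A minimal usage sketch (illustrative only; production code obtains its translog through
+ * {@code InternalTranslogFactory} rather than constructing a LocalTranslog directly):
+ * <pre>{@code
+ * Translog translog = new LocalTranslog(config, translogUUID, deletionPolicy,
+ *     globalCheckpointSupplier, primaryTermSupplier, seqNo -> {});
+ * Translog.Location location = translog.add(operation);
+ * translog.ensureSynced(location); // fsync up to this location if not already synced
+ * }</pre>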
+ * @opensearch.internal + */ +public class LocalTranslog extends Translog { + + /** + * Creates a new Translog instance. This method will create a new transaction log unless the given {@link TranslogGeneration} is + * {@code null}. If the generation is {@code null} this method is destructive and will delete all files in the translog path given. If + * the generation is not {@code null}, this method tries to open the given translog generation. The generation is treated as the last + * generation referenced from already committed data. This means all operations that have not yet been committed should be in the + * translog file referenced by this generation. The translog creation will fail if this generation can't be opened. + * + * @param config the configuration of this translog + * @param translogUUID the translog uuid to open, null for a new translog + * @param deletionPolicy an instance of {@link TranslogDeletionPolicy} that controls when a translog file can be safely + * deleted + * @param globalCheckpointSupplier a supplier for the global checkpoint + * @param primaryTermSupplier a supplier for the latest value of primary term of the owning index shard. The latest term value is + * examined and stored in the header whenever a new generation is rolled. It's guaranteed from outside + * that a new generation is rolled when the term is increased. This guarantee allows us to validate + * and reject operations whose term is higher than the primary term stored in the translog header. + * @param persistedSequenceNumberConsumer a callback that's called whenever an operation with a given sequence number is successfully + * persisted. + */ + public LocalTranslog( + final TranslogConfig config, + final String translogUUID, + TranslogDeletionPolicy deletionPolicy, + final LongSupplier globalCheckpointSupplier, + final LongSupplier primaryTermSupplier, + final LongConsumer persistedSequenceNumberConsumer + ) throws IOException { + super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer); + try { + final Checkpoint checkpoint = readCheckpoint(location); + final Path nextTranslogFile = location.resolve(getFilename(checkpoint.generation + 1)); + final Path currentCheckpointFile = location.resolve(getCommitCheckpointFileName(checkpoint.generation)); + // this is special handling for error condition when we create a new writer but we fail to bake + // the newly written file (generation+1) into the checkpoint. This is still a valid state + // we just need to clean up before we continue + // we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this: + // https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example + // + // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that + // file exists. If not we don't even try to clean it up and wait until we fail creating it + assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]"; + if (Files.exists(currentCheckpointFile) // current checkpoint is already copied + && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning + logger.warn( + "deleted previously created, but not yet committed, next generation [{}]. 
This can happen due to a" + + " tragic exception when creating a new generation", + nextTranslogFile.getFileName() + ); + } + this.readers.addAll(recoverFromFiles(checkpoint)); + if (readers.isEmpty()) { + throw new IllegalStateException("at least one reader must be recovered"); + } + boolean success = false; + current = null; + try { + current = createWriter( + checkpoint.generation + 1, + getMinFileGeneration(), + checkpoint.globalCheckpoint, + persistedSequenceNumberConsumer + ); + success = true; + } finally { + // we have to close all the recovered ones otherwise we leak file handles here + // for instance if we have a lot of tlog and we can't create the writer we keep on holding + // on to all the uncommitted tlog files if we don't close + if (success == false) { + IOUtils.closeWhileHandlingException(readers); + } + } + } catch (Exception e) { + // close the opened translog files if we fail to create a new translog... + IOUtils.closeWhileHandlingException(current); + IOUtils.closeWhileHandlingException(readers); + throw e; + } + } + + /** + * Ensures that the given location has been synced / written to the underlying storage. + * + * @return Returns true iff this call caused an actual sync operation otherwise false + */ + @Override + public boolean ensureSynced(Location location) throws IOException { + try (ReleasableLock ignored = readLock.acquire()) { + if (location.generation == current.getGeneration()) { // if we have a new one it's already synced + ensureOpen(); + return current.syncUpTo(location.translogLocation + location.size); + } + } catch (final Exception ex) { + closeOnTragicEvent(ex); + throw ex; + } + return false; + } + + /** + * Returns stats for this translog + */ + @Override + public TranslogStats stats() { + // acquire lock to make the two numbers roughly consistent (no file change half way) + try (ReleasableLock lock = readLock.acquire()) { + long uncommittedGen = getMinGenerationForSeqNo(deletionPolicy.getLocalCheckpointOfSafeCommit() + 1).translogFileGeneration; + return new TranslogStats( + totalOperations(), + sizeInBytes(), + totalOperationsByMinGen(uncommittedGen), + sizeInBytesByMinGen(uncommittedGen), + earliestLastModifiedAge() + ); + } + } + + @Override + public void close() throws IOException { + assert Translog.calledFromOutsideOrViaTragedyClose() : "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method"; + if (closed.compareAndSet(false, true)) { + try (ReleasableLock lock = writeLock.acquire()) { + try { + current.sync(); + } finally { + closeFilesIfNoPendingRetentionLocks(); + } + } finally { + logger.debug("translog closed"); + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java new file mode 100644 index 0000000000000..2af41367d860b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -0,0 +1,245 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.translog; + +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.index.translog.transfer.FileTransferTracker; +import org.opensearch.index.translog.transfer.TransferSnapshot; +import org.opensearch.index.translog.transfer.TranslogCheckpointTransferSnapshot; +import org.opensearch.index.translog.transfer.TranslogTransferManager; +import org.opensearch.index.translog.transfer.listener.TranslogTransferListener; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.io.IOException; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; + +/** + * A Translog implementation which syncs the local filesystem with a remote store. + * On every sync, the current implementation uploads the translog and checkpoint files, + * along with metadata, to the remote store after they have been synced to disk; a new + * generation is then created. + * + * @opensearch.internal + */ +public class RemoteFsTranslog extends Translog { + + private final BlobStoreRepository blobStoreRepository; + private final TranslogTransferManager translogTransferManager; + private final FileTransferTracker fileTransferTracker; + private volatile long maxRemoteTranslogGenerationUploaded; + + public RemoteFsTranslog( + TranslogConfig config, + String translogUUID, + TranslogDeletionPolicy deletionPolicy, + LongSupplier globalCheckpointSupplier, + LongSupplier primaryTermSupplier, + LongConsumer persistedSequenceNumberConsumer, + BlobStoreRepository blobStoreRepository, + ExecutorService executorService + ) throws IOException { + super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer); + this.blobStoreRepository = blobStoreRepository; + fileTransferTracker = new FileTransferTracker(shardId); + this.translogTransferManager = new TranslogTransferManager( + new BlobStoreTransferService(blobStoreRepository.blobStore(), executorService), + blobStoreRepository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())), + fileTransferTracker, + fileTransferTracker::exclusionFilter + ); + + try { + final Checkpoint checkpoint = readCheckpoint(location); + this.readers.addAll(recoverFromFiles(checkpoint)); + if (readers.isEmpty()) { + throw new IllegalStateException("at least one reader must be recovered"); + } + boolean success = false; + current = null; + try { + current = createWriter( + checkpoint.generation + 1, + getMinFileGeneration(), + checkpoint.globalCheckpoint, + persistedSequenceNumberConsumer + ); + success = true; + } finally { + // we have to close all the recovered ones otherwise we leak file handles here + // for instance if we have a lot of tlog and we can't create the writer we keep on holding + // on to all the uncommitted tlog files if we don't close + if (success == false) { + IOUtils.closeWhileHandlingException(readers); + } + } + } catch (Exception e) { + // close the opened translog files if we fail to create a new translog... 
+ IOUtils.closeWhileHandlingException(current); + IOUtils.closeWhileHandlingException(readers); + throw e; + } + } + + @Override + public boolean ensureSynced(Location location) throws IOException { + try (ReleasableLock ignored = writeLock.acquire()) { + assert location.generation <= current.getGeneration(); + if (location.generation == current.getGeneration()) { + ensureOpen(); + return prepareAndUpload(primaryTermSupplier.getAsLong(), location.generation); + } + } catch (final Exception ex) { + closeOnTragicEvent(ex); + throw ex; + } + return false; + } + + @Override + public void rollGeneration() throws IOException { + syncBeforeRollGeneration(); + if (current.totalOperations() == 0 && primaryTermSupplier.getAsLong() == current.getPrimaryTerm()) { + return; + } + prepareAndUpload(primaryTermSupplier.getAsLong(), null); + } + + private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOException { + try (Releasable ignored = writeLock.acquire()) { + if (generation == null || generation == current.getGeneration()) { + try { + final TranslogReader reader = current.closeIntoReader(); + readers.add(reader); + copyCheckpointTo(location.resolve(getCommitCheckpointFileName(current.getGeneration()))); + if (closed.get() == false) { + logger.trace("Creating new writer for gen: [{}]", current.getGeneration() + 1); + current = createWriter(current.getGeneration() + 1); + } + } catch (final Exception e) { + tragedy.setTragicException(e); + closeOnTragicEvent(e); + throw e; + } + } else if (generation < current.getGeneration()) { + return false; + } + + // Do we need remote writes in sync fashion? + // If we don't, we should swallow FileAlreadyExistsException while writing to remote store + // and also verify the same during primary-primary relocation + // Writing remote in sync fashion doesn't hurt as the global ckp + // is not updated in remote translog except in primary to primary recovery. 
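+            // Upload target for the branch below: when called from sync()/rollGeneration() the
+            // generation argument is null and the reader just closed is uploaded -- that is
+            // current - 1 when a new writer was created above, or the current generation itself
+            // when the translog was concurrently closed and no new writer exists. An explicitly
+            // requested generation is uploaded as-is.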
+ if (generation == null) { + if (closed.get() == false) { + return upload(primaryTerm, current.getGeneration() - 1); + } else { + return upload(primaryTerm, current.getGeneration()); + } + } else { + return upload(primaryTerm, generation); + } + } + } + + private boolean upload(Long primaryTerm, Long generation) throws IOException { + logger.trace("uploading translog for {} {} ", primaryTerm, generation); + try ( + TranslogCheckpointTransferSnapshot transferSnapshotProvider = new TranslogCheckpointTransferSnapshot.Builder( + primaryTerm, + generation, + location, + readers, + Translog::getCommitCheckpointFileName + ).build() + ) { + Releasable transferReleasable = Releasables.wrap(deletionPolicy.acquireTranslogGen(getMinFileGeneration())); + return translogTransferManager.transferSnapshot(transferSnapshotProvider, new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException { + transferReleasable.close(); + closeFilesIfNoPendingRetentionLocks(); + maxRemoteTranslogGenerationUploaded = generation; + logger.trace("uploaded translog for {} {} ", primaryTerm, generation); + } + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) throws IOException { + transferReleasable.close(); + closeFilesIfNoPendingRetentionLocks(); + if (ex instanceof IOException) { + throw (IOException) ex; + } else { + throw (RuntimeException) ex; + } + } + }); + } + + } + + // Visible for testing + public Set<String> allUploaded() { + return fileTransferTracker.allUploaded(); + } + + private boolean syncToDisk() throws IOException { + try (ReleasableLock lock = readLock.acquire()) { + return current.sync(); + } catch (final Exception ex) { + closeOnTragicEvent(ex); + throw ex; + } + } + + @Override + public void sync() throws IOException { + try { + if (syncToDisk() || syncNeeded()) { + prepareAndUpload(primaryTermSupplier.getAsLong(), null); + } + } catch (final Exception e) { + tragedy.setTragicException(e); + closeOnTragicEvent(e); + throw e; + } + } + + /** + * Returns true if an fsync and/or remote transfer is required to ensure durability of the translog's operations or its metadata. 
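+ * <pre>{@code
+ * // Illustrative caller flow: sync() fsyncs the current writer via syncToDisk() and then
+ * // uploads a new generation through prepareAndUpload() when operations still need to be
+ * // made durable remotely.
+ * if (translog.syncNeeded()) {
+ *     translog.sync();
+ * }
+ * }</pre>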
+ */ + public boolean syncNeeded() { + try (ReleasableLock lock = readLock.acquire()) { + return current.syncNeeded() + || (maxRemoteTranslogGenerationUploaded + 1 < this.currentFileGeneration() && current.totalOperations() == 0); + } + } + + @Override + public void close() throws IOException { + assert Translog.calledFromOutsideOrViaTragedyClose() : "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method"; + if (closed.compareAndSet(false, true)) { + try (ReleasableLock lock = writeLock.acquire()) { + sync(); + } finally { + logger.debug("translog closed"); + closeFilesIfNoPendingRetentionLocks(); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java index 7f22ad1bf320d..f5a9faff8bfff 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -112,7 +112,7 @@ * * @opensearch.internal */ -public class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable { +public abstract class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable { /* * TODO @@ -134,21 +134,21 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public static final int DEFAULT_HEADER_SIZE_IN_BYTES = TranslogHeader.headerSizeInBytes(UUIDs.randomBase64UUID()); // the list of translog readers is guaranteed to be in order of translog generation - private final List<TranslogReader> readers = new ArrayList<>(); - private final BigArrays bigArrays; + protected final List<TranslogReader> readers = new ArrayList<>(); + protected final BigArrays bigArrays; protected final ReleasableLock readLock; protected final ReleasableLock writeLock; - private final Path location; - private TranslogWriter current; + protected final Path location; + protected TranslogWriter current; protected final TragicExceptionHolder tragedy = new TragicExceptionHolder(); - private final AtomicBoolean closed = new AtomicBoolean(); - private final TranslogConfig config; - private final LongSupplier globalCheckpointSupplier; - private final LongSupplier primaryTermSupplier; - private final String translogUUID; - private final TranslogDeletionPolicy deletionPolicy; - private final LongConsumer persistedSequenceNumberConsumer; + protected final AtomicBoolean closed = new AtomicBoolean(); + protected final TranslogConfig config; + protected final LongSupplier globalCheckpointSupplier; + protected final LongSupplier primaryTermSupplier; + protected final String translogUUID; + protected final TranslogDeletionPolicy deletionPolicy; + protected final LongConsumer persistedSequenceNumberConsumer; /** * Creates a new Translog instance. This method will create a new transaction log unless the given {@link TranslogGeneration} is @@ -190,61 +190,10 @@ public Translog( writeLock = new ReleasableLock(rwl.writeLock()); this.location = config.getTranslogPath(); Files.createDirectories(this.location); - - try { - final Checkpoint checkpoint = readCheckpoint(location); - final Path nextTranslogFile = location.resolve(getFilename(checkpoint.generation + 1)); - final Path currentCheckpointFile = location.resolve(getCommitCheckpointFileName(checkpoint.generation)); - // this is special handling for error condition when we create a new writer but we fail to bake - // the newly written file (generation+1) into the checkpoint. 
This is still a valid state - // we just need to cleanup before we continue - // we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this: - // https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example - // - // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that - // file exists. If not we don't even try to clean it up and wait until we fail creating it - assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) - : "unexpected translog file: [" + nextTranslogFile + "]"; - if (Files.exists(currentCheckpointFile) // current checkpoint is already copied - && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning - logger.warn( - "deleted previously created, but not yet committed, next generation [{}]. This can happen due to a" - + " tragic exception when creating a new generation", - nextTranslogFile.getFileName() - ); - } - this.readers.addAll(recoverFromFiles(checkpoint)); - if (readers.isEmpty()) { - throw new IllegalStateException("at least one reader must be recovered"); - } - boolean success = false; - current = null; - try { - current = createWriter( - checkpoint.generation + 1, - getMinFileGeneration(), - checkpoint.globalCheckpoint, - persistedSequenceNumberConsumer - ); - success = true; - } finally { - // we have to close all the recovered ones otherwise we leak file handles here - // for instance if we have a lot of tlog and we can't create the writer we keep on holding - // on to all the uncommitted tlog files if we don't close - if (success == false) { - IOUtils.closeWhileHandlingException(readers); - } - } - } catch (Exception e) { - // close the opened translog files if we fail to create a new translog... - IOUtils.closeWhileHandlingException(current); - IOUtils.closeWhileHandlingException(readers); - throw e; - } } /** recover all translog files found on disk */ - private ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws IOException { + protected ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws IOException { boolean success = false; ArrayList<TranslogReader> foundTranslogs = new ArrayList<>(); try (ReleasableLock ignored = writeLock.acquire()) { @@ -255,7 +204,7 @@ private ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws // the generation id we found in the lucene commit. This gives better error messages if the wrong // translog was found. for (long i = checkpoint.generation; i >= minGenerationToRecoverFrom; i--) { - Path committedTranslogFile = location.resolve(getFilename(i)); + Path committedTranslogFile = location.resolve(Translog.getFilename(i)); if (Files.exists(committedTranslogFile) == false) { throw new TranslogCorruptedException( committedTranslogFile.toString(), @@ -270,7 +219,7 @@ } final Checkpoint readerCheckpoint = i == checkpoint.generation ? 
checkpoint - : Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))); + : Checkpoint.read(location.resolve(Translog.getCommitCheckpointFileName(i))); final TranslogReader reader = openReader(committedTranslogFile, readerCheckpoint); assert reader.getPrimaryTerm() <= primaryTermSupplier.getAsLong() : "Primary terms go backwards; current term [" + primaryTermSupplier.getAsLong() @@ -287,11 +236,11 @@ private ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws // when we clean up files, we first update the checkpoint with a new minReferencedTranslog and then delete them; // if we crash just at the wrong moment, it may be that we leave one unreferenced file behind so we delete it if there IOUtils.deleteFilesIgnoringExceptions( - location.resolve(getFilename(minGenerationToRecoverFrom - 1)), - location.resolve(getCommitCheckpointFileName(minGenerationToRecoverFrom - 1)) + location.resolve(Translog.getFilename(minGenerationToRecoverFrom - 1)), + location.resolve(Translog.getCommitCheckpointFileName(minGenerationToRecoverFrom - 1)) ); - Path commitCheckpoint = location.resolve(getCommitCheckpointFileName(checkpoint.generation)); + Path commitCheckpoint = location.resolve(Translog.getCommitCheckpointFileName(checkpoint.generation)); if (Files.exists(commitCheckpoint)) { Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint); if (checkpoint.equals(checkpointFromDisk) == false) { @@ -317,7 +266,7 @@ private ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws return foundTranslogs; } - private void copyCheckpointTo(Path targetPath) throws IOException { + protected void copyCheckpointTo(Path targetPath) throws IOException { // a temp file to copy checkpoint to - note it must be on the same FS otherwise atomic move won't work final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, CHECKPOINT_SUFFIX); boolean tempFileRenamed = false; @@ -383,7 +332,7 @@ public boolean isOpen() { return closed.get() == false; } - private static boolean calledFromOutsideOrViaTragedyClose() { + protected static boolean calledFromOutsideOrViaTragedyClose() { List<StackTraceElement> frames = Stream.of(Thread.currentThread().getStackTrace()).skip(3). // skip getStackTrace, current method // and close method frames limit(10). // limit depth of analysis to 10 frames, it should be enough to catch closing with, e.g. IOUtils @@ -817,7 +766,7 @@ public static String getFilename(long generation) { return TRANSLOG_FILE_PREFIX + generation + TRANSLOG_FILE_SUFFIX; } - static String getCommitCheckpointFileName(long generation) { + public static String getCommitCheckpointFileName(long generation) { return TRANSLOG_FILE_PREFIX + generation + CHECKPOINT_SUFFIX; } @@ -868,18 +817,7 @@ public void trimOperations(long belowTerm, long aboveSeqNo) throws IOException { * * @return Returns true iff this call caused an actual sync operation otherwise false */ - public boolean ensureSynced(Location location) throws IOException { - try (ReleasableLock lock = readLock.acquire()) { - if (location.generation == current.getGeneration()) { // if we have a new one it's already synced - ensureOpen(); - return current.syncUpTo(location.translogLocation + location.size); - } - } catch (final Exception ex) { - closeOnTragicEvent(ex); - throw ex; - } - return false; - } + public abstract boolean ensureSynced(Location location) throws IOException; /** * Ensures that all locations in the given stream have been synced / written to the underlying storage. 
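Taken together, these hunks turn Translog into an abstract base class: construction-time recovery now lives in the subclasses (LocalTranslog and RemoteFsTranslog, added earlier in this patch), and durability is delegated through the now-abstract ensureSynced(Location). A minimal subclass sketch under those assumptions, mirroring the LocalTranslog implementation above (illustrative only):

    public class SketchTranslog extends Translog {
        public SketchTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy,
                              LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier,
                              LongConsumer persistedSequenceNumberConsumer) throws IOException {
            super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer);
            // subclasses are now responsible for recovering readers and creating the first writer
        }

        @Override
        public boolean ensureSynced(Location location) throws IOException {
            try (ReleasableLock ignored = readLock.acquire()) {
                if (location.generation == current.getGeneration()) {
                    ensureOpen();
                    return current.syncUpTo(location.translogLocation + location.size);
                }
            }
            return false; // an older generation has already been synced
        }
    }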
@@ -1910,7 +1848,7 @@ long getFirstOperationPosition() { // for testing return current.getFirstOperationOffset(); } - private void ensureOpen() { + protected void ensureOpen() { if (closed.get()) { throw new AlreadyClosedException("translog is already closed", tragedy.get()); } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java index 413975f82678b..178cdc110ec3b 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java @@ -359,9 +359,10 @@ synchronized boolean assertNoSeqAbove(long belowTerm, long aboveSeqNo) { * * Note: any exception during the sync process will be interpreted as a tragic exception and the writer will be closed before * raising the exception. + * @return true if this call caused an actual sync operation, false otherwise */ - public void sync() throws IOException { - syncUpTo(Long.MAX_VALUE); + public boolean sync() throws IOException { + return syncUpTo(Long.MAX_VALUE); } /** diff --git a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java index 33294eb9e7d24..9180f110cc020 100644 --- a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java @@ -213,7 +213,7 @@ public long minTranslogGenRequired(List<TranslogReader> readers, TranslogWriter } }; try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( translogConfig, translogUUID, retainAllTranslogPolicy, diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java index 36d9d71217837..3a8e77d4cc1fc 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.io.InputStream; +import java.util.Set; import java.util.concurrent.ExecutorService; /** @@ -68,4 +69,14 @@ public void uploadBlob(final TransferFileSnapshot fileSnapshot, Iterable<String> throw ex; } } + + @Override + public InputStream downloadBlob(Iterable<String> path, String fileName) throws IOException { + return blobStore.blobContainer((BlobPath) path).readBlob(fileName); + } + + @Override + public Set<String> listAll(Iterable<String> path) throws IOException { + return blobStore.blobContainer((BlobPath) path).listBlobs().keySet(); + } } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java new file mode 100644 index 0000000000000..4e697dae5d236 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java new file mode 100644 index 0000000000000..4e697dae5d236 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer; + +import org.opensearch.index.shard.ShardId; +import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; +import org.opensearch.index.translog.transfer.listener.FileTransferListener; + +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +/** + * FileTransferTracker keeps track of translog files uploaded to the remote translog store + */ +public class FileTransferTracker implements FileTransferListener { + + private final ConcurrentHashMap<String, TransferState> fileTransferTracker; + private final ShardId shardId; + + public FileTransferTracker(ShardId shardId) { + this.shardId = shardId; + this.fileTransferTracker = new ConcurrentHashMap<>(); + } + + @Override + public void onSuccess(TransferFileSnapshot fileSnapshot) { + TransferState targetState = TransferState.SUCCESS; + fileTransferTracker.compute(fileSnapshot.getName(), (k, v) -> { + if (v == null || v.validateNextState(targetState)) { + return targetState; + } + throw new IllegalStateException("Unexpected transfer state " + v + " while setting target to " + targetState); + }); + } + + @Override + public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { + TransferState targetState = TransferState.FAILED; + fileTransferTracker.compute(fileSnapshot.getName(), (k, v) -> { + if (v == null || v.validateNextState(targetState)) { + return targetState; + } + throw new IllegalStateException("Unexpected transfer state " + v + " while setting target to " + targetState); + }); + } + + public Set<TransferFileSnapshot> exclusionFilter(Set<TransferFileSnapshot> original) { + return original.stream() + .filter(fileSnapshot -> fileTransferTracker.get(fileSnapshot.getName()) != TransferState.SUCCESS) + .collect(Collectors.toSet()); + } + + public Set<String> allUploaded() { + Set<String> successFileTransferTracker = new HashSet<>(); + fileTransferTracker.forEach((k, v) -> { + if (v == TransferState.SUCCESS) { + successFileTransferTracker.add(k); + } + }); + return successFileTransferTracker; + } + + /** + * Represents the state of the upload operation + */ + private enum TransferState { + SUCCESS, + FAILED; + + public boolean validateNextState(TransferState target) { + switch (this) { + case FAILED: + return true; + case SUCCESS: + return Set.of(SUCCESS).contains(target); + } + return false; + } + } +}
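The tracker, combined with exclusionFilter, is what makes retries idempotent: files that already reached SUCCESS are dropped from the next upload set, while FAILED files are retried. A sketch of the intended flow (the tracker wiring and the upload call itself are assumed, not shown in this patch):

    // Hypothetical flow around FileTransferTracker.
    FileTransferTracker tracker = new FileTransferTracker(shardId);
    Set<TransferFileSnapshot> toUpload = tracker.exclusionFilter(snapshot.getTranslogFileSnapshots());
    for (TransferFileSnapshot file : toUpload) {
        try {
            upload(file);               // placeholder for the actual transfer call
            tracker.onSuccess(file);    // FAILED -> SUCCESS is a legal transition
        } catch (Exception e) {
            tracker.onFailure(file, e); // SUCCESS -> FAILED is rejected by validateNextState
        }
    }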
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java index ed6c185352833..6a67de99287fd 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java @@ -12,6 +12,8 @@ import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import java.io.IOException; +import java.io.InputStream; +import java.util.Set; /** * Interface for the translog transfer service responsible for interacting with a remote store @@ -40,4 +42,21 @@ void uploadBlobAsync( */ void uploadBlob(final TransferFileSnapshot fileSnapshot, Iterable<String> remotePath) throws IOException; + /** + * Lists the files at the given remote path + * @param path the path to list + * @return the set of file names at the path + * @throws IOException the exception while listing the path + */ + Set<String> listAll(Iterable<String> path) throws IOException; + + /** + * Downloads the file from the remote path + * @param path the remote path of the file + * @param fileName the name of the file to download + * @return inputstream of the remote file + * @throws IOException the exception while reading the data + */ + InputStream downloadBlob(Iterable<String> path, String fileName) throws IOException; + } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java index 30b81627614b7..b34c2282e874f 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java @@ -11,8 +11,10 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.index.translog.TranslogReader; +import java.io.Closeable; import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.HashSet; import java.util.LinkedList; import java.util.List; @@ -21,16 +23,16 @@ import java.util.stream.Collectors; import java.util.stream.LongStream; +import static org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot; import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot; -import static org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot; /** * Implementation for a {@link TransferSnapshot} which builds the snapshot from the translog and checkpoint files present on the local-disk * * @opensearch.internal */ -public class TranslogCheckpointTransferSnapshot implements TransferSnapshot { +public class TranslogCheckpointTransferSnapshot implements TransferSnapshot, Closeable { private final Set<Tuple<TranslogFileSnapshot, CheckpointFileSnapshot>> translogCheckpointFileInfoTupleSet; private final int size; @@ -69,6 +71,26 @@ public Set<TransferFileSnapshot> getCheckpointFileSnapshots() { return translogCheckpointFileInfoTupleSet.stream().map(Tuple::v2).collect(Collectors.toSet()); } + public void close() throws IOException { + List<Exception> exceptionList = new ArrayList<>(); + Set<TransferFileSnapshot> fileSnapshots = getTranslogFileSnapshots(); + fileSnapshots.addAll(getCheckpointFileSnapshots()); + + for (FileSnapshot fileSnapshot : fileSnapshots) { + try { + fileSnapshot.close(); + } catch (IOException e) { + exceptionList.add(e); + } + } + + if (!exceptionList.isEmpty()) { + IOException ex = new IOException("IO Exception while closing file snapshots"); + exceptionList.forEach(ex::addSuppressed); + throw ex; + } + } + @Override public String toString() { return new StringBuilder("TranslogTransferSnapshot [").append(" primary term = ") @@ -136,9 +158,11 @@ public TranslogCheckpointTransferSnapshot build() throws IOException { translogTransferSnapshot.setMinTranslogGeneration(highestGenMinTranslogGeneration); assert this.primaryTerm == highestGenPrimaryTerm : "inconsistent primary term"; assert this.generation == highestGeneration : "inconsistent generation"; + final long finalHighestGeneration = highestGeneration; assert LongStream.iterate(lowestGeneration, i -> i + 1) .limit(highestGeneration) + .filter(l -> (l <= finalHighestGeneration)) .boxed() .collect(Collectors.toList()) .equals(generations.stream().sorted().collect(Collectors.toList())) == true : "generation gaps found";
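Because the snapshot is now Closeable and close() folds per-file failures into a single IOException with suppressed causes, a caller can release the underlying translog and checkpoint file handles with try-with-resources. A sketch only; builder, transferManager and listener are assumed names:

    // Hypothetical: release file handles once the transfer attempt finishes.
    try (TranslogCheckpointTransferSnapshot transferSnapshot = builder.build()) {
        transferManager.transferSnapshot(transferSnapshot, listener);
    }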
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 02ebab8ed6826..07cb2805ce1a6 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -42,6 +42,7 @@ public class TranslogTransferManager { private final TransferService transferService; private final BlobPath remoteBaseTransferPath; + private final BlobPath remoteMetadataTransferPath; private final FileTransferListener fileTransferListener; private final UnaryOperator<Set<TransferFileSnapshot>> exclusionFilter; @@ -49,6 +50,8 @@ public class TranslogTransferManager { private static final Logger logger = LogManager.getLogger(TranslogTransferManager.class); + private static final String METADATA_DIR = "metadata"; + public TranslogTransferManager( TransferService transferService, BlobPath remoteBaseTransferPath, @@ -57,6 +60,7 @@ public TranslogTransferManager( ) { this.transferService = transferService; this.remoteBaseTransferPath = remoteBaseTransferPath; + this.remoteMetadataTransferPath = remoteBaseTransferPath.add(METADATA_DIR); this.fileTransferListener = fileTransferListener; this.exclusionFilter = exclusionFilter; } @@ -68,6 +72,11 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans try { toUpload.addAll(exclusionFilter.apply(transferSnapshot.getTranslogFileSnapshots())); toUpload.addAll(exclusionFilter.apply(transferSnapshot.getCheckpointFileSnapshots())); + if (toUpload.isEmpty()) { + logger.trace("Nothing to upload for transfer"); + translogTransferListener.onUploadComplete(transferSnapshot); + return true; + } final CountDownLatch latch = new CountDownLatch(toUpload.size()); LatchedActionListener<TransferFileSnapshot> latchedActionListener = new LatchedActionListener<>( ActionListener.wrap(fileTransferListener::onSuccess, ex -> { @@ -104,15 +113,11 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans throw ex; } if (exceptionList.isEmpty()) { - final TransferFileSnapshot transferFileSnapshot = prepareMetadata(transferSnapshot); - transferService.uploadBlob( - prepareMetadata(transferSnapshot), - remoteBaseTransferPath.add(String.valueOf(transferFileSnapshot.getPrimaryTerm())) - ); + transferService.uploadBlob(prepareMetadata(transferSnapshot), remoteMetadataTransferPath); translogTransferListener.onUploadComplete(transferSnapshot); return true; } else { - Exception ex = new RuntimeException("Failed to upload some files during transfer"); + Exception ex = new IOException("Failed to upload " + exceptionList.size() + " files during transfer"); exceptionList.forEach(ex::addSuppressed); throw ex; }
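The upshot of remoteMetadataTransferPath is a fixed remote layout: translog and checkpoint files are uploaded under per-primary-term directories, while every metadata file now lands under a single metadata/ directory instead of a per-term one. An illustration using the BlobPath API from this patch (the base path and term variables are assumed):

    // Hypothetical layout: <base>/<primaryTerm>/translog-N.tlog vs. <base>/metadata/<metadata file>
    BlobPath dataPath = remoteBaseTransferPath.add(String.valueOf(primaryTerm));
    BlobPath metadataPath = remoteBaseTransferPath.add("metadata"); // same as remoteBaseTransferPath.add(METADATA_DIR)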
diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 928b4871590c6..6f886de9ee88f 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -108,6 +108,7 @@ public static class Names { public static final String FETCH_SHARD_STORE = "fetch_shard_store"; public static final String SYSTEM_READ = "system_read"; public static final String SYSTEM_WRITE = "system_write"; + public static final String TRANSLOG_TRANSFER = "translog_transfer"; } /** @@ -172,6 +173,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.SEARCH_THROTTLED, ThreadPoolType.RESIZABLE); map.put(Names.SYSTEM_READ, ThreadPoolType.FIXED); map.put(Names.SYSTEM_WRITE, ThreadPoolType.FIXED); + map.put(Names.TRANSLOG_TRANSFER, ThreadPoolType.SCALING); THREAD_POOL_TYPES = Collections.unmodifiableMap(map); } @@ -244,6 +246,10 @@ public ThreadPool( ); builders.put(Names.SYSTEM_READ, new FixedExecutorBuilder(settings, Names.SYSTEM_READ, halfProcMaxAt5, 2000, false)); builders.put(Names.SYSTEM_WRITE, new FixedExecutorBuilder(settings, Names.SYSTEM_WRITE, halfProcMaxAt5, 1000, false)); + builders.put( + Names.TRANSLOG_TRANSFER, + new ScalingExecutorBuilder(Names.TRANSLOG_TRANSFER, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)) + ); for (final ExecutorBuilder<?> builder : customBuilders) { if (builders.containsKey(builder.name())) { diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index abc3e8465b22a..878e01c4130bf 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -141,12 +141,13 @@ import org.opensearch.index.shard.ShardUtils; import org.opensearch.index.store.Store; import org.opensearch.index.translog.DefaultTranslogDeletionPolicy; +import org.opensearch.index.translog.LocalTranslog; import org.opensearch.index.translog.SnapshotMatchers; import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogConfig; -import org.opensearch.index.translog.TranslogException; import org.opensearch.index.translog.TranslogDeletionPolicyFactory; +import org.opensearch.index.translog.TranslogException; import org.opensearch.index.translog.listener.TranslogEventListener; import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.IndexSettingsModule; @@ -3664,7 +3665,7 @@ public void testRecoverFromForeignTranslog() throws IOException { final Path badTranslogLog = createTempDir(); final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); - Translog translog = new Translog( + Translog translog = new LocalTranslog( new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogTests.java b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java similarity index 96% rename from server/src/test/java/org/opensearch/index/translog/TranslogTests.java rename to server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java index 964fc8f21c388..8f58016a639e6 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java @@ -113,7 +113,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.Deque; import java.util.HashMap; import java.util.HashSet; @@ -165,7 +164,7 @@ import static org.mockito.Mockito.when; @LuceneTestCase.SuppressFileSystems("ExtrasFS") -public class TranslogTests extends OpenSearchTestCase { +public class LocalTranslogTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); @@ -217,7 +216,7 @@ protected Translog createTranslog(TranslogConfig config) throws IOException { shardId, primaryTerm.get() ); - return new Translog( + return new LocalTranslog( config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()), @@ -228,7 +227,7 @@ protected Translog createTranslog(TranslogConfig config) 
throws IOException { } protected Translog openTranslog(TranslogConfig config, String translogUUID) throws IOException { - return new Translog( + return new LocalTranslog( config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()), @@ -264,7 +263,7 @@ private Translog create(Path path) throws IOException { final TranslogConfig translogConfig = getTranslogConfig(path); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); - return new Translog( + return new LocalTranslog( translogConfig, translogUUID, deletionPolicy, @@ -852,27 +851,12 @@ private void assertFilePresences(Translog translog) { } - static class LocationOperation implements Comparable { - final Translog.Operation operation; - final Translog.Location location; - - LocationOperation(Translog.Operation operation, Translog.Location location) { - this.operation = operation; - this.location = location; - } - - @Override - public int compareTo(LocationOperation o) { - return location.compareTo(o.location); - } - } - public void testConcurrentWritesWithVaryingSize() throws Throwable { final int opsPerThread = randomIntBetween(10, 200); int threadCount = 2 + randomInt(5); logger.info("testing with [{}] threads, each doing [{}] ops", threadCount, opsPerThread); - final BlockingQueue writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread); + final BlockingQueue writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread); Thread[] threads = new Thread[threadCount]; final Exception[] threadExceptions = new Exception[threadCount]; @@ -902,10 +886,10 @@ public void testConcurrentWritesWithVaryingSize() throws Throwable { threads[i].join(60 * 1000); } - List collect = new ArrayList<>(writtenOperations); + List collect = new ArrayList<>(writtenOperations); Collections.sort(collect); try (Translog.Snapshot snapshot = translog.newSnapshot()) { - for (LocationOperation locationOperation : collect) { + for (TestTranslog.LocationOperation locationOperation : collect) { Translog.Operation op = snapshot.next(); assertNotNull(op); Translog.Operation expectedOp = locationOperation.operation; @@ -1319,7 +1303,7 @@ public void testLocationComparison() throws IOException { } assertEquals(max.generation, translog.currentFileGeneration()); - try (Translog.Snapshot snap = new SortedSnapshot(translog.newSnapshot())) { + try (Translog.Snapshot snap = new TestTranslog.SortedSnapshot(translog.newSnapshot())) { Translog.Operation next; Translog.Operation maxOp = null; while ((next = snap.next()) != null) { @@ -1511,7 +1495,7 @@ public int write(ByteBuffer src) throws IOException { ); try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( config, translogUUID, new DefaultTranslogDeletionPolicy(-1, -1, 0), @@ -1626,7 +1610,7 @@ public void force(boolean metaData) throws IOException { ); try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( config, translogUUID, new DefaultTranslogDeletionPolicy(-1, -1, 0), @@ -1732,7 +1716,7 @@ public void testBasicRecovery() throws IOException { assertNull(snapshot.next()); } } else { - translog = new Translog( + translog = new LocalTranslog( config, translogGeneration.translogUUID, translog.getDeletionPolicy(), @@ -1791,7 +1775,7 @@ public void testRecoveryUncommitted() throws IOException { final String translogUUID = translog.getTranslogUUID(); 
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -1807,7 +1791,7 @@ public void testRecoveryUncommitted() throws IOException { translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); - try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) { + try (Translog.Snapshot snapshot = new TestTranslog.SortedSnapshot(translog.newSnapshot())) { int upTo = sync ? translogOperations : prepareOp; for (int i = 0; i < upTo; i++) { Translog.Operation next = snapshot.next(); @@ -1818,7 +1802,7 @@ public void testRecoveryUncommitted() throws IOException { } if (randomBoolean()) { // recover twice try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -1834,7 +1818,7 @@ public void testRecoveryUncommitted() throws IOException { translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); - try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) { + try (Translog.Snapshot snapshot = new TestTranslog.SortedSnapshot(translog.newSnapshot())) { int upTo = sync ? translogOperations : prepareOp; for (int i = 0; i < upTo; i++) { Translog.Operation next = snapshot.next(); @@ -1880,7 +1864,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -1896,7 +1880,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); - try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) { + try (Translog.Snapshot snapshot = new TestTranslog.SortedSnapshot(translog.newSnapshot())) { int upTo = sync ? translogOperations : prepareOp; for (int i = 0; i < upTo; i++) { Translog.Operation next = snapshot.next(); @@ -1908,7 +1892,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { if (randomBoolean()) { // recover twice try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -1924,7 +1908,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); - try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) { + try (Translog.Snapshot snapshot = new TestTranslog.SortedSnapshot(translog.newSnapshot())) { int upTo = sync ? 
translogOperations : prepareOp; for (int i = 0; i < upTo; i++) { Translog.Operation next = snapshot.next(); @@ -1972,7 +1956,14 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); final TranslogCorruptedException translogCorruptedException = expectThrows( TranslogCorruptedException.class, - () -> new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}) + () -> new LocalTranslog( + config, + translogUUID, + deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + seqNo -> {} + ) ); assertThat( translogCorruptedException.getMessage(), @@ -1991,7 +1982,7 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { StandardOpenOption.TRUNCATE_EXISTING ); try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -2007,7 +1998,7 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); - try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) { + try (Translog.Snapshot snapshot = new TestTranslog.SortedSnapshot(translog.newSnapshot())) { int upTo = sync ? translogOperations : prepareOp; for (int i = 0; i < upTo; i++) { Translog.Operation next = snapshot.next(); @@ -2152,7 +2143,7 @@ Collection operations() { public void testRandomExceptionsOnTrimOperations() throws Exception { Path tempDir = createTempDir(); - final FailSwitch fail = new FailSwitch(); + final TestTranslog.FailSwitch fail = new TestTranslog.FailSwitch(); fail.failNever(); TranslogConfig config = getTranslogConfig(tempDir); List fileChannels = new ArrayList<>(); @@ -2289,7 +2280,7 @@ public void testOpenForeignTranslog() throws IOException { final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()); try { - new Translog( + new LocalTranslog( config, foreignTranslog, createTranslogDeletionPolicy(), @@ -2301,7 +2292,7 @@ public void testOpenForeignTranslog() throws IOException { } catch (TranslogCorruptedException ex) { } - this.translog = new Translog( + this.translog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -2335,7 +2326,7 @@ public void testCloseConcurrently() throws Throwable { int threadCount = 2 + randomInt(5); logger.info("testing with [{}] threads, each doing [{}] ops", threadCount, opsPerThread); - final BlockingQueue writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread); + final BlockingQueue writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread); Thread[] threads = new Thread[threadCount]; final Exception[] threadExceptions = new Exception[threadCount]; @@ -2369,11 +2360,11 @@ public void testCloseConcurrently() throws Throwable { } } - private class TranslogThread extends Thread { + class TranslogThread extends Thread { private final CountDownLatch downLatch; private final int opsPerThread; private final int threadId; - private final Collection writtenOperations; + private final Collection writtenOperations; private final Exception[] threadExceptions; private final Translog translog; private final AtomicLong seqNoGenerator; @@ -2383,7 +2374,7 @@ private class TranslogThread extends Thread { CountDownLatch downLatch, int opsPerThread, int threadId, - Collection writtenOperations, + Collection writtenOperations, 
AtomicLong seqNoGenerator, Exception[] threadExceptions ) { @@ -2429,7 +2420,7 @@ public void run() { } Translog.Location loc = add(op); - writtenOperations.add(new LocationOperation(op, loc)); + writtenOperations.add(new TestTranslog.LocationOperation(op, loc)); if (rarely()) { // lets verify we can concurrently read this assertEquals(op, translog.readOperation(loc)); } @@ -2449,7 +2440,7 @@ protected void afterAdd() throws IOException {} public void testFailFlush() throws IOException { Path tempDir = createTempDir(); - final FailSwitch fail = new FailSwitch(); + final TestTranslog.FailSwitch fail = new TestTranslog.FailSwitch(); TranslogConfig config = getTranslogConfig(tempDir); Translog translog = getFailableTranslog(fail, config); @@ -2531,7 +2522,7 @@ public void testFailFlush() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try ( - Translog tlog = new Translog( + Translog tlog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -2595,7 +2586,7 @@ public void testTranslogOpsCountIsCorrect() throws IOException { public void testTragicEventCanBeAnyException() throws IOException { Path tempDir = createTempDir(); - final FailSwitch fail = new FailSwitch(); + final TestTranslog.FailSwitch fail = new TestTranslog.FailSwitch(); TranslogConfig config = getTranslogConfig(tempDir); Translog translog = getFailableTranslog(fail, config, false, true, null, createTranslogDeletionPolicy()); LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly @@ -2623,7 +2614,7 @@ public void testTragicEventCanBeAnyException() throws IOException { public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, InterruptedException { Path tempDir = createTempDir(); - final FailSwitch fail = new FailSwitch(); + final TestTranslog.FailSwitch fail = new TestTranslog.FailSwitch(); TranslogConfig config = getTranslogConfig(tempDir); Translog translog = getFailableTranslog(fail, config); @@ -2635,7 +2626,7 @@ public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, final CountDownLatch downLatch = new CountDownLatch(1); final CountDownLatch added = new CountDownLatch(randomIntBetween(10, 100)); final AtomicLong seqNoGenerator = new AtomicLong(); - List writtenOperations = Collections.synchronizedList(new ArrayList<>()); + List writtenOperations = Collections.synchronizedList(new ArrayList<>()); for (int i = 0; i < threadCount; i++) { final int threadId = i; threads[i] = new TranslogThread(translog, downLatch, 200, threadId, writtenOperations, seqNoGenerator, threadExceptions) { @@ -2688,7 +2679,7 @@ protected void afterAdd() throws IOException { // drop all that haven't been synced writtenOperations.removeIf(next -> checkpoint.offset < (next.location.translogLocation + next.location.size)); try ( - Translog tlog = new Translog( + Translog tlog = new LocalTranslog( config, translogUUID, createTranslogDeletionPolicy(), @@ -2751,7 +2742,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { TranslogConfig config = translog.getConfig(); final TranslogDeletionPolicy deletionPolicy = new DefaultTranslogDeletionPolicy(-1, -1, 0); deletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); - translog = new Translog( + translog = new LocalTranslog( config, translog.getTranslogUUID(), deletionPolicy, @@ -2776,7 +2767,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws 
IOException { */ public void testRecoveryFromFailureOnTrimming() throws IOException { Path tempDir = createTempDir(); - final FailSwitch fail = new FailSwitch(); + final TestTranslog.FailSwitch fail = new TestTranslog.FailSwitch(); fail.failNever(); final TranslogConfig config = getTranslogConfig(tempDir); final long localCheckpoint; @@ -2820,7 +2811,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { final TranslogDeletionPolicy deletionPolicy = new DefaultTranslogDeletionPolicy(-1, -1, 0); deletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -2840,46 +2831,12 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { } } - private Translog getFailableTranslog(FailSwitch fail, final TranslogConfig config) throws IOException { + private Translog getFailableTranslog(TestTranslog.FailSwitch fail, final TranslogConfig config) throws IOException { return getFailableTranslog(fail, config, randomBoolean(), false, null, createTranslogDeletionPolicy()); } - private static class FailSwitch { - private volatile int failRate; - private volatile boolean onceFailedFailAlways = false; - - public boolean fail() { - final int rnd = randomIntBetween(1, 100); - boolean fail = rnd <= failRate; - if (fail && onceFailedFailAlways) { - failAlways(); - } - return fail; - } - - public void failNever() { - failRate = 0; - } - - public void failAlways() { - failRate = 100; - } - - public void failRandomly() { - failRate = randomIntBetween(1, 100); - } - - public void failRate(int rate) { - failRate = rate; - } - - public void onceFailedFailAlways() { - onceFailedFailAlways = true; - } - } - private Translog getFailableTranslog( - final FailSwitch fail, + final TestTranslog.FailSwitch fail, final TranslogConfig config, final boolean partialWrites, final boolean throwUnknownException, @@ -2890,7 +2847,7 @@ private Translog getFailableTranslog( } private Translog getFailableTranslog( - final FailSwitch fail, + final TestTranslog.FailSwitch fail, final TranslogConfig config, final boolean partialWrites, final boolean throwUnknownException, @@ -2930,7 +2887,14 @@ private Translog getFailableTranslog( primaryTerm.get() ); } - return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}) { + return new LocalTranslog( + config, + translogUUID, + deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + seqNo -> {} + ) { @Override ChannelFactory getChannelFactory() { return channelFactory; @@ -2949,11 +2913,11 @@ void deleteReaderFiles(TranslogReader reader) { } public static class ThrowingFileChannel extends FilterFileChannel { - private final FailSwitch fail; + private final TestTranslog.FailSwitch fail; private final boolean partialWrite; private final boolean throwUnknownException; - public ThrowingFileChannel(FailSwitch fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) + public ThrowingFileChannel(TestTranslog.FailSwitch fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException { super(delegate); this.fail = fail; @@ -3068,7 +3032,7 @@ public void testFailWhileCreateWriteWithRecoveredTLogs() throws IOException { translog.add(new Translog.Index("boom", 0, primaryTerm.get(), "boom".getBytes(Charset.forName("UTF-8")))); translog.close(); try { - new Translog( + new 
LocalTranslog( config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), @@ -3136,7 +3100,7 @@ public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { TranslogException ex = expectThrows( TranslogException.class, - () -> new Translog( + () -> new LocalTranslog( config, translog.getTranslogUUID(), translog.getDeletionPolicy(), @@ -3163,7 +3127,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { // we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog")); try ( - Translog tlog = new Translog( + Translog tlog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -3185,7 +3149,14 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { TranslogException ex = expectThrows( TranslogException.class, - () -> new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}) + () -> new LocalTranslog( + config, + translogUUID, + deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + seqNo -> {} + ) ); assertEquals(ex.getMessage(), "failed to create new translog file"); assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class); @@ -3200,7 +3171,7 @@ public void testWithRandomException() throws IOException { final int runs = randomIntBetween(5, 10); for (int run = 0; run < runs; run++) { Path tempDir = createTempDir(); - final FailSwitch fail = new FailSwitch(); + final TestTranslog.FailSwitch fail = new TestTranslog.FailSwitch(); fail.failRandomly(); TranslogConfig config = getTranslogConfig(tempDir); final int numOps = randomIntBetween(100, 200); @@ -3308,7 +3279,7 @@ public void testWithRandomException() throws IOException { ); } try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( config, generationUUID, deletionPolicy, @@ -3370,7 +3341,7 @@ public void testCheckpointOnDiskFull() throws IOException { throw new MockDirectoryWrapper.FakeIOException(); } FileChannel open = FileChannel.open(p, o); - FailSwitch failSwitch = new FailSwitch(); + TestTranslog.FailSwitch failSwitch = new TestTranslog.FailSwitch(); failSwitch.failNever(); // don't fail in the ctor ThrowingFileChannel channel = new ThrowingFileChannel(failSwitch, false, false, open); failSwitch.failAlways(); @@ -3403,7 +3374,7 @@ public void testPendingDelete() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); translog.close(); - translog = new Translog( + translog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -3417,7 +3388,7 @@ public void testPendingDelete() throws IOException { translog.add(new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 3 })); translog.close(); IOUtils.close(lock); - translog = new Translog( + translog = new LocalTranslog( config, translogUUID, deletionPolicy, @@ -3776,7 +3747,7 @@ public void testCloseSnapshotTwice() throws Exception { // close method should never be called directly from Translog (the only exception is closeOnTragicEvent) public void testTranslogCloseInvariant() throws IOException { assumeTrue("test only works with assertions enabled", Assertions.ENABLED); - class MisbehavingTranslog extends Translog { + class MisbehavingTranslog extends LocalTranslog { MisbehavingTranslog( 
TranslogConfig config, String translogUUID, @@ -3856,41 +3827,6 @@ public void testMaxSeqNo() throws Exception { assertThat(translog.getMaxSeqNo(), equalTo(expectedMaxSeqNo)); } - static class SortedSnapshot implements Translog.Snapshot { - private final Translog.Snapshot snapshot; - private List operations = null; - - SortedSnapshot(Translog.Snapshot snapshot) { - this.snapshot = snapshot; - } - - @Override - public int totalOperations() { - return snapshot.totalOperations(); - } - - @Override - public Translog.Operation next() throws IOException { - if (operations == null) { - operations = new ArrayList<>(); - Translog.Operation op; - while ((op = snapshot.next()) != null) { - operations.add(op); - } - operations.sort(Comparator.comparing(Translog.Operation::seqNo)); - } - if (operations.isEmpty()) { - return null; - } - return operations.remove(0); - } - - @Override - public void close() throws IOException { - snapshot.close(); - } - } - public void testCrashDuringCheckpointCopy() throws IOException { final Path path = createTempDir(); final AtomicBoolean failOnCopy = new AtomicBoolean(); @@ -3919,7 +3855,7 @@ public void copy(Path source, Path target, CopyOption... options) throws IOExcep assertFalse(brokenTranslog.isOpen()); try ( - Translog recoveredTranslog = new Translog( + Translog recoveredTranslog = new LocalTranslog( getTranslogConfig(path), brokenTranslog.getTranslogUUID(), brokenTranslog.getDeletionPolicy(), @@ -3953,7 +3889,7 @@ public void testSyncConcurrently() throws Exception { } }; try ( - Translog translog = new Translog( + Translog translog = new LocalTranslog( config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()), @@ -4034,7 +3970,7 @@ public void force(boolean metaData) throws IOException { channelFactory, primaryTerm.get() ); - final Translog translog = new Translog( + final Translog translog = new LocalTranslog( config, translogUUID, createTranslogDeletionPolicy(), diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java new file mode 100644 index 0000000000000..36753c1559a7f --- /dev/null +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java @@ -0,0 +1,1262 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.translog; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.backward_codecs.store.EndiannessReverserUtil; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.ByteArrayDataOutput; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.tests.mockfile.FilterFileChannel; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.junit.After; +import org.junit.Before; +import org.opensearch.OpenSearchException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.xcontent.NamedXContentRegistry; +import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.env.Environment; +import org.opensearch.env.TestEnvironment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.engine.MissingHistoryOperationsException; +import org.opensearch.index.seqno.LocalCheckpointTracker; +import org.opensearch.index.seqno.LocalCheckpointTrackerTests; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.shard.ShardId; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.blobstore.BlobStoreTestUtil; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.io.Closeable; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongConsumer; 
+import java.util.zip.CRC32; +import java.util.zip.CheckedInputStream; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; +import static org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; +import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; + +@LuceneTestCase.SuppressFileSystems("ExtrasFS") +public class RemoteFSTranslogTests extends OpenSearchTestCase { + + protected final ShardId shardId = new ShardId("index", "_na_", 1); + + protected RemoteFsTranslog translog; + private AtomicLong globalCheckpoint; + protected Path translogDir; + // A default primary term is used by translog instances created in this test. + private final AtomicLong primaryTerm = new AtomicLong(); + private final AtomicReference<LongConsumer> persistedSeqNoConsumer = new AtomicReference<>(); + private ThreadPool threadPool; + + BlobStoreRepository repository; + + BlobStoreTransferService blobStoreTransferService; + + TestTranslog.FailSwitch fail; + + private LongConsumer getPersistedSeqNoConsumer() { + return seqNo -> { + final LongConsumer consumer = persistedSeqNoConsumer.get(); + if (consumer != null) { + consumer.accept(seqNo); + } + }; + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + primaryTerm.set(randomLongBetween(1, Integer.MAX_VALUE)); + // if a previous test failed we clean up things here + translogDir = createTempDir(); + translog = create(translogDir); + } + + @Override + @After + public void tearDown() throws Exception { + try { + translog.getDeletionPolicy().assertNoOpenTranslogRefs(); + translog.close(); + } finally { + super.tearDown(); + terminate(threadPool); + } + } + + private RemoteFsTranslog create(Path path) throws IOException { + globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final TranslogConfig translogConfig = getTranslogConfig(path); + final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); + final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); + repository = createRepository(); + threadPool = new TestThreadPool(getClass().getName()); + blobStoreTransferService = new BlobStoreTransferService( + repository.blobStore(), + threadPool.executor(ThreadPool.Names.TRANSLOG_TRANSFER) + ); + return new RemoteFsTranslog( + translogConfig, + translogUUID, + deletionPolicy, + () -> globalCheckpoint.get(), + primaryTerm::get, + getPersistedSeqNoConsumer(), + repository, + threadPool.executor(ThreadPool.Names.TRANSLOG_TRANSFER) + ); + + } + + private TranslogConfig getTranslogConfig(final Path path) { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + // only randomize between no age retention and a long one, so failures will have a chance of reproducing + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomBoolean() ? 
"-1ms" : "1h") + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), randomIntBetween(-1, 2048) + "b") + .build(); + return getTranslogConfig(path, settings); + } + + private TranslogConfig getTranslogConfig(final Path path, final Settings settings) { + final ByteSizeValue bufferSize = randomFrom( + TranslogConfig.DEFAULT_BUFFER_SIZE, + new ByteSizeValue(8, ByteSizeUnit.KB), + new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES) + ); + + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); + } + + private BlobStoreRepository createRepository() { + Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build(); + RepositoryMetadata repositoryMetadata = new RepositoryMetadata(randomAlphaOfLength(10), FsRepository.TYPE, settings); + final ClusterService clusterService = BlobStoreTestUtil.mockClusterService(repositoryMetadata); + fail = new TestTranslog.FailSwitch(); + fail.failNever(); + final FsRepository repository = new ThrowingBlobRepository( + repositoryMetadata, + createEnvironment(), + xContentRegistry(), + clusterService, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + fail + ) { + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo manually + } + }; + clusterService.addStateApplier(event -> repository.updateState(event.state())); + // Apply state once to initialize repo properly like RepositoriesService would + repository.updateState(clusterService.state()); + repository.start(); + return repository; + } + + /** Create a {@link Environment} with random path.home and path.repo **/ + private Environment createEnvironment() { + Path home = createTempDir(); + return TestEnvironment.newEnvironment( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), home.toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), home.resolve("repo").toAbsolutePath()) + .build() + ); + } + + private Translog.Location addToTranslogAndList(Translog translog, List list, Translog.Operation op) + throws IOException { + Translog.Location loc = translog.add(op); + Random random = random(); + if (random.nextBoolean()) { + translog.ensureSynced(loc); + } + list.add(op); + return loc; + } + + private Translog.Location addToTranslogAndListAndUpload(Translog translog, List list, Translog.Operation op) + throws IOException { + Translog.Location loc = translog.add(op); + translog.ensureSynced(loc); + list.add(op); + return loc; + } + + public void testSimpleOperations() throws IOException { + ArrayList ops = new ArrayList<>(); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.size(0)); + } + + addToTranslogAndList(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); + assertThat(snapshot.totalOperations(), equalTo(ops.size())); + } + + addToTranslogAndList(translog, ops, new Translog.Delete("2", 1, primaryTerm.get())); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot.totalOperations(), equalTo(ops.size())); + assertThat(snapshot, containsOperationsInAnyOrder(ops)); + } + + final long seqNo = randomLongBetween(0, Integer.MAX_VALUE); + final 
String reason = randomAlphaOfLength(16); + final long noopTerm = randomLongBetween(1, primaryTerm.get()); + addToTranslogAndList(translog, ops, new Translog.NoOp(seqNo, noopTerm, reason)); + + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, containsOperationsInAnyOrder(ops)); + assertThat(snapshot.totalOperations(), equalTo(ops.size())); + } + + try (Translog.Snapshot snapshot = translog.newSnapshot(seqNo + 1, randomLongBetween(seqNo + 1, Long.MAX_VALUE))) { + assertThat(snapshot, SnapshotMatchers.size(0)); + assertThat(snapshot.totalOperations(), equalTo(0)); + } + + } + + public void testReadLocation() throws IOException { + ArrayList ops = new ArrayList<>(); + ArrayList locs = new ArrayList<>(); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 }))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 }))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 1 }))); + translog.sync(); + int i = 0; + for (Translog.Operation op : ops) { + assertEquals(op, translog.readOperation(locs.get(i++))); + } + assertNull(translog.readOperation(new Translog.Location(100, 0, 0))); + } + + public void testSnapshotWithNewTranslog() throws IOException { + List toClose = new ArrayList<>(); + try { + ArrayList ops = new ArrayList<>(); + Translog.Snapshot snapshot = translog.newSnapshot(); + toClose.add(snapshot); + assertThat(snapshot, SnapshotMatchers.size(0)); + + addToTranslogAndList(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + Translog.Snapshot snapshot1 = translog.newSnapshot(); + toClose.add(snapshot1); + + addToTranslogAndList(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 2 })); + + assertThat(snapshot1, SnapshotMatchers.equalsTo(ops.get(0))); + + translog.rollGeneration(); + addToTranslogAndList(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 3 })); + + Translog.Snapshot snapshot2 = translog.newSnapshot(); + toClose.add(snapshot2); + translog.getDeletionPolicy().setLocalCheckpointOfSafeCommit(2); + assertThat(snapshot2, containsOperationsInAnyOrder(ops)); + assertThat(snapshot2.totalOperations(), equalTo(ops.size())); + } finally { + IOUtils.closeWhileHandlingException(toClose); + } + } + + public void testSnapshotOnClosedTranslog() throws IOException { + assertTrue(Files.exists(translogDir.resolve(Translog.getFilename(1)))); + translog.add(new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + translog.close(); + AlreadyClosedException ex = expectThrows(AlreadyClosedException.class, () -> translog.newSnapshot()); + assertEquals(ex.getMessage(), "translog is already closed"); + } + + public void testRangeSnapshot() throws Exception { + long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + final int generations = between(2, 20); + Map> operationsByGen = new HashMap<>(); + for (int gen = 0; gen < generations; gen++) { + Set seqNos = new HashSet<>(); + int numOps = randomIntBetween(1, 100); + for (int i = 0; i < numOps; i++) { + final long seqNo = randomValueOtherThanMany(seqNos::contains, () -> randomLongBetween(0, 1000)); + minSeqNo = SequenceNumbers.min(minSeqNo, seqNo); + maxSeqNo = SequenceNumbers.max(maxSeqNo, seqNo); + seqNos.add(seqNo); + } + List ops = new ArrayList<>(seqNos.size()); + for (long seqNo : seqNos) { + Translog.Index op = new 
Translog.Index(randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[] { randomByte() }); + translog.add(op); + ops.add(op); + } + operationsByGen.put(translog.currentFileGeneration(), ops); + translog.rollGeneration(); + if (rarely()) { + translog.rollGeneration(); // empty generation + } + } + + if (minSeqNo > 0) { + long fromSeqNo = randomLongBetween(0, minSeqNo - 1); + long toSeqNo = randomLongBetween(fromSeqNo, minSeqNo - 1); + try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo)) { + assertThat(snapshot.totalOperations(), equalTo(0)); + assertNull(snapshot.next()); + } + } + + long fromSeqNo = randomLongBetween(maxSeqNo + 1, Long.MAX_VALUE); + long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE); + try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo)) { + assertThat(snapshot.totalOperations(), equalTo(0)); + assertNull(snapshot.next()); + } + + fromSeqNo = randomLongBetween(0, 2000); + toSeqNo = randomLongBetween(fromSeqNo, 2000); + try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo)) { + Set seenSeqNos = new HashSet<>(); + List expectedOps = new ArrayList<>(); + for (long gen = translog.currentFileGeneration(); gen > 0; gen--) { + for (Translog.Operation op : operationsByGen.getOrDefault(gen, Collections.emptyList())) { + if (fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo && seenSeqNos.add(op.seqNo())) { + expectedOps.add(op); + } + } + } + assertThat(TestTranslog.drainSnapshot(snapshot, false), equalTo(expectedOps)); + } + } + + public void testSimpleOperationsUpload() throws IOException { + ArrayList ops = new ArrayList<>(); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.size(0)); + } + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); + assertThat(snapshot.totalOperations(), equalTo(ops.size())); + } + + assertEquals(translog.allUploaded().size(), 4); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 1, primaryTerm.get(), new byte[] { 1 })); + assertEquals(translog.allUploaded().size(), 6); + + translog.rollGeneration(); + assertEquals(translog.allUploaded().size(), 6); + + Set mdFiles = blobStoreTransferService.listAll( + repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add("metadata") + ); + assertEquals(mdFiles.size(), 2); + logger.info("All md files {}", mdFiles); + + Set tlogFiles = blobStoreTransferService.listAll( + repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(String.valueOf(primaryTerm.get())) + ); + logger.info("All data files {}", tlogFiles); + + // assert content of ckp and tlog files + BlobPath path = repository.basePath() + .add(shardId.getIndex().getUUID()) + .add(String.valueOf(shardId.id())) + .add(String.valueOf(primaryTerm.get())); + for (TranslogReader reader : translog.readers) { + final long readerGeneration = reader.getGeneration(); + logger.error("Asserting content of {}", readerGeneration); + Path translogPath = reader.path(); + try ( + InputStream stream = new CheckedInputStream(Files.newInputStream(translogPath), new CRC32()); + InputStream tlogStream = blobStoreTransferService.downloadBlob(path, Translog.getFilename(readerGeneration)); + ) { + byte[] content = stream.readAllBytes(); + byte[] tlog = tlogStream.readAllBytes(); + 
assertArrayEquals(tlog, content); + } + + Path checkpointPath = translog.location().resolve(Translog.getCommitCheckpointFileName(readerGeneration)); + try ( + CheckedInputStream stream = new CheckedInputStream(Files.newInputStream(checkpointPath), new CRC32()); + InputStream ckpStream = blobStoreTransferService.downloadBlob(path, Translog.getCommitCheckpointFileName(readerGeneration)) + ) { + byte[] content = stream.readAllBytes(); + byte[] ckp = ckpStream.readAllBytes(); + assertArrayEquals(ckp, content); + } + } + } + + private Long populateTranslogOps(boolean withMissingOps) throws IOException { + long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + final int generations = between(2, 20); + long currentSeqNo = 0L; + List firstGenOps = null; + Map> operationsByGen = new HashMap<>(); + for (int gen = 0; gen < generations; gen++) { + List seqNos = new ArrayList<>(); + int numOps = randomIntBetween(4, 10); + for (int i = 0; i < numOps; i++, currentSeqNo++) { + minSeqNo = SequenceNumbers.min(minSeqNo, currentSeqNo); + maxSeqNo = SequenceNumbers.max(maxSeqNo, currentSeqNo); + seqNos.add(currentSeqNo); + } + Collections.shuffle(seqNos, new Random(100)); + List ops = new ArrayList<>(seqNos.size()); + for (long seqNo : seqNos) { + Translog.Index op = new Translog.Index(randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[] { randomByte() }); + boolean shouldAdd = !withMissingOps || seqNo % 4 != 0; + if (shouldAdd) { + translog.add(op); + ops.add(op); + } + } + operationsByGen.put(translog.currentFileGeneration(), ops); + if (firstGenOps == null) { + firstGenOps = ops; + } + translog.rollGeneration(); + if (rarely()) { + translog.rollGeneration(); // empty generation + } + } + return currentSeqNo; + } + + public void testFullRangeSnapshot() throws Exception { + // Successful snapshot + long nextSeqNo = populateTranslogOps(false); + long fromSeqNo = 0L; + long toSeqNo = Math.min(nextSeqNo - 1, fromSeqNo + 15); + try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo, true)) { + int totOps = 0; + for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) { + totOps++; + } + assertEquals(totOps, toSeqNo - fromSeqNo + 1); + } + } + + public void testFullRangeSnapshotWithFailures() throws Exception { + long nextSeqNo = populateTranslogOps(true); + long fromSeqNo = 0L; + long toSeqNo = Math.min(nextSeqNo - 1, fromSeqNo + 15); + try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo, true)) { + int totOps = 0; + for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) { + totOps++; + } + fail("Should throw exception for missing operations"); + } catch (MissingHistoryOperationsException e) { + assertTrue(e.getMessage().contains("Not all operations between from_seqno")); + } + } + + public void testConcurrentWritesWithVaryingSize() throws Throwable { + final int opsPerThread = randomIntBetween(10, 200); + int threadCount = 2 + randomInt(5); + + logger.info("testing with [{}] threads, each doing [{}] ops", threadCount, opsPerThread); + final BlockingQueue writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread); + + Thread[] threads = new Thread[threadCount]; + final Exception[] threadExceptions = new Exception[threadCount]; + final AtomicLong seqNoGenerator = new AtomicLong(); + final CountDownLatch downLatch = new CountDownLatch(1); + for (int i = 0; i < threadCount; i++) { + final int threadId = i; + threads[i] = new TranslogThread( + translog, + 
+                downLatch,
+                opsPerThread,
+                threadId,
+                writtenOperations,
+                seqNoGenerator,
+                threadExceptions
+            );
+            threads[i].setDaemon(true);
+            threads[i].start();
+        }
+
+        downLatch.countDown();
+
+        for (int i = 0; i < threadCount; i++) {
+            if (threadExceptions[i] != null) {
+                throw threadExceptions[i];
+            }
+            threads[i].join(60 * 1000);
+        }
+
+        List<TestTranslog.LocationOperation> collect = new ArrayList<>(writtenOperations);
+        collect.sort(Comparator.comparing(op -> op.operation.seqNo()));
+
+        List<Translog.Operation> opsList = new ArrayList<>(threadCount * opsPerThread);
+        try (Translog.Snapshot snapshot = translog.newSnapshot()) {
+            for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) {
+                opsList.add(op);
+            }
+        }
+        opsList.sort(Comparator.comparing(op -> op.seqNo()));
+
+        for (int i = 0; i < threadCount * opsPerThread; i++) {
+            assertEquals(opsList.get(i), collect.get(i).operation);
+        }
+    }
+
+    /**
+     * Tests that concurrent readers and writers maintain view and snapshot semantics
+     */
+    public void testConcurrentWriteViewsAndSnapshot() throws Throwable {
+        final Thread[] writers = new Thread[randomIntBetween(1, 3)];
+        final Thread[] readers = new Thread[randomIntBetween(1, 3)];
+        final int flushEveryOps = randomIntBetween(5, 100);
+        final int maxOps = randomIntBetween(200, 1000);
+        final Object signalReaderSomeDataWasIndexed = new Object();
+        final AtomicLong idGenerator = new AtomicLong();
+        final CyclicBarrier barrier = new CyclicBarrier(writers.length + readers.length + 1);
+
+        // a map of all written ops and their returned location.
+        final Map<Translog.Operation, Translog.Location> writtenOps = ConcurrentCollections.newConcurrentMap();
+
+        // a signal for all threads to stop
+        final AtomicBoolean run = new AtomicBoolean(true);
+
+        final Object flushMutex = new Object();
+        final AtomicLong lastCommittedLocalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+        final LocalCheckpointTracker tracker = LocalCheckpointTrackerTests.createEmptyTracker();
+        final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
+        // any errors on threads
+        final List<Exception> errors = new CopyOnWriteArrayList<>();
+        logger.info("using [{}] readers. [{}] writers. 
flushing every ~[{}] ops.", readers.length, writers.length, flushEveryOps); + for (int i = 0; i < writers.length; i++) { + final String threadName = "writer_" + i; + final int threadId = i; + writers[i] = new Thread(new AbstractRunnable() { + @Override + public void doRun() throws BrokenBarrierException, InterruptedException, IOException { + barrier.await(); + int counter = 0; + while (run.get() && idGenerator.get() < maxOps) { + long id = idGenerator.getAndIncrement(); + final Translog.Operation op; + final Translog.Operation.Type type = Translog.Operation.Type.values()[((int) (id % Translog.Operation.Type + .values().length))]; + switch (type) { + case CREATE: + case INDEX: + op = new Translog.Index("" + id, id, primaryTerm.get(), new byte[] { (byte) id }); + break; + case DELETE: + op = new Translog.Delete(Long.toString(id), id, primaryTerm.get()); + break; + case NO_OP: + op = new Translog.NoOp(id, 1, Long.toString(id)); + break; + default: + throw new AssertionError("unsupported operation type [" + type + "]"); + } + Translog.Location location = translog.add(op); + tracker.markSeqNoAsProcessed(id); + Translog.Location existing = writtenOps.put(op, location); + if (existing != null) { + fail("duplicate op [" + op + "], old entry at " + location); + } + if (id % writers.length == threadId) { + translog.ensureSynced(location); + } + if (id % flushEveryOps == 0) { + synchronized (flushMutex) { + // we need not do this concurrently as we need to make sure that the generation + // we're committing - is still present when we're committing + long localCheckpoint = tracker.getProcessedCheckpoint(); + translog.rollGeneration(); + // expose the new checkpoint (simulating a commit), before we trim the translog + lastCommittedLocalCheckpoint.set(localCheckpoint); + deletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); + translog.trimUnreferencedReaders(); + } + } + if (id % 7 == 0) { + synchronized (signalReaderSomeDataWasIndexed) { + signalReaderSomeDataWasIndexed.notifyAll(); + } + } + counter++; + } + logger.info("--> [{}] done. 
wrote [{}] ops.", threadName, counter);
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    logger.error(() -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e);
+                    errors.add(e);
+                }
+            }, threadName);
+            writers[i].start();
+        }
+
+        for (int i = 0; i < readers.length; i++) {
+            final String threadId = "reader_" + i;
+            readers[i] = new Thread(new AbstractRunnable() {
+                Closeable retentionLock = null;
+                long committedLocalCheckpointAtView;
+
+                @Override
+                public void onFailure(Exception e) {
+                    logger.error(() -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e);
+                    errors.add(e);
+                    try {
+                        closeRetentionLock();
+                    } catch (IOException inner) {
+                        inner.addSuppressed(e);
+                        logger.error("unexpected error while closing view, after failure", inner);
+                    }
+                }
+
+                void closeRetentionLock() throws IOException {
+                    if (retentionLock != null) {
+                        retentionLock.close();
+                    }
+                }
+
+                void acquireRetentionLock() throws IOException {
+                    closeRetentionLock();
+                    retentionLock = translog.acquireRetentionLock();
+                    // captures the last committed checkpoint, while holding the view, simulating
+                    // recovery logic which captures a view and gets a lucene commit
+                    committedLocalCheckpointAtView = lastCommittedLocalCheckpoint.get();
+                    logger.info("--> [{}] min gen after acquiring lock [{}]", threadId, translog.getMinFileGeneration());
+                }
+
+                @Override
+                protected void doRun() throws Exception {
+                    barrier.await();
+                    int iter = 0;
+                    while (idGenerator.get() < maxOps) {
+                        if (iter++ % 10 == 0) {
+                            acquireRetentionLock();
+                        }
+
+                        // captures all operations that were written since the view was created (with a small caveat, see below)
+                        // these are what we expect the snapshot to return (and potentially some more).
+                        Set<Translog.Operation> expectedOps = new HashSet<>(writtenOps.keySet());
+                        expectedOps.removeIf(op -> op.seqNo() <= committedLocalCheckpointAtView);
+                        try (Translog.Snapshot snapshot = translog.newSnapshot(committedLocalCheckpointAtView + 1L, Long.MAX_VALUE)) {
+                            Translog.Operation op;
+                            while ((op = snapshot.next()) != null) {
+                                expectedOps.remove(op);
+                            }
+                        }
+                        if (expectedOps.isEmpty() == false) {
+                            StringBuilder missed = new StringBuilder("missed ").append(expectedOps.size())
+                                .append(" operations from [")
+                                .append(committedLocalCheckpointAtView + 1L)
+                                .append("]");
+                            boolean failed = false;
+                            for (Translog.Operation expectedOp : expectedOps) {
+                                final Translog.Location loc = writtenOps.get(expectedOp);
+                                failed = true;
+                                missed.append("\n --> [").append(expectedOp).append("] written at ").append(loc);
+                            }
+                            if (failed) {
+                                fail(missed.toString());
+                            }
+                        }
+                        // slow down things a bit and spread out testing..
+                        synchronized (signalReaderSomeDataWasIndexed) {
+                            if (idGenerator.get() < maxOps) {
+                                signalReaderSomeDataWasIndexed.wait();
+                            }
+                        }
+                    }
+                    closeRetentionLock();
+                    logger.info("--> [{}] done. tested [{}] snapshots", threadId, iter);
+                }
+            }, threadId);
+            readers[i].start();
+        }
+
+        barrier.await();
+        logger.debug("--> waiting for threads to stop");
+        for (Thread thread : writers) {
+            thread.join();
+        }
+        logger.debug("--> waiting for readers to stop");
+        // force stopping, if all writers crashed
+        synchronized (signalReaderSomeDataWasIndexed) {
+            idGenerator.set(Long.MAX_VALUE);
+            signalReaderSomeDataWasIndexed.notifyAll();
+        }
+        for (Thread thread : readers) {
+            thread.join();
+        }
+        if (errors.size() > 0) {
+            Throwable e = errors.get(0);
+            for (Throwable suppress : errors.subList(1, errors.size())) {
+                e.addSuppressed(suppress);
+            }
+            throw e;
+        }
+        logger.info("--> test done. total ops written [{}]", writtenOps.size());
+    }
+
+    public void testSyncUpTo() throws IOException {
+        int translogOperations = randomIntBetween(10, 100);
+        int count = 0;
+        for (int op = 0; op < translogOperations; op++) {
+            int seqNo = ++count;
+            final Translog.Location location = translog.add(
+                new Translog.Index("" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))
+            );
+            if (randomBoolean()) {
+                assertTrue("at least one operation pending", translog.syncNeeded());
+                assertTrue("this operation has not been synced", translog.ensureSynced(location));
+                // we are the last location so everything should be synced
+                assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded());
+                seqNo = ++count;
+                translog.add(
+                    new Translog.Index("" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))
+                );
+                assertTrue("one pending operation", translog.syncNeeded());
+                assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now
+                assertTrue("we only synced a previous operation yet", translog.syncNeeded());
+            }
+            if (rarely()) {
+                translog.rollGeneration();
+                assertFalse("location is from a previous translog - already synced", translog.ensureSynced(location)); // not syncing now
+                assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
+            }
+
+            if (randomBoolean()) {
+                translog.sync();
+                assertFalse("translog has been synced already", translog.ensureSynced(location));
+            }
+        }
+    }
+
+    public void testSyncUpFailure() throws IOException {
+        int translogOperations = randomIntBetween(1, 20);
+        int count = 0;
+        fail.failAlways();
+        ArrayList<Translog.Location> locations = new ArrayList<>();
+        for (int op = 0; op < translogOperations; op++) {
+            int seqNo = ++count;
+            final Translog.Location location = translog.add(
+                new Translog.Index("" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))
+            );
+            if (randomBoolean()) {
+                fail.failAlways();
+                try {
+                    translog.ensureSynced(location);
+                    fail("io exception expected");
+                } catch (IOException e) {
+                    assertTrue("at least one operation pending", translog.syncNeeded());
+                }
+            } else {
+                fail.failNever();
+                translog.ensureSynced(location);
+                assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
+            }
+            locations.add(location);
+        }
+        // clean up
+        fail.failNever();
+
+        // writes should get synced up now
+        translog.sync();
+        assertFalse(translog.syncNeeded());
+        for (Translog.Location location : locations) {
+            assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location));
+        }
+    }
+
+    public void testSyncUpToStream() throws IOException {
+        int iters = randomIntBetween(5, 10);
+        for (int i = 0; i < iters; i++) {
+            int translogOperations = randomIntBetween(10, 100);
+            int count = 0;
+            ArrayList<Translog.Location> locations = new ArrayList<>();
+            for (int op = 0; op < translogOperations; op++) {
+                if (rarely()) {
+                    translog.rollGeneration();
+                }
+                final Translog.Location location = translog.add(
+                    new Translog.Index("" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8")))
+                );
+                locations.add(location);
+            }
+            Collections.shuffle(locations, random());
+            if (randomBoolean()) {
+                assertTrue("at least one operation pending", translog.syncNeeded());
+                assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream()));
+                // we are the last location so everything should be synced
+                assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded());
+            } else if (rarely()) {
+                translog.rollGeneration();
+                // not syncing now
+                assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream()));
+                assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
+            } else {
+                translog.sync();
+                assertFalse("translog has been synced already", translog.ensureSynced(locations.stream()));
+            }
+            for (Translog.Location location : locations) {
+                assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location));
+            }
+        }
+    }
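For orientation, the contract the three sync tests above exercise can be summed up in a few lines. This is an illustrative sketch only, not part of the patch; `translog` and `op` stand in for the fixtures used in these tests:

    // ensureSynced(location) returns true only when it actually had to fsync to make
    // `location` durable; once a location is covered, further calls are no-ops.
    Translog.Location location = translog.add(op);
    assertTrue(translog.syncNeeded());            // buffered, not yet durable
    assertTrue(translog.ensureSynced(location));  // first call performs the sync
    assertFalse(translog.ensureSynced(location)); // already durable, nothing to do
    assertFalse(translog.syncNeeded());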
+
+    public void testLocationComparison() throws IOException {
+        List<Translog.Location> locations = new ArrayList<>();
+        int translogOperations = randomIntBetween(10, 100);
+        int count = 0;
+        for (int op = 0; op < translogOperations; op++) {
+            locations.add(
+                translog.add(
+                    new Translog.Index("" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8")))
+                )
+            );
+            if (randomBoolean()) {
+                translog.ensureSynced(locations.get(op));
+            }
+            if (rarely() && translogOperations > op + 1) {
+                translog.rollGeneration();
+            }
+        }
+        Collections.shuffle(locations, random());
+        Translog.Location max = locations.get(0);
+        for (Translog.Location location : locations) {
+            max = max(max, location);
+        }
+
+        try (Translog.Snapshot snap = new TestTranslog.SortedSnapshot(translog.newSnapshot())) {
+            Translog.Operation next;
+            Translog.Operation maxOp = null;
+            while ((next = snap.next()) != null) {
+                maxOp = next;
+            }
+            assertNotNull(maxOp);
+            assertEquals(maxOp.getSource().source.utf8ToString(), Integer.toString(count));
+        }
+    }
+
+    public static Translog.Location max(Translog.Location a, Translog.Location b) {
+        if (a.compareTo(b) > 0) {
+            return a;
+        }
+        return b;
+    }
+
+    public void testTranslogWriter() throws IOException {
+        final TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1);
+        final Set<Long> persistedSeqNos = new HashSet<>();
+        persistedSeqNoConsumer.set(persistedSeqNos::add);
+        final int numOps = scaledRandomIntBetween(8, 250000);
+        final Set<Long> seenSeqNos = new HashSet<>();
+        boolean opsHaveValidSequenceNumbers = randomBoolean();
+        for (int i = 0; i < numOps; i++) {
+            byte[] bytes = new byte[4];
+            DataOutput out = EndiannessReverserUtil.wrapDataOutput(new ByteArrayDataOutput(bytes));
+            out.writeInt(i);
+            long seqNo;
+            do {
+                seqNo = opsHaveValidSequenceNumbers ? randomNonNegativeLong() : SequenceNumbers.UNASSIGNED_SEQ_NO;
+                opsHaveValidSequenceNumbers = opsHaveValidSequenceNumbers || !rarely();
+            } while (seenSeqNos.contains(seqNo));
+            if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) {
+                seenSeqNos.add(seqNo);
+            }
+            writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), seqNo);
+        }
+        assertThat(persistedSeqNos, empty());
+        writer.sync();
+        persistedSeqNos.remove(SequenceNumbers.UNASSIGNED_SEQ_NO);
+        assertEquals(seenSeqNos, persistedSeqNos);
+
+        final BaseTranslogReader reader = randomBoolean()
+            ? writer
+            : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)));
+        for (int i = 0; i < numOps; i++) {
+            ByteBuffer buffer = ByteBuffer.allocate(4);
+            reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i);
+            buffer.flip();
+            final int value = buffer.getInt();
+            assertEquals(i, value);
+        }
+        final long minSeqNo = seenSeqNos.stream().min(Long::compareTo).orElse(SequenceNumbers.NO_OPS_PERFORMED);
+        final long maxSeqNo = seenSeqNos.stream().max(Long::compareTo).orElse(SequenceNumbers.NO_OPS_PERFORMED);
+        assertThat(reader.getCheckpoint().minSeqNo, equalTo(minSeqNo));
+        assertThat(reader.getCheckpoint().maxSeqNo, equalTo(maxSeqNo));
+
+        byte[] bytes = new byte[4];
+        DataOutput out = EndiannessReverserUtil.wrapDataOutput(new ByteArrayDataOutput(bytes));
+        out.writeInt(2048);
+        writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), randomNonNegativeLong());
+
+        if (reader instanceof TranslogReader) {
+            ByteBuffer buffer = ByteBuffer.allocate(4);
+            try {
+                reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * numOps);
+                fail("read past EOF?");
+            } catch (EOFException ex) {
+                // expected
+            }
+            ((TranslogReader) reader).close();
+        } else {
+            // live reader!
+            ByteBuffer buffer = ByteBuffer.allocate(4);
+            final long pos = reader.getFirstOperationOffset() + 4 * numOps;
+            reader.readBytes(buffer, pos);
+            buffer.flip();
+            final int value = buffer.getInt();
+            assertEquals(2048, value);
+        }
+        IOUtils.close(writer);
+    }
+
+    public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException {
+        Path tempDir = createTempDir();
+        final TranslogConfig temp = getTranslogConfig(tempDir);
+        final TranslogConfig config = new TranslogConfig(
+            temp.getShardId(),
+            temp.getTranslogPath(),
+            temp.getIndexSettings(),
+            temp.getBigArrays(),
+            new ByteSizeValue(1, ByteSizeUnit.KB)
+        );
+
+        final Set<Long> persistedSeqNos = new HashSet<>();
+        final AtomicInteger writeCalls = new AtomicInteger();
+
+        final ChannelFactory channelFactory = (file, openOption) -> {
+            FileChannel delegate = FileChannel.open(file, openOption);
+            boolean success = false;
+            try {
+                // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation
+                final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp");
+
+                final FileChannel channel;
+                if (isCkpFile) {
+                    channel = delegate;
+                } else {
+                    channel = new FilterFileChannel(delegate) {
+
+                        @Override
+                        public int write(ByteBuffer src) throws IOException {
+                            writeCalls.incrementAndGet();
+                            return super.write(src);
+                        }
+                    };
+                }
+                success = true;
+                return channel;
+            } finally {
+                if (success == false) {
+                    IOUtils.closeWhileHandlingException(delegate);
+                }
+            }
+        };
+
+        String translogUUID = Translog.createEmptyTranslog(
+            config.getTranslogPath(),
+            SequenceNumbers.NO_OPS_PERFORMED,
+            shardId,
+            channelFactory,
+            primaryTerm.get()
+        );
+
+        try (
+            Translog translog = new RemoteFsTranslog(
+                config,
+                translogUUID,
+                new DefaultTranslogDeletionPolicy(-1, -1, 0),
+                () -> SequenceNumbers.NO_OPS_PERFORMED,
+                primaryTerm::get,
+                persistedSeqNos::add,
+                repository,
+                threadPool.executor(ThreadPool.Names.TRANSLOG_TRANSFER)
+            ) {
+                @Override
+                ChannelFactory getChannelFactory() {
+                    return channelFactory;
+                }
+            }
+        ) {
+            TranslogWriter writer = translog.getCurrent();
+            int initialWriteCalls = writeCalls.get();
+            byte[] bytes = new byte[256];
+            writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 1);
+            writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 2);
+            writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 3);
+            writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 4);
+            assertThat(persistedSeqNos, empty());
+            assertEquals(initialWriteCalls, writeCalls.get());
+
+            if (randomBoolean()) {
+                // Since the buffer is full, this will flush before performing the add.
+                writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 5);
+                assertThat(persistedSeqNos, empty());
+                assertThat(writeCalls.get(), greaterThan(initialWriteCalls));
+            } else {
+                // Will flush on read
+                writer.readBytes(ByteBuffer.allocate(256), 0);
+                assertThat(persistedSeqNos, empty());
+                assertThat(writeCalls.get(), greaterThan(initialWriteCalls));
+
+                // Add after the read flushed the buffer
+                writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 5);
+            }
+
+            writer.sync();
+
+            // Sequence numbers are marked as persisted after sync
+            assertThat(persistedSeqNos, contains(1L, 2L, 3L, 4L, 5L));
+        }
+    }
+
+    public void testCloseIntoReader() throws IOException {
+        try (TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1)) {
+            final int numOps = randomIntBetween(8, 128);
+            for (int i = 0; i < numOps; i++) {
+                final byte[] bytes = new byte[4];
+                final DataOutput out = EndiannessReverserUtil.wrapDataOutput(new ByteArrayDataOutput(bytes));
+                out.writeInt(i);
+                writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), randomNonNegativeLong());
+            }
+            writer.sync();
+            final Checkpoint writerCheckpoint = writer.getCheckpoint();
+            TranslogReader reader = writer.closeIntoReader();
+            try {
+                if (randomBoolean()) {
+                    reader.close();
+                    reader = translog.openReader(reader.path(), writerCheckpoint);
+                }
+                for (int i = 0; i < numOps; i++) {
+                    final ByteBuffer buffer = ByteBuffer.allocate(4);
+                    reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i);
+                    buffer.flip();
+                    final int value = buffer.getInt();
+                    assertEquals(i, value);
+                }
+                final Checkpoint readerCheckpoint = reader.getCheckpoint();
+                assertThat(readerCheckpoint, equalTo(writerCheckpoint));
+            } finally {
+                IOUtils.close(reader);
+            }
+        }
+    }
+
+    public class ThrowingBlobRepository extends FsRepository {
+        private final Environment environment;
+
+        private TestTranslog.FailSwitch fail;
+
+        public ThrowingBlobRepository(
+            RepositoryMetadata metadata,
+            Environment environment,
+            NamedXContentRegistry namedXContentRegistry,
+            ClusterService clusterService,
+            RecoverySettings recoverySettings,
+            TestTranslog.FailSwitch fail
+        ) {
+            super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings);
+            this.environment = environment;
+            this.fail = fail;
+        }
+
+        protected BlobStore createBlobStore() throws Exception {
+            final String location = REPOSITORIES_LOCATION_SETTING.get(getMetadata().settings());
+            final Path locationFile = environment.resolveRepoFile(location);
+            return new ThrowingBlobStore(bufferSize, locationFile, isReadOnly(), fail);
+        }
+    }
+
+    private class ThrowingBlobStore extends FsBlobStore {
+
+        private TestTranslog.FailSwitch fail;
+
+        public ThrowingBlobStore(int bufferSizeInBytes, Path path, boolean readonly, TestTranslog.FailSwitch fail) throws IOException {
+            super(bufferSizeInBytes, path, readonly);
+            this.fail = fail;
+        }
+
+        @Override
+        public BlobContainer blobContainer(BlobPath path) {
+            try {
+                return new ThrowingBlobContainer(this, path, buildAndCreate(path), fail);
+            } catch (IOException ex) {
+                throw new OpenSearchException("failed to create blob container", ex);
+            }
+        }
+    }
+
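The two wrapper classes above, together with ThrowingBlobContainer below, thread a TestTranslog.FailSwitch into the FsRepository stack so tests can inject blob-upload failures on demand. A minimal usage sketch, mirroring testSyncUpFailure earlier in this file (illustrative only; `fail`, `translog`, and `location` are the fixtures from that test):

    fail.failAlways();                 // every writeBlobAtomic now throws IOException
    expectThrows(IOException.class, () -> translog.ensureSynced(location));
    assertTrue(translog.syncNeeded()); // the operation is still pending
    fail.failNever();                  // "heal" the blob store
    translog.sync();                   // the retry succeeds
    assertFalse(translog.syncNeeded());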
+    private class ThrowingBlobContainer extends FsBlobContainer {
+
+        private TestTranslog.FailSwitch fail;
+
+        public ThrowingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, TestTranslog.FailSwitch fail) {
+            super(blobStore, blobPath, path);
+            this.fail = fail;
+        }
+
+        public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, boolean failIfAlreadyExists)
+            throws IOException {
+            if (fail.fail()) {
+                throw new IOException("blob container throwing error");
+            }
+            super.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists);
+        }
+    }
+
+    class TranslogThread extends Thread {
+        private final CountDownLatch downLatch;
+        private final int opsPerThread;
+        private final int threadId;
+        private final Collection<TestTranslog.LocationOperation> writtenOperations;
+        private final Exception[] threadExceptions;
+        private final Translog translog;
+        private final AtomicLong seqNoGenerator;
+
+        TranslogThread(
+            Translog translog,
+            CountDownLatch downLatch,
+            int opsPerThread,
+            int threadId,
+            Collection<TestTranslog.LocationOperation> writtenOperations,
+            AtomicLong seqNoGenerator,
+            Exception[] threadExceptions
+        ) {
+            this.translog = translog;
+            this.downLatch = downLatch;
+            this.opsPerThread = opsPerThread;
+            this.threadId = threadId;
+            this.writtenOperations = writtenOperations;
+            this.seqNoGenerator = seqNoGenerator;
+            this.threadExceptions = threadExceptions;
+        }
+
+        @Override
+        public void run() {
+            try {
+                downLatch.await();
+                for (int opCount = 0; opCount < opsPerThread; opCount++) {
+                    Translog.Operation op;
+                    final Translog.Operation.Type type = randomFrom(Translog.Operation.Type.values());
+                    switch (type) {
+                        case CREATE:
+                        case INDEX:
+                            op = new Translog.Index(
+                                threadId + "_" + opCount,
+                                seqNoGenerator.getAndIncrement(),
+                                primaryTerm.get(),
+                                randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8")
+                            );
+                            break;
+                        case DELETE:
+                            op = new Translog.Delete(
+                                threadId + "_" + opCount,
+                                seqNoGenerator.getAndIncrement(),
+                                primaryTerm.get(),
+                                1 + randomInt(100000)
+                            );
+                            break;
+                        case NO_OP:
+                            op = new Translog.NoOp(seqNoGenerator.getAndIncrement(), primaryTerm.get(), randomAlphaOfLength(16));
+                            break;
+                        default:
+                            throw new AssertionError("unsupported operation type [" + type + "]");
+                    }
+
+                    Translog.Location loc = add(op);
+                    writtenOperations.add(new TestTranslog.LocationOperation(op, loc));
+                    if (rarely()) { // lets verify we can concurrently read this
+                        assertEquals(op, translog.readOperation(loc));
+                    }
+                    afterAdd();
+                }
+            } catch (Exception t) {
+                threadExceptions[threadId] = t;
+            }
+        }
+
+        protected Translog.Location add(Translog.Operation op) throws IOException {
+            Translog.Location location = translog.add(op);
+            if (randomBoolean()) {
+                translog.ensureSynced(location);
+            }
+            return location;
+        }
+
+        protected void afterAdd() {}
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/index/translog/TestTranslog.java b/server/src/test/java/org/opensearch/index/translog/TestTranslog.java
index 4e9d56aeb9573..3545f2dbf5ffc 100644
--- a/server/src/test/java/org/opensearch/index/translog/TestTranslog.java
+++ b/server/src/test/java/org/opensearch/index/translog/TestTranslog.java
@@ -38,6 +38,7 @@
 import org.apache.lucene.tests.util.LuceneTestCase;
 import org.opensearch.common.io.stream.InputStreamStreamInput;
 import org.opensearch.core.internal.io.IOUtils;
+import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -283,4 +284,88 @@ static boolean isTranslogHeaderVersionFlipped(Path corruptedFile, FileChannel ch
         return false;
     }
 }
+
+    static class LocationOperation implements Comparable<LocationOperation> {
+        final Translog.Operation operation;
+        final Translog.Location location;
+
+        LocationOperation(Translog.Operation operation, Translog.Location location) {
+            this.operation = operation;
+            this.location = location;
+        }
+
+        @Override
+        public int compareTo(LocationOperation o) {
+            return location.compareTo(o.location);
+        }
+    }
+
+    static class FailSwitch {
+        private volatile int failRate;
+        private volatile boolean onceFailedFailAlways = false;
+
+        public boolean fail() {
+            final int rnd = OpenSearchTestCase.randomIntBetween(1, 100);
+            boolean fail = rnd <= failRate;
+            if (fail && onceFailedFailAlways) {
+                failAlways();
+            }
+            return fail;
+        }
+
+        public void failNever() {
+            failRate = 0;
+        }
+
+        public void failAlways() {
+            failRate = 100;
+        }
+
+        public void failRandomly() {
+            failRate = OpenSearchTestCase.randomIntBetween(1, 100);
+        }
+
+        public void failRate(int rate) {
+            failRate = rate;
+        }
+
+        public void onceFailedFailAlways() {
+            onceFailedFailAlways = true;
+        }
+    }
+
+    static class SortedSnapshot implements Translog.Snapshot {
+        private final Translog.Snapshot snapshot;
+        private List<Translog.Operation> operations = null;
+
+        SortedSnapshot(Translog.Snapshot snapshot) {
+            this.snapshot = snapshot;
+        }
+
+        @Override
+        public int totalOperations() {
+            return snapshot.totalOperations();
+        }
+
+        @Override
+        public Translog.Operation next() throws IOException {
+            if (operations == null) {
+                operations = new ArrayList<>();
+                Translog.Operation op;
+                while ((op = snapshot.next()) != null) {
+                    operations.add(op);
+                }
+                operations.sort(Comparator.comparing(Translog.Operation::seqNo));
+            }
+            if (operations.isEmpty()) {
+                return null;
+            }
+            return operations.remove(0);
+        }
+
+        @Override
+        public void close() throws IOException {
+            snapshot.close();
+        }
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java
index 25867cdb666ad..4c3948cbd7b5b 100644
--- a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java
+++ b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java
@@ -81,7 +81,7 @@ protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSup
             shardId,
             primaryTermSupplier.getAsLong()
         );
-        return new Translog(
+        return new LocalTranslog(
             translogConfig,
             translogUUID,
            createTranslogDeletionPolicy(INDEX_SETTINGS),
diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java
index adca47bf64c64..5dc5ac92070ea 100644
--- a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java
@@ -54,6 +54,16 @@ public void testUploadBlob() throws IOException {
         transferService.uploadBlob(transferFileSnapshot, repository.basePath());
     }
 
+    public void testUploadBlobFromByteArray() throws IOException {
+        FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot(
+            "dummy_name",
+            randomByteArrayOfLength(128),
+            1
+        );
+        TransferService transferService = new BlobStoreTransferService(repository.blobStore(), executorService);
+        transferService.uploadBlob(transferFileSnapshot, repository.basePath());
+    }
+
     public void testUploadBlobAsync() throws IOException, InterruptedException {
         Path testFile = createTempFile();
         Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND);
diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java
new file mode 100644
index 0000000000000..c6b4579f5ddd1
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java
@@ -0,0 +1,77 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.index.shard.ShardId;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+
+public class FileTransferTrackerTests extends OpenSearchTestCase {
+
+    protected final ShardId shardId = new ShardId("index", "_na_", 1);
+    FileTransferTracker fileTransferTracker;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+    }
+
+    public void testOnSuccess() throws IOException {
+        fileTransferTracker = new FileTransferTracker(shardId);
+        Path testFile = createTempFile();
+        Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND);
+        try (
+            FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot(
+                testFile,
+                randomNonNegativeLong()
+            )
+        ) {
+            fileTransferTracker.onSuccess(transferFileSnapshot);
+            // idempotent
+            fileTransferTracker.onSuccess(transferFileSnapshot);
+            assertEquals(fileTransferTracker.allUploaded().size(), 1);
+            try {
+                fileTransferTracker.onFailure(transferFileSnapshot, new IOException("random exception"));
+                fail("failure after success invalid");
+            } catch (IllegalStateException ex) {
+                // all good
+            }
+        }
+    }
+
+    public void testOnFailure() throws IOException {
+        fileTransferTracker = new FileTransferTracker(shardId);
+        Path testFile = createTempFile();
+        Path testFile2 = createTempFile();
+        Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND);
+        try (
+            FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot(
+                testFile,
+                randomNonNegativeLong()
+            );
+            FileSnapshot.TransferFileSnapshot transferFileSnapshot2 = new FileSnapshot.TransferFileSnapshot(
+                testFile2,
+                randomNonNegativeLong()
+            )
+        ) {
+
+            fileTransferTracker.onFailure(transferFileSnapshot, new IOException("random exception"));
+            fileTransferTracker.onSuccess(transferFileSnapshot2);
+            assertEquals(fileTransferTracker.allUploaded().size(), 1);
+
+            fileTransferTracker.onSuccess(transferFileSnapshot);
+            assertEquals(fileTransferTracker.allUploaded().size(), 2);
+        }
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java
index a3a7cbecc6958..cd941feb37002 100644
--- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java
+++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java
@@ -132,6 +132,7 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso
         sizes.put(ThreadPool.Names.SNAPSHOT, ThreadPool::halfAllocatedProcessorsMaxFive);
         sizes.put(ThreadPool.Names.FETCH_SHARD_STARTED, 
ThreadPool::twiceAllocatedProcessors); sizes.put(ThreadPool.Names.FETCH_SHARD_STORE, ThreadPool::twiceAllocatedProcessors); + sizes.put(ThreadPool.Names.TRANSLOG_TRANSFER, ThreadPool::halfAllocatedProcessorsMaxTen); return sizes.get(threadPoolName).apply(numberOfProcessors); } diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index ed1146d0e035f..f6010792bef91 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -112,6 +112,7 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.translog.InternalTranslogManager; +import org.opensearch.index.translog.LocalTranslog; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogConfig; import org.opensearch.index.translog.TranslogDeletionPolicy; @@ -150,9 +151,9 @@ import static java.util.Collections.shuffle; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.instanceOf; import static org.opensearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; import static org.opensearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.opensearch.index.engine.Engine.Operation.Origin.REPLICA; @@ -525,7 +526,7 @@ protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSup shardId, primaryTermSupplier.getAsLong() ); - return new Translog( + return new LocalTranslog( translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), From b8c9b9a30f9a0fda6d537bf0adb180e33cbba83b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Dec 2022 00:04:34 -0800 Subject: [PATCH 04/13] Bump geoip2 from 3.0.2 to 4.0.0 in /modules/ingest-geoip (#5634) * Bump geoip2 from 3.0.2 to 4.0.0 in /modules/ingest-geoip Bumps [geoip2](https://github.com/maxmind/GeoIP2-java) from 3.0.2 to 4.0.0. - [Release notes](https://github.com/maxmind/GeoIP2-java/releases) - [Changelog](https://github.com/maxmind/GeoIP2-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/maxmind/GeoIP2-java/compare/v3.0.2...v4.0.0) --- updated-dependencies: - dependency-name: com.maxmind.geoip2:geoip2 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- CHANGELOG.md | 2 +- modules/ingest-geoip/build.gradle | 2 +- modules/ingest-geoip/licenses/geoip2-3.0.2.jar.sha1 | 1 - modules/ingest-geoip/licenses/geoip2-4.0.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 modules/ingest-geoip/licenses/geoip2-3.0.2.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/geoip2-4.0.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 42cd569c9fb49..a8bb78c2455fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -101,7 +101,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bumps `azure-storage-blob` from 12.16.1 to 12.20.0 ([#4995](https://github.com/opensearch-project/OpenSearch/pull/4995)) - Bumps `commons-compress` from 1.21 to 1.22 ([#5104](https://github.com/opensearch-project/OpenSearch/pull/5104)) - Bump `opencensus-contrib-http-util` from 0.18.0 to 0.31.1 ([#3633](https://github.com/opensearch-project/OpenSearch/pull/3633)) -- Bump `geoip2` from 3.0.1 to 3.0.2 ([#5103](https://github.com/opensearch-project/OpenSearch/pull/5103)) +- Bump `geoip2` from 3.0.2 to 4.0.0 ([#5634](https://github.com/opensearch-project/OpenSearch/pull/5634)) - Bump gradle-extra-configurations-plugin from 7.0.0 to 8.0.0 ([#4808](https://github.com/opensearch-project/OpenSearch/pull/4808)) ### Changed diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index a560e79cc2e81..1e3e631415b07 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -39,7 +39,7 @@ opensearchplugin { } dependencies { - api('com.maxmind.geoip2:geoip2:3.0.2') + api('com.maxmind.geoip2:geoip2:4.0.0') // geoip2 dependencies: api('com.maxmind.db:maxmind-db:3.0.0') diff --git a/modules/ingest-geoip/licenses/geoip2-3.0.2.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-3.0.2.jar.sha1 deleted file mode 100644 index 2ff70cf499713..0000000000000 --- a/modules/ingest-geoip/licenses/geoip2-3.0.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0ab0a451309c93f0fb6bf3cb203ba19d452c800 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/geoip2-4.0.0.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.0.0.jar.sha1 new file mode 100644 index 0000000000000..a0ed76b3cb30b --- /dev/null +++ b/modules/ingest-geoip/licenses/geoip2-4.0.0.jar.sha1 @@ -0,0 +1 @@ +4d6811f8c07aa7f121edbf797d26907b879df5f5 \ No newline at end of file From 98ca4a7f655d0c4a2a2e8a0e0c03d40f13fbfffb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Dec 2022 01:22:16 -0800 Subject: [PATCH 05/13] Bump reactor-core from 3.4.23 to 3.5.1 in /plugins/repository-azure (#5604) * Bump reactor-core from 3.4.23 to 3.5.1 in /plugins/repository-azure Bumps [reactor-core](https://github.com/reactor/reactor-core) from 3.4.23 to 3.5.1. - [Release notes](https://github.com/reactor/reactor-core/releases) - [Commits](https://github.com/reactor/reactor-core/compare/v3.4.23...v3.5.1) --- updated-dependencies: - dependency-name: io.projectreactor:reactor-core dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- CHANGELOG.md | 8 ++------ plugins/repository-azure/build.gradle | 17 +++++------------ .../licenses/reactor-core-3.4.23.jar.sha1 | 1 - .../licenses/reactor-core-3.5.1.jar.sha1 | 1 + 4 files changed, 8 insertions(+), 19 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-core-3.4.23.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-core-3.5.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index a8bb78c2455fc..346cf1c55cbeb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,26 +30,22 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bumps `avro` from 1.11.0 to 1.11.1 - Bumps `woodstox-core` from 6.3.0 to 6.3.1 - Bumps `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) -- Bumps `azure-core-http-netty` from 1.12.0 to 1.12.4 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) - Bumps `azure-storage-common` from 12.18.0 to 12.18.1 ([#4164](https://github.com/opensearch-project/OpenSearch/pull/4664)) - Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1 ([#4411](https://github.com/opensearch-project/OpenSearch/pull/4411)) - Bumps `reactor-netty-core` from 1.0.19 to 1.0.22 ([#4447](https://github.com/opensearch-project/OpenSearch/pull/4447)) - Bumps `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488)) - Bumps `com.diffplug.spotless` from 6.10.0 to 6.11.0 ([#4547](https://github.com/opensearch-project/OpenSearch/pull/4547)) -- Bumps `reactor-core` from 3.4.18 to 3.4.23 ([#4548](https://github.com/opensearch-project/OpenSearch/pull/4548)) +- Bumps `reactor-core` from 3.4.23 to 3.5.1 ([#5604](https://github.com/opensearch-project/OpenSearch/pull/5604)) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Bumps `commons-compress` from 1.21 to 1.22 - Bumps `jcodings` from 1.0.57 to 1.0.58 ([#5233](https://github.com/opensearch-project/OpenSearch/pull/5233)) - Bumps `google-http-client-jackson2` from 1.35.0 to 1.42.3 ([#5234](https://github.com/opensearch-project/OpenSearch/pull/5234)) - Bumps `azure-core` from 1.33.0 to 1.34.0 ([#5235](https://github.com/opensearch-project/OpenSearch/pull/5235)) - Bumps `azure-core-http-netty` from 1.12.4 to 1.12.7 ([#5235](https://github.com/opensearch-project/OpenSearch/pull/5235)) -- Bumps `spock-core` from from 2.1-groovy-3.0 to 2.3-groovy-3.0 ([#5315](https://github.com/opensearch-project/OpenSearch/pull/5315)) +- Bumps `spock-core` from 2.1-groovy-3.0 to 2.3-groovy-3.0 ([#5315](https://github.com/opensearch-project/OpenSearch/pull/5315)) - Bumps `json-schema-validator` from 1.0.69 to 1.0.73 ([#5316](https://github.com/opensearch-project/OpenSearch/pull/5316)) - Bumps `proto-google-common-protos` from 2.8.0 to 2.10.0 ([#5318](https://github.com/opensearch-project/OpenSearch/pull/5318)) -- Bumps `protobuf-java` from 3.21.7 to 3.21.9 ([#5319](https://github.com/opensearch-project/OpenSearch/pull/5319)) -- Update Apache Lucene to 9.5.0-snapshot-a4ef70f ([#4979](https://github.com/opensearch-project/OpenSearch/pull/4979)) - Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) -- Bumps `protobuf-java` from 3.21.9 to 3.21.11 in /plugins/repository-hdfs ([#5519](https://github.com/opensearch-project/OpenSearch/pull/5519)) - Update Apache Lucene to 9.5.0-snapshot-d5cef1c 
([#5570](https://github.com/opensearch-project/OpenSearch/pull/5570)) - Bump antlr4 from 4.9.3 to 4.11.1 ([#4546](https://github.com/opensearch-project/OpenSearch/pull/4546)) - Bumps `maven-model` from 3.6.2 to 3.8.6 ([#5599](https://github.com/opensearch-project/OpenSearch/pull/5599)) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index d1f83806607bd..ba644d580bbcd 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -56,7 +56,7 @@ dependencies { implementation project(':modules:transport-netty4') api 'com.azure:azure-storage-blob:12.20.0' api 'org.reactivestreams:reactive-streams:1.0.4' - api 'io.projectreactor:reactor-core:3.4.23' + api 'io.projectreactor:reactor-core:3.5.1' api 'io.projectreactor.netty:reactor-netty:1.0.18' api 'io.projectreactor.netty:reactor-netty-core:1.0.24' api 'io.projectreactor.netty:reactor-netty-http:1.0.24' @@ -97,6 +97,10 @@ thirdPartyAudit { 'com.azure.storage.internal.avro.implementation.AvroReaderFactory', 'com.azure.storage.internal.avro.implementation.schema.AvroSchema', 'com.ctc.wstx.shaded.msv_core.driver.textui.Driver', + 'io.micrometer.context.ContextAccessor', + 'io.micrometer.context.ContextRegistry', + 'io.micrometer.context.ContextSnapshot', + 'io.micrometer.context.ContextSnapshot$Scope', 'io.micrometer.core.instrument.Clock', 'io.micrometer.core.instrument.Counter', 'io.micrometer.core.instrument.Counter$Builder', @@ -156,17 +160,6 @@ thirdPartyAudit { 'javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter', 'javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter$DEFAULT', 'javax.xml.bind.annotation.adapters.XmlJavaTypeAdapters', - 'kotlin.collections.ArraysKt', - 'kotlin.jvm.JvmClassMappingKt', - 'kotlin.jvm.functions.Function0', - 'kotlin.jvm.functions.Function1', - 'kotlin.jvm.internal.FunctionReference', - 'kotlin.jvm.internal.Intrinsics', - 'kotlin.jvm.internal.Reflection', - 'kotlin.jvm.internal.markers.KMappedMarker', - 'kotlin.reflect.KClass', - 'kotlin.reflect.KDeclarationContainer', - 'kotlin.sequences.Sequence', 'org.osgi.framework.BundleActivator', 'org.osgi.framework.BundleContext', 'org.slf4j.impl.StaticLoggerBinder', diff --git a/plugins/repository-azure/licenses/reactor-core-3.4.23.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.4.23.jar.sha1 deleted file mode 100644 index e398f1672188c..0000000000000 --- a/plugins/repository-azure/licenses/reactor-core-3.4.23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94bb06e2c4e6968c6faf4f5363cb278351f44a6a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-core-3.5.1.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.5.1.jar.sha1 new file mode 100644 index 0000000000000..697203d58ff36 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-core-3.5.1.jar.sha1 @@ -0,0 +1 @@ +d1e833c13320d3a3133be6a70a4f1a82466f65fe \ No newline at end of file From ea1cc9d054b2d7cf0046438ecc57777e29112c2b Mon Sep 17 00:00:00 2001 From: Dhwanil Patel Date: Tue, 27 Dec 2022 18:56:15 +0530 Subject: [PATCH 06/13] Add version check during task submission for bwc for static threshold setting (#5633) * Add version check during task submission for bwc for static threshold setting Signed-off-by: Dhwanil Patel --- CHANGELOG.md | 1 + .../org/opensearch/OpenSearchException.java | 2 +- .../service/ClusterManagerTaskThrottler.java | 30 ++++++++++++- .../ClusterManagerTaskThrottlerTests.java | 42 +++++++++++++++++++ 4 files changed, 73 insertions(+), 2 deletions(-) diff --git 
a/CHANGELOG.md b/CHANGELOG.md
index 346cf1c55cbeb..9ba7bfbb27bb0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -112,6 +112,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix case sensitivity for wildcard queries ([#5462](https://github.com/opensearch-project/OpenSearch/pull/5462))
 - Apply cluster manager throttling settings during bootstrap ([#5524](https://github.com/opensearch-project/OpenSearch/pull/5524))
 - Update thresholds map when cluster manager throttling setting is removed ([#5524](https://github.com/opensearch-project/OpenSearch/pull/5524))
+- Fix backward compatibility for static cluster manager throttling threshold setting ([#5633](https://github.com/opensearch-project/OpenSearch/pull/5633))
 
 ### Security
 [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.4...HEAD
diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java
index 78f6b50b3a039..78e5dc044cbd8 100644
--- a/server/src/main/java/org/opensearch/OpenSearchException.java
+++ b/server/src/main/java/org/opensearch/OpenSearchException.java
@@ -1613,7 +1613,7 @@ private enum OpenSearchExceptionHandle {
             ClusterManagerThrottlingException.class,
             ClusterManagerThrottlingException::new,
             165,
-            Version.V_2_4_0
+            Version.V_2_5_0
         ),
         SNAPSHOT_IN_USE_DELETION_EXCEPTION(
             SnapshotInUseDeletionException.class,
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java
index db2be505b2fbb..726963fe4b37d 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java
@@ -22,6 +22,7 @@
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Supplier;
 
 /**
@@ -51,6 +52,11 @@ public class ClusterManagerTaskThrottler implements TaskBatcherListener {
     private final ConcurrentMap<String, Long> tasksThreshold;
     private final Supplier<Version> minNodeVersionSupplier;
 
+    // Throttling starts only once all nodes are on version 2.5.0 or above.
+    // During an upgrade it likewise waits for all older-version nodes to leave the cluster before throttling begins.
+    // This is needed specifically for the static setting that enables throttling.
+    private AtomicBoolean startThrottling = new AtomicBoolean();
+
     public ClusterManagerTaskThrottler(
         final Settings settings,
         final ClusterSettings clusterSettings,
@@ -168,7 +174,7 @@ public void onBeginSubmit(List<? extends TaskBatcher.BatchedTask> tasks) {
             int size = tasks.size();
             if (clusterManagerThrottlingKey.isThrottlingEnabled()) {
                 Long threshold = tasksThreshold.get(clusterManagerThrottlingKey.getTaskThrottlingKey());
-                if (threshold != null && (count + size > threshold)) {
+                if (threshold != null && shouldThrottle(threshold, count, size)) {
                    clusterManagerTaskThrottlerListener.onThrottle(clusterManagerThrottlingKey.getTaskThrottlingKey(), size);
                    logger.warn(
                        "Throwing Throttling Exception for [{}]. Trying to add [{}] tasks to queue, limit is set to [{}]",
@@ -185,6 +191,28 @@ public void onBeginSubmit(List<? extends TaskBatcher.BatchedTask> tasks) {
         });
     }
 
+    /**
+     * If throttling thresholds are set via the static setting, this updates the threshold map.
+     * It may start throwing throttling exceptions to older nodes in the cluster.
+     * Older-version nodes are not equipped to handle the throttling exception and
+     * this may result in unexpected behavior where internal tasks start failing without any retries.
+     *
+     * For every task submission request, it validates that the minimum node version is greater than or equal to 2.5.0
+     * and sets the startThrottling flag. Once the startThrottling flag is set, the check is skipped for subsequent tasks.
+     */
+    private boolean shouldThrottle(Long threshold, Long count, int size) {
+        if (!startThrottling.get()) {
+            if (minNodeVersionSupplier.get().compareTo(Version.V_2_5_0) >= 0) {
+                startThrottling.compareAndSet(false, true);
+                logger.info("Starting cluster manager throttling as all nodes are higher than or equal to 2.5.0");
+            } else {
+                logger.info("Skipping cluster manager throttling as at least one node < 2.5.0 is present in cluster");
+                return false;
+            }
+        }
+        return count + size > threshold;
+    }
+
     @Override
     public void onSubmitFailure(List<? extends TaskBatcher.BatchedTask> tasks) {
         reduceTaskCount(tasks);
diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java
index 63ed54ddf5238..0acdbffe3dc4f 100644
--- a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java
+++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java
@@ -324,6 +324,48 @@ public void testThrottlingForDisabledThrottlingTask() {
         assertEquals(0L, throttlingStats.getThrottlingCount(taskKey));
     }
 
+    public void testThrottlingForInitialStaticSettingAndVersionCheck() {
+        ClusterManagerThrottlingStats throttlingStats = new ClusterManagerThrottlingStats();
+        DiscoveryNode clusterManagerNode = getClusterManagerNode(Version.V_2_5_0);
+        DiscoveryNode dataNode = getDataNode(Version.V_2_4_0);
+        setState(
+            clusterService,
+            ClusterStateCreationUtils.state(clusterManagerNode, clusterManagerNode, new DiscoveryNode[] { clusterManagerNode, dataNode })
+        );
+
+        // setting threshold in initial settings
+        ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        int put_mapping_threshold_value = randomIntBetween(1, 10);
+        Settings initialSettings = Settings.builder()
+            .put("cluster_manager.throttling.thresholds.put-mapping.value", put_mapping_threshold_value)
+            .build();
+        ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(
+            initialSettings,
+            clusterSettings,
+            () -> { return clusterService.getMasterService().getMinNodeVersion(); },
+            throttlingStats
+        );
+        ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = throttler.registerClusterManagerTask("put-mapping", true);
+
+        // verify that adding more tasks than the threshold passes while an older node is still in the cluster
+        throttler.onBeginSubmit(getMockUpdateTaskList("put-mapping", throttlingKey, put_mapping_threshold_value + 5));
+        assertEquals(0L, throttlingStats.getThrottlingCount("put-mapping"));
+
+        // Removing older version node from cluster
+        setState(
+            clusterService,
+            ClusterStateCreationUtils.state(clusterManagerNode, clusterManagerNode, new DiscoveryNode[] { clusterManagerNode })
+        );
+
+        // Adding more tasks; these should now be throttled,
+        // as the queue already has more tasks than the threshold from the previous call.
+        assertThrows(
+            ClusterManagerThrottlingException.class,
+            () -> throttler.onBeginSubmit(getMockUpdateTaskList("put-mapping", throttlingKey, 3))
+        );
+        assertEquals(3L, throttlingStats.getThrottlingCount("put-mapping"));
+    }
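To make the backward-compatibility behavior concrete: a node can now ship with a static threshold like the one below, and the version gate in shouldThrottle keeps it inert until the minimum node version reaches 2.5.0. A sketch using the same setting key as the test above (illustrative only, not part of the patch):

    // Static node-level threshold, equivalent to `initialSettings` in the test above;
    // throttling only takes effect once every node reports version >= 2.5.0.
    Settings initialSettings = Settings.builder()
        .put("cluster_manager.throttling.thresholds.put-mapping.value", 10)
        .build();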
+
     public void testThrottling() {
         ClusterManagerThrottlingStats throttlingStats = new ClusterManagerThrottlingStats();
         String taskKey = "test";
From d388051e7fd53c9030a38dc24e333cea3dbc0b65 Mon Sep 17 00:00:00 2001
From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com>
Date: Wed, 28 Dec 2022 18:21:05 +0530
Subject: [PATCH 07/13] Enable creation of indices using Remote Translog (#5638)

* Enable creation of indices using Remote Translog behind a setting and feature flag

Signed-off-by: Gaurav Bafna
---
 .../cluster/metadata/IndexMetadata.java       | 40 +++++++++++
 .../common/settings/IndexScopedSettings.java  | 3 +-
 .../org/opensearch/index/IndexModule.java     | 6 +-
 .../org/opensearch/index/IndexService.java    | 19 ++++-
 .../org/opensearch/index/IndexSettings.java   | 6 ++
 .../RemoveCorruptedShardDataCommand.java      | 5 ++
 ...emoteBlobStoreInternalTranslogFactory.java | 72 +++++++++++++++++++
 .../opensearch/indices/IndicesService.java    | 13 +++-
 .../main/java/org/opensearch/node/Node.java   | 7 +-
 .../opensearch/index/IndexModuleTests.java    | 6 +-
 .../opensearch/index/IndexSettingsTests.java  | 52 ++++++++++++++
 .../snapshots/SnapshotResiliencyTests.java    | 9 ++-
 12 files changed, 222 insertions(+), 16 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java

diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
index dcc0ea33e6131..d32b933b558f0 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
@@ -297,6 +297,9 @@ public Iterator<Setting<?>> settings() {
     public static final String SETTING_REMOTE_STORE_REPOSITORY = "index.remote_store.repository";
 
     public static final String SETTING_REMOTE_TRANSLOG_STORE_ENABLED = "index.remote_store.translog.enabled";
+
+    public static final String SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY = "index.remote_store.translog.repository";
+
     /**
      * Used to specify if the index data should be persisted in the remote store.
      */
@@ -405,6 +408,43 @@ public Iterator<Setting<?>> settings() {
         Property.Final
     );
 
+    public static final Setting<String> INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING = Setting.simpleString(
+        SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY,
+        new Setting.Validator<>() {
+
+            @Override
+            public void validate(final String value) {}
+
+            @Override
+            public void validate(final String value, final Map<Setting<?>, Object> settings) {
+                if (value == null || value.isEmpty()) {
+                    throw new IllegalArgumentException(
+                        "Setting " + INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey() + " should be provided with non-empty repository ID"
+                    );
+                } else {
+                    final Boolean isRemoteTranslogStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING);
+                    if (isRemoteTranslogStoreEnabled == null || isRemoteTranslogStoreEnabled == false) {
+                        throw new IllegalArgumentException(
+                            "Settings "
+                                + INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey()
+                                + " can only be set/enabled when "
+                                + INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.getKey()
+                                + " is set to true"
+                        );
+                    }
+                }
+            }
+
+            @Override
+            public Iterator<Setting<?>> settings() {
+                final List<Setting<?>> settings = Collections.singletonList(INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING);
+                return settings.iterator();
+            }
+        },
+        Property.IndexScope,
+        Property.Final
+    );
+
     public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas";
     public static final Setting<AutoExpandReplicas> INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING;
diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
index 079fc38415328..1efce2eba8867 100644
--- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
@@ -225,8 +225,9 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
             FeatureFlags.REMOTE_STORE,
             List.of(
                 IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING,
+                IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING,
                 IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING,
-                IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING
+                IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING
             ),
             FeatureFlags.SEARCHABLE_SNAPSHOT,
             List.of(
diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java
index 9f7e3e9fb5eee..69543577f48b4 100644
--- a/server/src/main/java/org/opensearch/index/IndexModule.java
+++ b/server/src/main/java/org/opensearch/index/IndexModule.java
@@ -492,7 +492,8 @@ public IndexService newIndexService(
         NamedWriteableRegistry namedWriteableRegistry,
         BooleanSupplier idFieldDataEnabled,
         ValuesSourceRegistry valuesSourceRegistry,
-        IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory
+        IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory,
+        Supplier<RepositoriesService> repositoriesServiceSupplier
     ) throws IOException {
         final IndexEventListener eventListener = freeze();
         Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> readerWrapperFactory = indexReaderWrapper
@@ -547,7 +548,8 @@ public IndexService newIndexService(
             allowExpensiveQueries,
             expressionResolver,
             valuesSourceRegistry,
-            recoveryStateFactory
+            recoveryStateFactory,
+            repositoriesServiceSupplier
         );
         success = true;
         return indexService;
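For context, the settings introduced in this commit compose as follows when creating a remote-translog-backed index. This is a hypothetical sketch, not part of the patch: the repository name is a placeholder, and `index.remote_store.enabled` is assumed to be the key behind SETTING_REMOTE_STORE_ENABLED (only the translog keys are shown verbatim in this diff):

    // Sketch: index settings for an index with a remote translog. The validator above
    // rejects index.remote_store.translog.repository unless
    // index.remote_store.translog.enabled is also set to true.
    Settings indexSettings = Settings.builder()
        .put("index.remote_store.enabled", true)                            // assumed key
        .put("index.remote_store.translog.enabled", true)
        .put("index.remote_store.translog.repository", "my-translog-repo")  // placeholder repository
        .build();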
b/server/src/main/java/org/opensearch/index/IndexService.java @@ -89,7 +89,9 @@ import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.Store; import org.opensearch.index.translog.InternalTranslogFactory; +import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.Translog; +import org.opensearch.index.translog.TranslogFactory; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -97,6 +99,7 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.threadpool.ThreadPool; @@ -173,6 +176,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final IndexNameExpressionResolver expressionResolver; private final Supplier indexSortSupplier; private final ValuesSourceRegistry valuesSourceRegistry; + private final Supplier repositoriesServiceSupplier; public IndexService( IndexSettings indexSettings, @@ -204,7 +208,8 @@ public IndexService( BooleanSupplier allowExpensiveQueries, IndexNameExpressionResolver expressionResolver, ValuesSourceRegistry valuesSourceRegistry, - IndexStorePlugin.RecoveryStateFactory recoveryStateFactory + IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, + Supplier repositoriesServiceSupplier ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -276,6 +281,7 @@ public IndexService( this.trimTranslogTask = new AsyncTrimTranslogTask(this); this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); + this.repositoriesServiceSupplier = repositoriesServiceSupplier; updateFsyncTaskIfNecessary(); } @@ -518,6 +524,14 @@ public synchronized IndexShard createShard( remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY); } + TranslogFactory translogFactory = this.indexSettings.isRemoteTranslogStoreEnabled() && routing.primary() + ? new RemoteBlobStoreInternalTranslogFactory( + repositoriesServiceSupplier, + threadPool, + this.indexSettings.getRemoteStoreTranslogRepository() + ) + : new InternalTranslogFactory(); + Directory directory = directoryFactory.newDirectory(this.indexSettings, path); store = new Store( shardId, @@ -548,8 +562,7 @@ public synchronized IndexShard createShard( () -> globalCheckpointSyncer.accept(shardId), retentionLeaseSyncer, circuitBreakerService, - // TODO Replace with remote translog factory in the follow up PR - this.indexSettings.isRemoteTranslogStoreEnabled() ? null : new InternalTranslogFactory(), + translogFactory, this.indexSettings.isSegRepEnabled() ? checkpointPublisher : null, remoteStore );
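Shard creation above now picks a `TranslogFactory` per shard: the remote blob-store factory for primaries of remote-translog-enabled indices, and the plain `InternalTranslogFactory` otherwise. The `Supplier<RepositoriesService>` that feeds it exists because `RepositoriesService` is constructed after the indices machinery during node startup, so `Node` publishes it through a set-once reference instead (see the `Node.java` and test hunks below). A self-contained sketch of that deferred-wiring pattern; the nested `RepositoriesService` class here is only a stand-in for the real one:

```java
import java.util.function.Supplier;

import org.apache.lucene.util.SetOnce;

public class DeferredWiringSketch {
    // Stand-in for org.opensearch.repositories.RepositoriesService.
    static class RepositoriesService {}

    public static void main(String[] args) {
        SetOnce<RepositoriesService> reference = new SetOnce<>();

        // Consumers constructed early capture the supplier, not the (still null) service.
        Supplier<RepositoriesService> supplier = reference::get;

        // Later in startup the reference is filled exactly once; a second set() throws.
        reference.set(new RepositoriesService());

        // By the time a shard is created, the supplier resolves to the live service.
        System.out.println(supplier.get() != null); // prints: true
    }
}
```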
diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 7648f0a192ce7..be7e63a5c9068 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -583,6 +583,7 @@ public final class IndexSettings { private final ReplicationType replicationType; private final boolean isRemoteStoreEnabled; private final boolean isRemoteTranslogStoreEnabled; + private final String remoteStoreTranslogRepository; private final String remoteStoreRepository; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock private volatile Settings settings; @@ -745,6 +746,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); isRemoteTranslogStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false); + remoteStoreTranslogRepository = settings.get(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY); this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); @@ -1011,6 +1013,10 @@ public String getRemoteStoreRepository() { return remoteStoreRepository; } + public String getRemoteStoreTranslogRepository() { + return remoteStoreTranslogRepository; + } + /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. diff --git a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java index c7e380f842fa0..ca679d457c0dc 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -191,6 +191,11 @@ protected void findAndProcessShardPath( } final IndexSettings indexSettings = new IndexSettings(indexMetadata, settings); + if (indexSettings.isRemoteTranslogStoreEnabled()) { + // ToDo : Need to revisit corrupt shard recovery strategy for remote store enabled indices + throw new OpenSearchException("tool doesn't work for remote translog enabled indices"); + } + final Index index = indexMetadata.getIndex(); final ShardId shId = new ShardId(index, shardId); diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java new file mode 100644 index 0000000000000..0d9e01aef4891 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.index.translog; + +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.concurrent.ExecutorService; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; +import java.util.function.Supplier; + +/** + * Translog Factory for the remotefs translog {@link RemoteFsTranslog} + * + * @opensearch.internal + */ +public class RemoteBlobStoreInternalTranslogFactory implements TranslogFactory { + + private final Repository repository; + + private final ExecutorService executorService; + + public RemoteBlobStoreInternalTranslogFactory( + Supplier repositoriesServiceSupplier, + ThreadPool threadPool, + String repositoryName + ) { + Repository repository; + try { + repository = repositoriesServiceSupplier.get().repository(repositoryName); + } catch (RepositoryMissingException ex) { + throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", ex); + } + this.repository = repository; + this.executorService = threadPool.executor(ThreadPool.Names.TRANSLOG_TRANSFER); + } + + @Override + public Translog newTranslog( + TranslogConfig config, + String translogUUID, + TranslogDeletionPolicy deletionPolicy, + LongSupplier globalCheckpointSupplier, + LongSupplier primaryTermSupplier, + LongConsumer persistedSequenceNumberConsumer + ) throws IOException { + + assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; + BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository); + return new RemoteFsTranslog( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer, + blobStoreRepository, + executorService + ); + } +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 204bf4204511e..2946411fc9238 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -180,6 +180,7 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.LongSupplier; +import java.util.function.Supplier; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -269,6 +270,7 @@ public class IndicesService extends AbstractLifecycleComponent private final boolean nodeWriteDanglingIndicesInfo; private final ValuesSourceRegistry valuesSourceRegistry; private final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory; + private final Supplier repositoriesServiceSupplier; @Override protected void doStart() { @@ -297,7 +299,8 @@ public IndicesService( Map directoryFactories, ValuesSourceRegistry valuesSourceRegistry, Map recoveryStateFactories, - IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory, + Supplier repositoriesServiceSupplier ) { this.settings = settings; this.threadPool = threadPool; @@ -386,6 +389,7 @@ protected void closeInternal() { this.allowExpensiveQueries = ALLOW_EXPENSIVE_QUERIES.get(clusterService.getSettings()); 
clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.remoteDirectoryFactory = remoteDirectoryFactory; + this.repositoriesServiceSupplier = repositoriesServiceSupplier; } public IndicesService( @@ -410,7 +414,8 @@ public IndicesService( Map directoryFactories, ValuesSourceRegistry valuesSourceRegistry, Map recoveryStateFactories, - IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory, + Supplier repositoriesServiceSupplier ) { this.settings = settings; this.threadPool = threadPool; @@ -499,6 +504,7 @@ protected void closeInternal() { this.allowExpensiveQueries = ALLOW_EXPENSIVE_QUERIES.get(clusterService.getSettings()); clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.remoteDirectoryFactory = remoteDirectoryFactory; + this.repositoriesServiceSupplier = repositoriesServiceSupplier; } private static final String DANGLING_INDICES_UPDATE_THREAD_NAME = "DanglingIndices#updateTask"; @@ -861,7 +867,8 @@ private synchronized IndexService createIndexService( namedWriteableRegistry, this::isIdFieldDataEnabled, valuesSourceRegistry, - remoteDirectoryFactory + remoteDirectoryFactory, + repositoriesServiceSupplier ); } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 46270230ccf27..ed3256d499520 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -663,7 +663,6 @@ protected Node( ); final IndicesService indicesService; - if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { indicesService = new IndicesService( settings, @@ -687,7 +686,8 @@ protected Node( Map.copyOf(directoryFactories), searchModule.getValuesSourceRegistry(), recoveryStateFactories, - remoteDirectoryFactory + remoteDirectoryFactory, + repositoriesServiceReference::get ); } else { indicesService = new IndicesService( @@ -711,7 +711,8 @@ protected Node( Map.copyOf(directoryFactories), searchModule.getValuesSourceRegistry(), recoveryStateFactories, - remoteDirectoryFactory + remoteDirectoryFactory, + repositoriesServiceReference::get ); } diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 6bfdd9ae16773..429c2126d9a00 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -44,6 +44,7 @@ import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.SetOnce; import org.apache.lucene.util.SetOnce.AlreadySetException; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -217,6 +218,8 @@ public void tearDown() throws Exception { } private IndexService newIndexService(IndexModule module) throws IOException { + final SetOnce repositoriesServiceReference = new SetOnce<>(); + repositoriesServiceReference.set(repositoriesService); return module.newIndexService( CREATE_INDEX, nodeEnvironment, @@ -234,7 +237,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { writableRegistry(), () -> false, null, - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) + new RemoteSegmentStoreDirectoryFactory(() -> 
repositoriesService), + repositoriesServiceReference::get ); } diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 9ea262ab2263d..34087c7fa8df9 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -943,4 +943,56 @@ public void testSetRemoteRepositoryFailsWhenEmptyString() { ); assertEquals("Setting index.remote_store.repository should be provided with non-empty repository ID", iae.getMessage()); } + + public void testRemoteTranslogRepoDefaultSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertNull(settings.getRemoteStoreTranslogRepository()); + } + + public void testRemoteTranslogExplicitSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "tlog-store") + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertNull(settings.getRemoteStoreRepository()); + assertEquals("tlog-store", settings.getRemoteStoreTranslogRepository()); + } + + public void testSetRemoteTranslogRepositoryFailsWhenRemoteTranslogIsNotEnabled() { + Settings indexSettings = Settings.builder() + .put("index.replication.type", ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "repo1") + .build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.get(indexSettings) + ); + assertEquals( + "Setting index.remote_store.translog.repository can only be set/enabled when index.remote_store.translog.enabled is set to true", + iae.getMessage() + ); + } + + public void testSetRemoteTranslogRepositoryFailsWhenEmptyString() { + Settings indexSettings = Settings.builder() + .put("index.replication.type", ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "") + .build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.get(indexSettings) + ); + assertEquals("Setting index.remote_store.translog.repository should be provided with non-empty repository ID", iae.getMessage()); + } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 663c325db12c2..5732fc5bfa270 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -1797,6 +1797,8 @@ public void onFailure(final Exception e) { ); final BigArrays bigArrays = new BigArrays(new PageCacheRecycler(settings), null, "test"); final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); + final SetOnce repositoriesServiceReference = new SetOnce<>(); + repositoriesServiceReference.set(repositoriesService); if
(FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { indicesService = new IndicesService( settings, @@ -1831,7 +1833,8 @@ public void onFailure(final Exception e) { emptyMap(), null, emptyMap(), - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService), + repositoriesServiceReference::get ); } else { indicesService = new IndicesService( @@ -1866,10 +1869,10 @@ public void onFailure(final Exception e) { emptyMap(), null, emptyMap(), - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService), + repositoriesServiceReference::get ); } - final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( settings, From 66a74fead20d1b00f744b00483c65bb546cb1658 Mon Sep 17 00:00:00 2001 From: Joshua Palis Date: Wed, 28 Dec 2022 14:24:54 -0800 Subject: [PATCH 08/13] Increasing timeout of testQuorumRecovery to 90 seconds from 30 (#5651) * Increasing timeout of testQuorumRecovery to 90 seconds from 30 Signed-off-by: Joshua Palis * Updating changelog Signed-off-by: Joshua Palis Signed-off-by: Joshua Palis --- CHANGELOG.md | 1 + .../java/org/opensearch/gateway/QuorumGatewayIT.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ba7bfbb27bb0..71d8193613bf4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -84,6 +84,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fixed compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - Reject bulk requests with invalid actions ([#5299](https://github.com/opensearch-project/OpenSearch/issues/5299)) - Support OpenSSL Provider with default Netty allocator ([#5460](https://github.com/opensearch-project/OpenSearch/pull/5460)) +- Increasing timeout of testQuorumRecovery to 90 seconds from 30 ([#5651](https://github.com/opensearch-project/OpenSearch/pull/5651)) ### Security diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/QuorumGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/QuorumGatewayIT.java index 1e190d3bec345..7e983b114450f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/QuorumGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/QuorumGatewayIT.java @@ -92,7 +92,7 @@ public void doAfterNodes(int numNodes, final Client activeClient) throws Excepti logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); assertFalse(clusterHealth.isTimedOut()); assertEquals(ClusterHealthStatus.YELLOW, clusterHealth.getStatus()); - }, 30, TimeUnit.SECONDS); + }, 90, TimeUnit.SECONDS); logger.info("--> one node is closed -- index 1 document into the remaining nodes"); activeClient.prepareIndex("test") From a24e9626fe3dbe64e929dd48ef73c4a5f7ddef8e Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 28 Dec 2022 17:30:11 -0500 Subject: [PATCH 09/13] Remove --enable-preview feature flag since Apache Lucene now patches class files (#5642) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + build.gradle | 3 --- buildSrc/version.properties | 2 +- distribution/src/config/jvm.options | 3 --- .../lucene-expressions-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-expressions-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-analysis-icu-9.5.0-snapshot-0878271.jar.sha1 | 1 + 
.../lucene-analysis-icu-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-analysis-kuromoji-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-analysis-kuromoji-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-analysis-nori-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-analysis-nori-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-analysis-phonetic-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-analysis-phonetic-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-analysis-smartcn-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-analysis-stempel-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-analysis-stempel-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-analysis-morfologik-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-analysis-morfologik-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-analysis-common-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-analysis-common-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-backward-codecs-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-backward-codecs-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - server/licenses/lucene-core-9.5.0-snapshot-0878271.jar.sha1 | 1 + server/licenses/lucene-core-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../licenses/lucene-grouping-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../licenses/lucene-grouping-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-highlighter-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-highlighter-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - server/licenses/lucene-join-9.5.0-snapshot-0878271.jar.sha1 | 1 + server/licenses/lucene-join-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - server/licenses/lucene-memory-9.5.0-snapshot-0878271.jar.sha1 | 1 + server/licenses/lucene-memory-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - server/licenses/lucene-misc-9.5.0-snapshot-0878271.jar.sha1 | 1 + server/licenses/lucene-misc-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - server/licenses/lucene-queries-9.5.0-snapshot-0878271.jar.sha1 | 1 + server/licenses/lucene-queries-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-queryparser-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-queryparser-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.5.0-snapshot-0878271.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../lucene-spatial-extras-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../lucene-spatial-extras-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - .../licenses/lucene-spatial3d-9.5.0-snapshot-0878271.jar.sha1 | 1 + .../licenses/lucene-spatial3d-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - server/licenses/lucene-suggest-9.5.0-snapshot-0878271.jar.sha1 | 1 + server/licenses/lucene-suggest-9.5.0-snapshot-6700b7e.jar.sha1 | 1 - 48 files changed, 24 insertions(+), 29 deletions(-) create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 
plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-core-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-join-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.5.0-snapshot-6700b7e.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.5.0-snapshot-0878271.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.5.0-snapshot-6700b7e.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 71d8193613bf4..e589cf4cc866f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Changed http code 
on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Change http code for DecommissioningFailedException from 500 to 400 ([#5283](https://github.com/opensearch-project/OpenSearch/pull/5283)) - Pre conditions check before updating weighted routing metadata ([#4955](https://github.com/opensearch-project/OpenSearch/pull/4955)) +- Remove --enable-preview feature flag since Apache Lucene now patches class files ([#5642](https://github.com/opensearch-project/OpenSearch/pull/5642)) ### Deprecated diff --git a/build.gradle b/build.gradle index 2211da85345b5..84bbb6aceed14 100644 --- a/build.gradle +++ b/build.gradle @@ -413,9 +413,6 @@ gradle.projectsEvaluated { if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) { task.jvmArgs += ["-Djava.security.manager=allow"] } - if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_19) { - task.jvmArgs += ["--enable-preview"] - } } } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e0ea2cf759f2f..18db45a2c00c0 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.5.0-snapshot-6700b7e +lucene = 9.5.0-snapshot-0878271 bundled_jdk_vendor = adoptium bundled_jdk = 19.0.1+10 diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 6cd5feadbef87..ef1035489c9fc 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -78,6 +78,3 @@ ${error.file} # Explicitly allow security manager (https://bugs.openjdk.java.net/browse/JDK-8270380) 18-:-Djava.security.manager=allow - -# Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4 (https://github.com/opensearch-project/OpenSearch/issues/4637) -19-:--enable-preview diff --git a/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-0878271.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..7b246b88ba5a8 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +6912f9f9ceebbe5a54795035984709a4ada115e8 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-6700b7e.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 276b29c190ce7..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -08df331f48c00ce93cd53034e72d1c6df6089d4f \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-0878271.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..e906bd304e64f --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +477512b093aa1dd55ea9340c3a16128f6722161b \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-6700b7e.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index e024054a5e0e1..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1e6046cc43c434f7b67a067d79acb7afc16d2a5 \ No newline at end of file diff 
--git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-0878271.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..fcdd55305387f --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +89e9389e166bbd5e774d4b62acd75bf37481062e \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-6700b7e.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index f8efc9f57e775..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3aaa77079e6e9d3aa48f14567ff043c17928aa42 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-0878271.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..d1a86508da090 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +928c3eb74edf7ab62ea7d0948703f2867299cb56 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-6700b7e.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 29e7e6b583065..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -20571ad73b364476fe03edefac0a4b17d93496d4 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-0878271.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..8b8986ee07ab2 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +2d5640fe3a99b9af4c469243032b89dba32f7ab2 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-6700b7e.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 006f92f49cdef..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51e525be810f618cca3607b050e67cef7dd10b3a \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-0878271.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..20f4c39f40ebf --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +44c3ceefd4999b046b935d8737c26c1a835c8d5a \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-6700b7e.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 97d6f734afb4b..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f4ea57af1e7266349281ba89a6f44f9eea95f6e \ No newline at end of 
file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-0878271.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..899575f029626 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +6da2070b19949a397e45f2b54a739cef403d0c94 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-6700b7e.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index b68aecb39f299..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5eb8d1fb7bd44ddc4a50cab3f9d9feab05a7e4c1 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-0878271.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..49000d3e4b151 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +1803e928dd330599048b71386456fb90acbed5c1 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-6700b7e.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 273592eda53be..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7dc7c673303ecf7c020aceb4be371e45021b559b \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-analysis-common-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..de9ad9baf756b --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +92c380f063c5b75efd6b3c48dffc942bfe21360f \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-analysis-common-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 514528000fb0e..0000000000000 --- a/server/licenses/lucene-analysis-common-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -006b4486680b0e31b4605580f86e920f6777bb1f \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-backward-codecs-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..77aa7df611354 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +2f0f211b1ed0447135f84a36935625e9a33a98e5 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-backward-codecs-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index fd1dac48117f2..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a020dfccda5436b1753867d2c1858ce03889faa5 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-core-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 
0000000000000..f10ba49ec6dd7 --- /dev/null +++ b/server/licenses/lucene-core-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +1f8895d068b5d98c16d48f267a772d0243148040 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-core-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index b352d1f6602a1..0000000000000 --- a/server/licenses/lucene-core-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee2db9baf6cbbe8a9bb274e21f693388b31d4c27 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-grouping-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..ab153ccb80f0b --- /dev/null +++ b/server/licenses/lucene-grouping-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +f12819e51f145624ef7b1fd93aaa543cec13e3aa \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-grouping-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 77e5001b13332..0000000000000 --- a/server/licenses/lucene-grouping-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5087c01d1fde29e89767e845db97537f23a74f82 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-highlighter-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..0885f1fda24bf --- /dev/null +++ b/server/licenses/lucene-highlighter-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +ed88cca26580a32234e3f605cf963703ea99eb60 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-highlighter-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 4e2db3527dae6..0000000000000 --- a/server/licenses/lucene-highlighter-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -048c8b99bb17fc2e32a278f9d0396be299a24a44 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-join-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..afdba4c231926 --- /dev/null +++ b/server/licenses/lucene-join-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +f51934b2362e827d9f467507624b773153f0ca01 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-join-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 51f42b35982b9..0000000000000 --- a/server/licenses/lucene-join-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -187e3da58037aec9d0309c28776d3e806b8c3ea8 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-memory-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..29d894a978024 --- /dev/null +++ b/server/licenses/lucene-memory-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +54744097c6882a498c08ffd8a42b54da00c9420c \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-memory-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index f85b07247837f..0000000000000 --- a/server/licenses/lucene-memory-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -978470b902d93ffdfb3f3e95903c74b97e61cfae \ No newline at end of file diff --git 
a/server/licenses/lucene-misc-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-misc-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..b09c965d042b3 --- /dev/null +++ b/server/licenses/lucene-misc-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +b9061dea1178e0cac86147d7b69fc53bf2f8ee58 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-misc-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 689f0ec8fcbfa..0000000000000 --- a/server/licenses/lucene-misc-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d727a2d00a8bbecc14579775dde0b5d18f9b48ea \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-queries-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..604edd3b00842 --- /dev/null +++ b/server/licenses/lucene-queries-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +eca07717c0ee563c5337222fd1a1b7ef5f03f34f \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-queries-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 389b62541fe4e..0000000000000 --- a/server/licenses/lucene-queries-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -70ea9bd7ae0cc33ecd4a7eaa47c0849edfc5087b \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-queryparser-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..11ba3ca14d028 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +4d1ed21bc62940c4545778dc5f6249b67d08b095 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-queryparser-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index ec2b9220b3113..0000000000000 --- a/server/licenses/lucene-queryparser-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e8f624f29fa6f975c6bf8e9c4a5f51b5ecec5d3e \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-sandbox-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..6bbb3be0bd801 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +c564c73080cff226a131847b4361bc9c2155f00c \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-sandbox-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index cf63a2316b0c5..0000000000000 --- a/server/licenses/lucene-sandbox-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7144b2bd92d95c7e24a0253e1c0753e5d7620c97 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-spatial-extras-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..a4def52c3788e --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +7a654fcc671e0372a9becd7c7cbd9a1f43569106 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-spatial-extras-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 116295dedd0df..0000000000000 --- 
a/server/licenses/lucene-spatial-extras-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f61ce4f1890a3fb60fe66700e773001dc891f2ae \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-spatial3d-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..9b782e4eda598 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +33c9c43eac6b4924155eeff03d2231c2bed9b169 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-spatial3d-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index 9d2254c0477e3..0000000000000 --- a/server/licenses/lucene-spatial3d-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -31c18a397ecf3aee12d3809ef4b9b000ef1bde17 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.5.0-snapshot-0878271.jar.sha1 b/server/licenses/lucene-suggest-9.5.0-snapshot-0878271.jar.sha1 new file mode 100644 index 0000000000000..5e3e9cebdc35c --- /dev/null +++ b/server/licenses/lucene-suggest-9.5.0-snapshot-0878271.jar.sha1 @@ -0,0 +1 @@ +95a69fd51377d964d16057a4a9501665a2ca9a7a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.5.0-snapshot-6700b7e.jar.sha1 b/server/licenses/lucene-suggest-9.5.0-snapshot-6700b7e.jar.sha1 deleted file mode 100644 index b916208c935f3..0000000000000 --- a/server/licenses/lucene-suggest-9.5.0-snapshot-6700b7e.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3526d986a5b0e3cbfa1caae5ecf591269d5b7ed5 \ No newline at end of file From 30ac2849c3617442505bc0c78bc1b222f8ca16b1 Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Wed, 28 Dec 2022 15:13:05 -0800 Subject: [PATCH 10/13] Ensure force merge to single segment is flushed (#5652) Signed-off-by: Daniel Widdis --- .../resources/rest-api-spec/test/search.aggregation/20_terms.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml index 7c7a223044725..c373ec645a4c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -824,6 +824,7 @@ setup: indices.forcemerge: index: test_1 max_num_segments: 1 + flush: true - do: search: From cc2b704d99653fe426014b0de0c9482d97288156 Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Wed, 28 Dec 2022 15:14:56 -0800 Subject: [PATCH 11/13] Improve tests.config property thread safety (#5645) Signed-off-by: Daniel Widdis --- .../java/org/opensearch/bootstrap/BootstrapForTesting.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java index 4f135f2d14a75..524362524bbde 100644 --- a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java @@ -130,8 +130,9 @@ public class BootstrapForTesting { // java.io.tmpdir FilePermissionUtils.addDirectoryPath(perms, "java.io.tmpdir", javaTmpDir, "read,readlink,write,delete", false); // custom test config file - if (Strings.hasLength(System.getProperty("tests.config"))) { - 
FilePermissionUtils.addSingleFilePath(perms, PathUtils.get(System.getProperty("tests.config")), "read,readlink"); + String testConfigFile = System.getProperty("tests.config"); + if (Strings.hasLength(testConfigFile)) { + FilePermissionUtils.addSingleFilePath(perms, PathUtils.get(testConfigFile), "read,readlink"); } // intellij hack: intellij test runner wants setIO and will // screw up all test logging without it!
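The change above is a read-once fix: the property is read into a local before the length check, so the check and the use can no longer observe different values if another thread modifies `tests.config` in between. A standalone illustration of the pattern, using a hypothetical property name:

```java
public class ReadOnceSketch {
    public static void main(String[] args) {
        System.setProperty("example.config", "/tmp/example.yml"); // hypothetical property

        // Racy shape (the old code): two reads of the same property, so a
        // concurrent clearProperty() after the check can make the second
        // read return a different value, or null.
        if (System.getProperty("example.config") != null) {
            String racy = System.getProperty("example.config"); // may differ from the checked value
            System.out.println("racy: " + racy);
        }

        // Read-once shape (the new code): check and use the same captured value.
        String value = System.getProperty("example.config");
        if (value != null) {
            System.out.println("read-once: " + value);
        }
    }
}
```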
From f2b5044580053bf4d63f1545197488596e60b678 Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Wed, 28 Dec 2022 20:05:22 -0800 Subject: [PATCH 12/13] Flush index to clear translog before stats validation (#5655) Signed-off-by: Daniel Widdis Signed-off-by: Daniel Widdis --- .../java/org/opensearch/search/PitMultiNodeTests.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java index b11a80b9d8726..6a29417eae8ae 100644 --- a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java @@ -28,6 +28,7 @@ import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.PitTestsUtil; import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Requests; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -36,6 +37,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; @@ -337,8 +339,12 @@ public void onFailure(Exception e) {} public void validatePitStats(String index, long expectedPitCurrent, long expectedOpenContexts) throws ExecutionException, InterruptedException { + // Clear the index transaction log + FlushRequest flushRequest = Requests.flushRequest(index); + client().admin().indices().flush(flushRequest).get(); + // Test stats IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); - indicesStatsRequest.indices("index"); + indicesStatsRequest.indices(index); indicesStatsRequest.all(); IndicesStatsResponse indicesStatsResponse = client().admin().indices().stats(indicesStatsRequest).get(); long pitCurrent = indicesStatsResponse.getIndex(index).getTotal().search.getTotal().getPitCurrent();
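Two fixes above make the stats check deterministic: the index is flushed so translog-resident operations are committed before stats are read, and the stats request now targets the `index` parameter instead of the hard-coded string `"index"`. The same flush-then-read ordering as a small helper; the `Client` is assumed to come from the integration-test base class:

```java
import java.util.concurrent.ExecutionException;

import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.stats.IndicesStatsRequest;
import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
import org.opensearch.client.Client;
import org.opensearch.client.Requests;

public class FlushBeforeStatsSketch {
    // Flush first so pending translog operations are committed, then read stats;
    // mirrors the ordering introduced in validatePitStats above.
    static IndicesStatsResponse flushThenStats(Client client, String index)
        throws ExecutionException, InterruptedException {
        FlushRequest flushRequest = Requests.flushRequest(index);
        client.admin().indices().flush(flushRequest).get();

        IndicesStatsRequest statsRequest = new IndicesStatsRequest();
        statsRequest.indices(index);
        statsRequest.all();
        return client.admin().indices().stats(statsRequest).get();
    }
}
```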
From d248643fc51ba9f263cdc2c6fa90405e63107328 Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Thu, 29 Dec 2022 12:41:38 -0800 Subject: [PATCH 13/13] Fix flaky SearchCancellationIT tests to avoid race condition (#5656) * Add waiting time to account for Thread.sleep inaccuracy Signed-off-by: Daniel Widdis --- .../search/SearchCancellationIT.java | 81 ++++++++++--------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java index da5698918cf99..7f500b4e25cea 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java @@ -69,7 +69,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; @@ -88,6 +87,10 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) public class SearchCancellationIT extends OpenSearchIntegTestCase { + private TimeValue requestCancellationTimeout = TimeValue.timeValueSeconds(1); + private TimeValue clusterCancellationTimeout = TimeValue.timeValueMillis(1500); + private TimeValue keepAlive = TimeValue.timeValueSeconds(5); + @Override protected Collection> nodePlugins() { return Collections.singleton(ScriptedBlockPlugin.class); @@ -233,15 +236,13 @@ public void testCancellationDuringQueryPhaseUsingRequestParameter() throws Excep List plugins = initBlockFactory(); indexTestData(); - TimeValue cancellationTimeout = new TimeValue(2, TimeUnit.SECONDS); ActionFuture searchResponse = client().prepareSearch("test") - .setCancelAfterTimeInterval(cancellationTimeout) + .setCancelAfterTimeInterval(requestCancellationTimeout) .setAllowPartialSearchResults(randomBoolean()) .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()))) .execute(); awaitForBlock(plugins); - // sleep for cancellation timeout to ensure scheduled cancellation task is actually executed - Thread.sleep(cancellationTimeout.getMillis()); + sleepForAtLeast(requestCancellationTimeout.getMillis()); // unblock the search thread disableBlocks(plugins); ensureSearchWasCancelled(searchResponse); @@ -251,19 +252,19 @@ public void testCancellationDuringQueryPhaseUsingClusterSetting() throws Excepti List plugins = initBlockFactory(); indexTestData(); - TimeValue cancellationTimeout = new TimeValue(2, TimeUnit.SECONDS); client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY, cancellationTimeout).build()) + .setPersistentSettings( + Settings.builder().put(SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY, clusterCancellationTimeout).build() + ) .get(); ActionFuture searchResponse = client().prepareSearch("test") .setAllowPartialSearchResults(randomBoolean()) .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()))) .execute(); awaitForBlock(plugins); - // sleep for cluster cancellation timeout to ensure scheduled cancellation task is actually executed - Thread.sleep(cancellationTimeout.getMillis()); + sleepForAtLeast(clusterCancellationTimeout.getMillis()); // unblock the search thread disableBlocks(plugins); ensureSearchWasCancelled(searchResponse); @@ -288,14 +289,12 @@ public void testCancellationDuringFetchPhase() throws Exception { public void testCancellationDuringFetchPhaseUsingRequestParameter() throws Exception { List plugins = initBlockFactory(); indexTestData(); - TimeValue cancellationTimeout = new TimeValue(2, TimeUnit.SECONDS); ActionFuture searchResponse = client().prepareSearch("test") - .setCancelAfterTimeInterval(cancellationTimeout) + .setCancelAfterTimeInterval(requestCancellationTimeout) .addScriptField("test_field", new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap())) .execute(); awaitForBlock(plugins); - // sleep for request cancellation timeout to ensure scheduled cancellation task is actually executed - Thread.sleep(cancellationTimeout.getMillis()); + sleepForAtLeast(requestCancellationTimeout.getMillis()); // unblock the search thread disableBlocks(plugins); ensureSearchWasCancelled(searchResponse); @@ -307,7 +306,7 @@ public void testCancellationOfScrollSearches() throws Exception { logger.info("Executing search"); ActionFuture searchResponse = client().prepareSearch("test") - .setScroll(TimeValue.timeValueSeconds(10)) + .setScroll(keepAlive) .setSize(5) .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap()))) .execute(); @@ -326,16 +325,16 @@ public void testCancellationOfScrollSearches() throws Exception { public void testCancellationOfFirstScrollSearchRequestUsingRequestParameter() throws Exception { List plugins = initBlockFactory(); indexTestData(); - TimeValue cancellationTimeout = new TimeValue(2, TimeUnit.SECONDS); ActionFuture searchResponse = client().prepareSearch("test") - .setScroll(TimeValue.timeValueSeconds(10)) - .setCancelAfterTimeInterval(cancellationTimeout) + .setScroll(keepAlive) + .setCancelAfterTimeInterval(requestCancellationTimeout) .setSize(5) .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap()))) .execute(); awaitForBlock(plugins); - Thread.sleep(cancellationTimeout.getMillis()); + sleepForAtLeast(requestCancellationTimeout.getMillis()); + // unblock the search thread disableBlocks(plugins); SearchResponse response = ensureSearchWasCancelled(searchResponse); if (response != null) { @@ -354,7 +353,6 @@ public void testCancellationOfScrollSearchesOnFollowupRequests() throws Exceptio disableBlocks(plugins); logger.info("Executing search"); - TimeValue keepAlive = TimeValue.timeValueSeconds(5); SearchResponse searchResponse = client().prepareSearch("test") .setScroll(keepAlive) .setSize(2) @@ -394,11 +392,9 @@ public void testNoCancellationOfScrollSearchOnFollowUpRequest() throws Exception // Disable block so the first request would pass disableBlocks(plugins); - TimeValue keepAlive = TimeValue.timeValueSeconds(5); - TimeValue cancellationTimeout = TimeValue.timeValueSeconds(2); SearchResponse searchResponse = client().prepareSearch("test") .setScroll(keepAlive) - .setCancelAfterTimeInterval(cancellationTimeout) + .setCancelAfterTimeInterval(requestCancellationTimeout) .setSize(2) .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()))) .get(); @@ -418,8 +414,8 @@ public void testNoCancellationOfScrollSearchOnFollowUpRequest() throws Exception .execute(); awaitForBlock(plugins); - // sleep for cancellation timeout to ensure there is no scheduled task for cancellation - Thread.sleep(cancellationTimeout.getMillis()); + sleepForAtLeast(requestCancellationTimeout.getMillis()); + // unblock the search thread disableBlocks(plugins); // wait for response and ensure there is no failure
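The repeated edit in these hunks replaces raw `Thread.sleep(timeout)` calls with the `sleepForAtLeast` helper added at the end of this patch: sleeping for exactly the cancellation interval can return before the scheduled cancellation task has actually fired, because millisecond timers and the task scheduler do not tick in lockstep, and that misalignment is the race behind the flakiness. A standalone sketch of the idea; the deadline re-check loop is an extra safeguard beyond the fixed 100ms pad the patch itself uses:

```java
public class SleepBufferSketch {
    // Pad used by the patch; anything comfortably above typical OS timer
    // granularity (roughly 10-15ms on some platforms) would do.
    private static final long BUFFER_MILLIS = 100L;

    // Sleeps for at least `millis` plus the buffer, re-sleeping until a
    // monotonic-clock deadline has truly passed.
    static void sleepForAtLeast(long millis) throws InterruptedException {
        final long deadline = System.nanoTime() + (millis + BUFFER_MILLIS) * 1_000_000L;
        long remainingNanos;
        while ((remainingNanos = deadline - System.nanoTime()) > 0) {
            Thread.sleep(Math.max(1L, remainingNanos / 1_000_000L));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        sleepForAtLeast(50L);
        System.out.println("slept ms: " + (System.nanoTime() - start) / 1_000_000L);
    }
}
```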
testDisableCancellationAtRequestLevel() throws Exception { .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()))) .execute(); awaitForBlock(plugins); - // sleep for cancellation timeout to ensure there is no scheduled task for cancellation - Thread.sleep(cancellationTimeout.getMillis()); + sleepForAtLeast(clusterCancellationTimeout.getMillis()); // unblock the search thread disableBlocks(plugins); // ensure search was successful since cancellation was disabled at request level @@ -455,7 +451,6 @@ public void testDisableCancellationAtRequestLevel() throws Exception { public void testDisableCancellationAtClusterLevel() throws Exception { List plugins = initBlockFactory(); indexTestData(); - TimeValue cancellationTimeout = new TimeValue(2, TimeUnit.SECONDS); client().admin() .cluster() .prepareUpdateSettings() @@ -466,8 +461,7 @@ public void testDisableCancellationAtClusterLevel() throws Exception { .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()))) .execute(); awaitForBlock(plugins); - // sleep for cancellation timeout to ensure there is no scheduled task for cancellation - Thread.sleep(cancellationTimeout.getMillis()); + sleepForAtLeast(clusterCancellationTimeout.getMillis()); // unblock the search thread disableBlocks(plugins); // ensure search was successful since cancellation was disabled at request level @@ -501,11 +495,12 @@ public void testCancelMultiSearch() throws Exception { public void testMSearchChildRequestCancellationWithClusterLevelTimeout() throws Exception { List plugins = initBlockFactory(); indexTestData(); - TimeValue cancellationTimeout = new TimeValue(2, TimeUnit.SECONDS); client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY, cancellationTimeout).build()) + .setPersistentSettings( + Settings.builder().put(SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY, clusterCancellationTimeout).build() + ) .get(); ActionFuture mSearchResponse = client().prepareMultiSearch() .setMaxConcurrentSearchRequests(2) @@ -526,8 +521,7 @@ public void testMSearchChildRequestCancellationWithClusterLevelTimeout() throws ) .execute(); awaitForBlock(plugins); - // sleep for cluster cancellation timeout to ensure scheduled cancellation task is actually executed - Thread.sleep(cancellationTimeout.getMillis()); + sleepForAtLeast(clusterCancellationTimeout.getMillis()); // unblock the search thread disableBlocks(plugins); // both child requests are expected to fail @@ -544,8 +538,6 @@ public void testMSearchChildRequestCancellationWithClusterLevelTimeout() throws public void testMSearchChildReqCancellationWithHybridTimeout() throws Exception { List plugins = initBlockFactory(); indexTestData(); - TimeValue reqCancellationTimeout = new TimeValue(2, TimeUnit.SECONDS); - TimeValue clusterCancellationTimeout = new TimeValue(3, TimeUnit.SECONDS); client().admin() .cluster() .prepareUpdateSettings() @@ -558,7 +550,7 @@ public void testMSearchChildReqCancellationWithHybridTimeout() throws Exception .add( client().prepareSearch("test") .setAllowPartialSearchResults(randomBoolean()) - .setCancelAfterTimeInterval(reqCancellationTimeout) + .setCancelAfterTimeInterval(requestCancellationTimeout) .setQuery( scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())) ) @@ -581,8 +573,7 @@ public void 
testMSearchChildReqCancellationWithHybridTimeout() throws Exception ) .execute(); awaitForBlock(plugins); - // sleep for cluster cancellation timeout to ensure scheduled cancellation task is actually executed - Thread.sleep(Math.max(reqCancellationTimeout.getMillis(), clusterCancellationTimeout.getMillis())); + sleepForAtLeast(Math.max(requestCancellationTimeout.getMillis(), clusterCancellationTimeout.getMillis())); // unblock the search thread disableBlocks(plugins); // only first and last child request are expected to fail @@ -592,6 +583,16 @@ public void testMSearchChildReqCancellationWithHybridTimeout() throws Exception ensureMSearchWasCancelled(mSearchResponse, expectedFailedRequests); } + /** + * Sleeps for the specified number of milliseconds plus a 100ms buffer to account for system timer/scheduler inaccuracies. + * + * @param milliseconds The minimum time to sleep + * @throws InterruptedException if interrupted during sleep + */ + private static void sleepForAtLeast(long milliseconds) throws InterruptedException { + Thread.sleep(milliseconds + 100L); + } + public static class ScriptedBlockPlugin extends MockScriptPlugin { static final String SCRIPT_NAME = "search_block";
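
Note: the standalone sketch below is not part of the patch; it only illustrates the sleep-with-buffer pattern that the sleepForAtLeast helper above relies on. The class name SleepBufferDemo and the one-second timeout are illustrative, not taken from the OpenSearch source.

    import java.util.concurrent.TimeUnit;

    public class SleepBufferDemo {
        // Mirrors the helper added to SearchCancellationIT: pad the requested
        // sleep by 100 ms so that a cancellation task scheduled to fire at
        // exactly `millis` has had a chance to run before the caller proceeds.
        static void sleepForAtLeast(long millis) throws InterruptedException {
            Thread.sleep(millis + 100L);
        }

        public static void main(String[] args) throws InterruptedException {
            long timeoutMillis = TimeUnit.SECONDS.toMillis(1); // e.g. a request-level cancellation timeout
            long start = System.nanoTime();
            sleepForAtLeast(timeoutMillis);
            long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
            // elapsed is at least timeoutMillis + 100, which is why the tests can
            // safely unblock the search thread immediately after this call returns.
            System.out.println("slept " + elapsed + " ms for a " + timeoutMillis + " ms timeout");
        }
    }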