diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index a525ffb7f1add..c6a28f178d2bf 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -27,11 +27,15 @@
diff --git a/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeService.java b/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeService.java
index 7c71ef9ff3321..a979db38ab334 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeService.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeService.java
@@ -47,44 +47,78 @@ public interface AzureComputeService {

     final class Management {
-        public static final Setting<String> SUBSCRIPTION_ID_SETTING =
-            Setting.simpleString("cloud.azure.management.subscription.id", Property.NodeScope, Property.Filtered);
-        public static final Setting<String> SERVICE_NAME_SETTING =
-            Setting.simpleString("cloud.azure.management.cloud.service.name", Property.NodeScope);
+        public static final Setting<String> SUBSCRIPTION_ID_SETTING = Setting.simpleString(
+            "cloud.azure.management.subscription.id",
+            Property.NodeScope,
+            Property.Filtered
+        );
+        public static final Setting<String> SERVICE_NAME_SETTING = Setting.simpleString(
+            "cloud.azure.management.cloud.service.name",
+            Property.NodeScope
+        );

         // Keystore settings
-        public static final Setting<String> KEYSTORE_PATH_SETTING =
-            Setting.simpleString("cloud.azure.management.keystore.path", Property.NodeScope, Property.Filtered);
-        public static final Setting<String> KEYSTORE_PASSWORD_SETTING =
-            Setting.simpleString("cloud.azure.management.keystore.password", Property.NodeScope,
-                Property.Filtered);
-        public static final Setting<KeyStoreType> KEYSTORE_TYPE_SETTING =
-            new Setting<>("cloud.azure.management.keystore.type", KeyStoreType.pkcs12.name(), KeyStoreType::fromString,
-                Property.NodeScope, Property.Filtered);
+        public static final Setting<String> KEYSTORE_PATH_SETTING = Setting.simpleString(
+            "cloud.azure.management.keystore.path",
+            Property.NodeScope,
+            Property.Filtered
+        );
+        public static final Setting<String> KEYSTORE_PASSWORD_SETTING = Setting.simpleString(
+            "cloud.azure.management.keystore.password",
+            Property.NodeScope,
+            Property.Filtered
+        );
+        public static final Setting<KeyStoreType> KEYSTORE_TYPE_SETTING = new Setting<>(
+            "cloud.azure.management.keystore.type",
+            KeyStoreType.pkcs12.name(),
+            KeyStoreType::fromString,
+            Property.NodeScope,
+            Property.Filtered
+        );

         // so that it can be overridden for tests
-        public static final Setting<URI> ENDPOINT_SETTING = new Setting<URI>("cloud.azure.management.endpoint",
-            "https://management.core.windows.net/", s -> {
+        public static final Setting<URI> ENDPOINT_SETTING = new Setting<URI>(
+            "cloud.azure.management.endpoint",
+            "https://management.core.windows.net/",
+            s -> {
                 try {
                     return new URI(s);
                 } catch (URISyntaxException e) {
                     throw new IllegalArgumentException(e);
                 }
-            }, Property.NodeScope);
+            },
+            Property.NodeScope
+        );
     }

     final class Discovery {
-        public static final Setting<TimeValue> REFRESH_SETTING =
-            Setting.positiveTimeSetting("discovery.azure.refresh_interval", TimeValue.timeValueSeconds(0), Property.NodeScope);
-        public static final Setting<AzureSeedHostsProvider.HostType> HOST_TYPE_SETTING =
-            new Setting<>("discovery.azure.host.type", AzureSeedHostsProvider.HostType.PRIVATE_IP.name(),
-                AzureSeedHostsProvider.HostType::fromString, Property.NodeScope);
-        public static final Setting<String> ENDPOINT_NAME_SETTING = new Setting<>("discovery.azure.endpoint.name", "opensearch",
-            Function.identity(), Property.NodeScope);
-        public static final Setting<String> DEPLOYMENT_NAME_SETTING = Setting.simpleString("discovery.azure.deployment.name",
-            Property.NodeScope);
-        public static final Setting<Deployment> DEPLOYMENT_SLOT_SETTING = new Setting<>("discovery.azure.deployment.slot",
-            Deployment.PRODUCTION.name(), Deployment::fromString, Property.NodeScope);
+        public static final Setting<TimeValue> REFRESH_SETTING = Setting.positiveTimeSetting(
+            "discovery.azure.refresh_interval",
+            TimeValue.timeValueSeconds(0),
+            Property.NodeScope
+        );
+        public static final Setting<AzureSeedHostsProvider.HostType> HOST_TYPE_SETTING = new Setting<>(
+            "discovery.azure.host.type",
+            AzureSeedHostsProvider.HostType.PRIVATE_IP.name(),
+            AzureSeedHostsProvider.HostType::fromString,
+            Property.NodeScope
+        );
+        public static final Setting<String> ENDPOINT_NAME_SETTING = new Setting<>(
+            "discovery.azure.endpoint.name",
+            "opensearch",
+            Function.identity(),
+            Property.NodeScope
+        );
+        public static final Setting<String> DEPLOYMENT_NAME_SETTING = Setting.simpleString(
+            "discovery.azure.deployment.name",
+            Property.NodeScope
+        );
+        public static final Setting<Deployment> DEPLOYMENT_SLOT_SETTING = new Setting<>(
+            "discovery.azure.deployment.slot",
+            Deployment.PRODUCTION.name(),
+            Deployment::fromString,
+            Property.NodeScope
+        );
     }

     HostedServiceGetDetailedResponse getServiceDetails();
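All of the Azure settings above are reformatted to the same one-argument-per-line shape. As a minimal sketch of how such a `Setting` is declared and read back from a node's `Settings` — assuming only the OpenSearch core settings classes on the classpath; the class name and `main` method are illustrative, not part of this PR:

```java
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Setting.Property;
import org.opensearch.common.settings.Settings;

public class SettingReadSketch {
    // Same shape as SUBSCRIPTION_ID_SETTING above, after formatting.
    static final Setting<String> SUBSCRIPTION_ID = Setting.simpleString(
        "cloud.azure.management.subscription.id",
        Property.NodeScope,
        Property.Filtered
    );

    public static void main(String[] args) {
        // Filtered settings are masked in APIs but read normally from a Settings object.
        Settings settings = Settings.builder().put(SUBSCRIPTION_ID.getKey(), "my-subscription").build();
        System.out.println(SUBSCRIPTION_ID.get(settings)); // -> my-subscription
    }
}
```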
diff --git a/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java b/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java
index 093128c0a1392..6a8b8d83f539b 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java
@@ -56,11 +56,9 @@
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;

-public class AzureComputeServiceImpl extends AbstractLifecycleComponent
-    implements AzureComputeService {
+public class AzureComputeServiceImpl extends AbstractLifecycleComponent implements AzureComputeService {

     private static final Logger logger = LogManager.getLogger(AzureComputeServiceImpl.class);

-
     private final ComputeManagementClient client;
     private final String serviceName;

@@ -89,8 +87,15 @@ public AzureComputeServiceImpl(Settings settings) {
         Configuration configuration = new Configuration(builder);
         configuration.setProperty(Configuration.PROPERTY_LOG_HTTP_REQUESTS, logger.isTraceEnabled());

-        Configuration managementConfig = ManagementConfiguration.configure(null, configuration,
-            Management.ENDPOINT_SETTING.get(settings), subscriptionId, keystorePath, keystorePassword, keystoreType);
+        Configuration managementConfig = ManagementConfiguration.configure(
+            null,
+            configuration,
+            Management.ENDPOINT_SETTING.get(settings),
+            subscriptionId,
+            keystorePath,
+            keystorePassword,
+            keystoreType
+        );

         logger.debug("creating new Azure client for [{}], [{}]", subscriptionId, serviceName);
         client = ComputeManagementService.create(managementConfig);
@@ -111,20 +116,20 @@ private static String getRequiredSetting(Settings settings, Setting<String> sett
     public HostedServiceGetDetailedResponse getServiceDetails() {
         SpecialPermission.check();
         try {
-            return AccessController.doPrivileged((PrivilegedExceptionAction<HostedServiceGetDetailedResponse>)
-                () -> client.getHostedServicesOperations().getDetailed(serviceName));
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<HostedServiceGetDetailedResponse>) () -> client.getHostedServicesOperations()
+                    .getDetailed(serviceName)
+            );
         } catch (PrivilegedActionException e) {
             throw new AzureServiceRemoteException("can not get list of azure nodes", e.getCause());
         }
     }

     @Override
-    protected void doStart() throws OpenSearchException {
-    }
+    protected void doStart() throws OpenSearchException {}

     @Override
-    protected void doStop() throws OpenSearchException {
-    }
+    protected void doStop() throws OpenSearchException {}

     @Override
     protected void doClose() throws OpenSearchException {
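The `getServiceDetails()` hunk above wraps the Azure SDK call in `AccessController.doPrivileged`, with the cast selecting the checked-exception overload. A minimal sketch of the same call shape, using a hypothetical `fetchServiceDetails()` stand-in for the SDK call:

```java
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedCallSketch {
    // Hypothetical stand-in for the Azure SDK call guarded above.
    static String fetchServiceDetails() throws Exception {
        return "details";
    }

    public static void main(String[] args) {
        try {
            // The cast picks the checked-exception overload, exactly as in getServiceDetails().
            String details = AccessController.doPrivileged((PrivilegedExceptionAction<String>) () -> fetchServiceDetails());
            System.out.println(details);
        } catch (PrivilegedActionException e) {
            // The original wraps e.getCause() in AzureServiceRemoteException.
            throw new RuntimeException(e.getCause());
        }
    }
}
```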
diff --git a/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java
index 9bcfbdd1907d9..0fe6904e83242 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java
@@ -61,17 +61,17 @@
 import java.util.List;

 public class AzureSeedHostsProvider implements SeedHostsProvider {
-
+
     private static final Logger logger = LogManager.getLogger(AzureSeedHostsProvider.class);

     public enum HostType {
         PRIVATE_IP("private_ip"),
         PUBLIC_IP("public_ip");

-        private String type ;
+        private String type;

         HostType(String type) {
-            this.type = type ;
+            this.type = type;
         }

         public String getType() {
@@ -123,8 +123,12 @@ public static Deployment fromString(String string) {
     private final String deploymentName;
     private final DeploymentSlot deploymentSlot;

-    public AzureSeedHostsProvider(Settings settings, AzureComputeService azureComputeService,
-                                  TransportService transportService, NetworkService networkService) {
+    public AzureSeedHostsProvider(
+        Settings settings,
+        AzureComputeService azureComputeService,
+        TransportService transportService,
+        NetworkService networkService
+    ) {
         this.settings = settings;
         this.azureComputeService = azureComputeService;
         this.transportService = transportService;
@@ -152,8 +156,8 @@ public AzureSeedHostsProvider(Settings settings, AzureComputeService azureComput
     @Override
     public List<TransportAddress> getSeedAddresses(HostsResolver hostsResolver) {
         if (refreshInterval.millis() != 0) {
-            if (dynamicHosts != null &&
-                (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
+            if (dynamicHosts != null
+                && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
                 logger.trace("using cache to retrieve node list");
                 return dynamicHosts;
             }
@@ -179,7 +183,8 @@ public List<TransportAddress> getSeedAddresses(HostsResolver hostsResolver) {
         InetAddress ipAddress = null;
         try {
             ipAddress = networkService.resolvePublishHostAddresses(
-                NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY));
+                NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY)
+            );
             logger.trace("ip of current node: [{}]", ipAddress);
         } catch (IOException e) {
             // We can't find the publish host address... Hmmm. Too bad :-(
@@ -189,24 +194,26 @@ public List<TransportAddress> getSeedAddresses(HostsResolver hostsResolver) {
         for (HostedServiceGetDetailedResponse.Deployment deployment : detailed.getDeployments()) {
             // We check the deployment slot
             if (deployment.getDeploymentSlot() != deploymentSlot) {
-                logger.debug("current deployment slot [{}] for [{}] is different from [{}]. skipping...",
-                    deployment.getDeploymentSlot(), deployment.getName(), deploymentSlot);
+                logger.debug(
+                    "current deployment slot [{}] for [{}] is different from [{}]. skipping...",
+                    deployment.getDeploymentSlot(),
+                    deployment.getName(),
+                    deploymentSlot
+                );
                 continue;
             }

             // If provided, we check the deployment name
             if (Strings.hasLength(deploymentName) && !deploymentName.equals(deployment.getName())) {
-                logger.debug("current deployment name [{}] different from [{}]. skipping...",
-                    deployment.getName(), deploymentName);
+                logger.debug("current deployment name [{}] different from [{}]. skipping...", deployment.getName(), deploymentName);
                 continue;
             }

             // We check current deployment status
-            if (deployment.getStatus() != DeploymentStatus.Starting &&
-                deployment.getStatus() != DeploymentStatus.Deploying &&
-                deployment.getStatus() != DeploymentStatus.Running) {
-                logger.debug("[{}] status is [{}]. skipping...",
-                    deployment.getName(), deployment.getStatus());
+            if (deployment.getStatus() != DeploymentStatus.Starting
+                && deployment.getStatus() != DeploymentStatus.Deploying
+                && deployment.getStatus() != DeploymentStatus.Running) {
+                logger.debug("[{}] status is [{}]. skipping...", deployment.getName(), deployment.getStatus());
                 continue;
             }
diff --git a/plugins/discovery-azure-classic/src/main/java/org/opensearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/opensearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
index f50a37f11aab7..fabe8356812a4 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/opensearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/opensearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
@@ -71,31 +71,36 @@ protected AzureComputeService createComputeService() {
     }

     @Override
-    public Map<String, Supplier<SeedHostsProvider>> getSeedHostProviders(TransportService transportService,
-                                                                         NetworkService networkService) {
-        return Collections.singletonMap(AZURE,
-            () -> createSeedHostsProvider(settings, createComputeService(), transportService, networkService));
+    public Map<String, Supplier<SeedHostsProvider>> getSeedHostProviders(TransportService transportService, NetworkService networkService) {
+        return Collections.singletonMap(
+            AZURE,
+            () -> createSeedHostsProvider(settings, createComputeService(), transportService, networkService)
+        );
     }

     // Used for testing
-    protected AzureSeedHostsProvider createSeedHostsProvider(final Settings settings,
-                                                             final AzureComputeService azureComputeService,
-                                                             final TransportService transportService,
-                                                             final NetworkService networkService) {
+    protected AzureSeedHostsProvider createSeedHostsProvider(
+        final Settings settings,
+        final AzureComputeService azureComputeService,
+        final TransportService transportService,
+        final NetworkService networkService
+    ) {
         return new AzureSeedHostsProvider(settings, azureComputeService, transportService, networkService);
     }

     @Override
     public List<Setting<?>> getSettings() {
-        return Arrays.asList(AzureComputeService.Discovery.REFRESH_SETTING,
-            AzureComputeService.Management.KEYSTORE_PASSWORD_SETTING,
-            AzureComputeService.Management.KEYSTORE_PATH_SETTING,
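The `getSeedAddresses()` hunk above keeps the refresh-interval cache guard intact: an interval of 0 disables caching, a negative interval caches forever, and a positive interval acts as a TTL. A self-contained sketch of that guard, with a placeholder lookup in place of the Azure API call:

```java
import java.util.List;
import java.util.concurrent.TimeUnit;

public class RefreshGuardSketch {
    private List<String> cached;
    private long lastRefresh;
    private final long refreshMillis; // 0: never cache, < 0: cache forever, > 0: time-based

    RefreshGuardSketch(long refreshMillis) {
        this.refreshMillis = refreshMillis;
    }

    synchronized List<String> get() {
        if (refreshMillis != 0) {
            if (cached != null && (refreshMillis < 0 || (System.currentTimeMillis() - lastRefresh) < refreshMillis)) {
                return cached; // "using cache to retrieve node list"
            }
        }
        lastRefresh = System.currentTimeMillis();
        cached = List.of("10.0.0.1:9300"); // stand-in for the remote lookup
        return cached;
    }

    public static void main(String[] args) {
        RefreshGuardSketch guard = new RefreshGuardSketch(TimeUnit.SECONDS.toMillis(10));
        System.out.println(guard.get());
        System.out.println(guard.get()); // second call is served from the cache
    }
}
```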
-            AzureComputeService.Management.KEYSTORE_TYPE_SETTING,
-            AzureComputeService.Management.SUBSCRIPTION_ID_SETTING,
-            AzureComputeService.Management.SERVICE_NAME_SETTING,
-            AzureComputeService.Discovery.HOST_TYPE_SETTING,
-            AzureComputeService.Discovery.DEPLOYMENT_NAME_SETTING,
-            AzureComputeService.Discovery.DEPLOYMENT_SLOT_SETTING,
-            AzureComputeService.Discovery.ENDPOINT_NAME_SETTING);
+        return Arrays.asList(
+            AzureComputeService.Discovery.REFRESH_SETTING,
+            AzureComputeService.Management.KEYSTORE_PASSWORD_SETTING,
+            AzureComputeService.Management.KEYSTORE_PATH_SETTING,
+            AzureComputeService.Management.KEYSTORE_TYPE_SETTING,
+            AzureComputeService.Management.SUBSCRIPTION_ID_SETTING,
+            AzureComputeService.Management.SERVICE_NAME_SETTING,
+            AzureComputeService.Discovery.HOST_TYPE_SETTING,
+            AzureComputeService.Discovery.DEPLOYMENT_NAME_SETTING,
+            AzureComputeService.Discovery.DEPLOYMENT_SLOT_SETTING,
+            AzureComputeService.Discovery.ENDPOINT_NAME_SETTING
+        );
     }
 }
diff --git a/plugins/discovery-azure-classic/src/yamlRestTest/java/org/opensearch/discovery/azure/classic/DiscoveryAzureClassicClientYamlTestSuiteIT.java b/plugins/discovery-azure-classic/src/yamlRestTest/java/org/opensearch/discovery/azure/classic/DiscoveryAzureClassicClientYamlTestSuiteIT.java
index 9c36883b49525..cb9648065067b 100644
--- a/plugins/discovery-azure-classic/src/yamlRestTest/java/org/opensearch/discovery/azure/classic/DiscoveryAzureClassicClientYamlTestSuiteIT.java
+++ b/plugins/discovery-azure-classic/src/yamlRestTest/java/org/opensearch/discovery/azure/classic/DiscoveryAzureClassicClientYamlTestSuiteIT.java
@@ -49,4 +49,3 @@ public static Iterable<Object[]> parameters() throws Exception {
         return OpenSearchClientYamlSuiteTestCase.createParameters();
     }
 }
-
diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2Fixture.java b/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2Fixture.java
index e5baf08773af9..7ff6a0f0ec16a 100644
--- a/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2Fixture.java
+++ b/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2Fixture.java
@@ -114,32 +114,41 @@ protected Response handle(final Request request) throws IOException {
             return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, "127.0.0.1".getBytes(UTF_8));
         }

-        if (instanceProfile &&
-            "/latest/meta-data/iam/security-credentials/".equals(request.getPath()) &&
-            HttpGet.METHOD_NAME.equals(request.getMethod())) {
+        if (instanceProfile
+            && "/latest/meta-data/iam/security-credentials/".equals(request.getPath())
+            && HttpGet.METHOD_NAME.equals(request.getMethod())) {
             final Map<String, String> headers = new HashMap<>(contentType("text/plain"));
             return new Response(RestStatus.OK.getStatus(), headers, "my_iam_profile".getBytes(UTF_8));
         }

-        if (instanceProfile && "/latest/api/token".equals(request.getPath())
-            && HttpPut.METHOD_NAME.equals(request.getMethod())) {
+        if (instanceProfile && "/latest/api/token".equals(request.getPath()) && HttpPut.METHOD_NAME.equals(request.getMethod())) {
             // TODO: Implement IMDSv2 behavior here. For now this just returns a 403 which makes the SDK fall back to IMDSv1
-            //       which is implemented in this fixture
+            // which is implemented in this fixture
             return new Response(RestStatus.FORBIDDEN.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
         }

-        if ((containerCredentials &&
-            "/ecs_credentials_endpoint".equals(request.getPath()) &&
-            HttpGet.METHOD_NAME.equals(request.getMethod())) ||
-            ("/latest/meta-data/iam/security-credentials/my_iam_profile".equals(request.getPath()) &&
-            HttpGet.METHOD_NAME.equals(request.getMethod()))) {
+        if ((containerCredentials
+            && "/ecs_credentials_endpoint".equals(request.getPath())
+            && HttpGet.METHOD_NAME.equals(request.getMethod()))
+            || ("/latest/meta-data/iam/security-credentials/my_iam_profile".equals(request.getPath())
+                && HttpGet.METHOD_NAME.equals(request.getMethod()))) {
             final Date expiration = new Date(new Date().getTime() + TimeUnit.DAYS.toMillis(1));
             final String response = "{"
-                + "\"AccessKeyId\": \"" + "ec2_integration_test_access_key" + "\","
-                + "\"Expiration\": \"" + DateUtils.formatISO8601Date(expiration) + "\","
-                + "\"RoleArn\": \"" + "test" + "\","
-                + "\"SecretAccessKey\": \"" + "test" + "\","
-                + "\"Token\": \"" + "test" + "\""
+                + "\"AccessKeyId\": \""
+                + "ec2_integration_test_access_key"
+                + "\","
+                + "\"Expiration\": \""
+                + DateUtils.formatISO8601Date(expiration)
+                + "\","
+                + "\"RoleArn\": \""
+                + "test"
+                + "\","
+                + "\"SecretAccessKey\": \""
+                + "test"
+                + "\","
+                + "\"Token\": \""
+                + "test"
+                + "\""
                 + "}";

             final Map<String, String> headers = new HashMap<>(contentType("application/json"));
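The credentials block above is only reflowed: each string fragment moves to its own line, but the concatenation is unchanged. A standalone sketch that prints the same JSON shape, substituting `SimpleDateFormat` for the AWS SDK's `DateUtils.formatISO8601Date`:

```java
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;

public class ImdsResponseSketch {
    public static void main(String[] args) {
        // Stand-in for DateUtils.formatISO8601Date from the AWS SDK.
        SimpleDateFormat iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
        iso8601.setTimeZone(TimeZone.getTimeZone("UTC"));
        Date expiration = new Date(new Date().getTime() + TimeUnit.DAYS.toMillis(1));

        // One fragment per line, mirroring the reformatted concatenation above.
        String response = "{"
            + "\"AccessKeyId\": \"" + "ec2_integration_test_access_key" + "\","
            + "\"Expiration\": \"" + iso8601.format(expiration) + "\","
            + "\"RoleArn\": \"" + "test" + "\","
            + "\"SecretAccessKey\": \"" + "test" + "\","
            + "\"Token\": \"" + "test" + "\""
            + "}";
        System.out.println(response);
    }
}
```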
diff --git a/plugins/discovery-ec2/src/internalClusterTest/java/org/opensearch/discovery/ec2/AbstractAwsTestCase.java b/plugins/discovery-ec2/src/internalClusterTest/java/org/opensearch/discovery/ec2/AbstractAwsTestCase.java
index 54ba5ee810425..1e7422ea0ac02 100644
--- a/plugins/discovery-ec2/src/internalClusterTest/java/org/opensearch/discovery/ec2/AbstractAwsTestCase.java
+++ b/plugins/discovery-ec2/src/internalClusterTest/java/org/opensearch/discovery/ec2/AbstractAwsTestCase.java
@@ -56,9 +56,9 @@ public abstract class AbstractAwsTestCase extends OpenSearchIntegTestCase {

     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
-        Settings.Builder settings = Settings.builder()
-                .put(super.nodeSettings(nodeOrdinal))
-                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir());
+        Settings.Builder settings = Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal))
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir());

         // if explicit, just load it and don't load from env
         try {
@@ -70,7 +70,8 @@ protected Settings nodeSettings(int nodeOrdinal) {
                 }
             } else {
                 throw new IllegalStateException(
-                    "to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/opensearch.yml");
+                    "to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/opensearch.yml"
+                );
             }
         } catch (SettingsException exception) {
             throw new IllegalStateException("your test configuration file is incorrect: " + System.getProperty("tests.config"), exception);
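`AbstractAwsTestCase` layers test-specific keys over the superclass defaults via `Settings.builder()`. A minimal sketch of that override pattern, with a hypothetical `baseSettings` standing in for `super.nodeSettings` and a fixed path instead of `createTempDir()`:

```java
import org.opensearch.common.settings.Settings;

public class NodeSettingsSketch {
    // Hypothetical base, mirroring OpenSearchIntegTestCase#nodeSettings.
    static Settings baseSettings(int nodeOrdinal) {
        return Settings.builder().put("node.attr.ordinal", nodeOrdinal).build();
    }

    static Settings nodeSettings(int nodeOrdinal) {
        // Start from the superclass settings, then layer test-specific keys on top,
        // as AbstractAwsTestCase does with PATH_HOME_SETTING above.
        return Settings.builder()
            .put(baseSettings(nodeOrdinal))
            .put("path.home", "/tmp/opensearch-test") // createTempDir() in the real test
            .build();
    }

    public static void main(String[] args) {
        System.out.println(nodeSettings(0));
    }
}
```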
diff --git a/plugins/discovery-ec2/src/internalClusterTest/java/org/opensearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java b/plugins/discovery-ec2/src/internalClusterTest/java/org/opensearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java
index 9c2119bce7732..62550735993a8 100644
--- a/plugins/discovery-ec2/src/internalClusterTest/java/org/opensearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java
+++ b/plugins/discovery-ec2/src/internalClusterTest/java/org/opensearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java
@@ -32,7 +32,6 @@

 package org.opensearch.discovery.ec2;

-
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.settings.Settings;
@@ -50,17 +49,17 @@
 @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0)
 public class Ec2DiscoveryUpdateSettingsTests extends AbstractAwsTestCase {
     public void testMinimumMasterNodesStart() {
-        Settings nodeSettings = Settings.builder()
-            .put(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "ec2")
-            .build();
+        Settings nodeSettings = Settings.builder().put(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "ec2").build();
         internalCluster().startNode(nodeSettings);

         // We try to update a setting now
         final String expectedValue = UUIDs.randomBase64UUID(random());
         final String settingName = "cluster.routing.allocation.exclude.any_attribute";
-        final ClusterUpdateSettingsResponse response = client().admin().cluster().prepareUpdateSettings()
-            .setPersistentSettings(Settings.builder().put(settingName, expectedValue))
-            .get();
+        final ClusterUpdateSettingsResponse response = client().admin()
+            .cluster()
+            .prepareUpdateSettings()
+            .setPersistentSettings(Settings.builder().put(settingName, expectedValue))
+            .get();

         final String value = response.getPersistentSettings().get(settingName);
         assertThat(value, is(expectedValue));
diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java
index d6a9d06d8d35c..4b36a60bb278f 100644
--- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java
+++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java
@@ -102,8 +102,14 @@ class AwsEc2SeedHostsProvider implements SeedHostsProvider {
         availabilityZones.addAll(AwsEc2Service.AVAILABILITY_ZONES_SETTING.get(settings));

         if (logger.isDebugEnabled()) {
-            logger.debug("using host_type [{}], tags [{}], groups [{}] with any_group [{}], availability_zones [{}]", hostType, tags,
-                groups, bindAnyGroup, availabilityZones);
+            logger.debug(
+                "using host_type [{}], tags [{}], groups [{}] with any_group [{}], availability_zones [{}]",
+                hostType,
+                tags,
+                groups,
+                bindAnyGroup,
+                availabilityZones
+            );
         }
     }

@@ -144,18 +150,25 @@ protected List<TransportAddress> fetchDynamicNodes() {
                     }
                     if (bindAnyGroup) {
                         // We check if we can find at least one group name or one group id in groups.
-                        if (disjoint(securityGroupNames, groups)
-                            && disjoint(securityGroupIds, groups)) {
-                            logger.trace("filtering out instance {} based on groups {}, not part of {}", instance.getInstanceId(),
-                                instanceSecurityGroups, groups);
+                        if (disjoint(securityGroupNames, groups) && disjoint(securityGroupIds, groups)) {
+                            logger.trace(
+                                "filtering out instance {} based on groups {}, not part of {}",
+                                instance.getInstanceId(),
+                                instanceSecurityGroups,
+                                groups
+                            );
                             // continue to the next instance
                             continue;
                         }
                     } else {
                         // We need to match all group names or group ids, otherwise we ignore this instance
                         if (!(securityGroupNames.containsAll(groups) || securityGroupIds.containsAll(groups))) {
-                            logger.trace("filtering out instance {} based on groups {}, does not include all of {}",
-                                instance.getInstanceId(), instanceSecurityGroups, groups);
+                            logger.trace(
+                                "filtering out instance {} based on groups {}, does not include all of {}",
+                                instance.getInstanceId(),
+                                instanceSecurityGroups,
+                                groups
+                            );
                             // continue to the next instance
                             continue;
                         }
@@ -195,8 +208,13 @@ && disjoint(securityGroupIds, groups)) {
                 } catch (final Exception e) {
                     final String finalAddress = address;
                     logger.warn(
-                        (Supplier<?>)
-                            () -> new ParameterizedMessage("failed to add {}, address {}", instance.getInstanceId(), finalAddress), e);
+                        (Supplier<?>) () -> new ParameterizedMessage(
+                            "failed to add {}, address {}",
+                            instance.getInstanceId(),
+                            finalAddress
+                        ),
+                        e
+                    );
                 }
             } else {
                 logger.trace("not adding {}, address is null, host_type {}", instance.getInstanceId(), hostType);
@@ -210,23 +228,18 @@ && disjoint(securityGroupIds, groups)) {
     }

     private DescribeInstancesRequest buildDescribeInstancesRequest() {
-        final DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest()
-            .withFilters(
-                new Filter("instance-state-name").withValues("running", "pending")
-            );
+        final DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest().withFilters(
+            new Filter("instance-state-name").withValues("running", "pending")
+        );

         for (final Map.Entry<String, List<String>> tagFilter : tags.entrySet()) {
             // for a given tag key, OR relationship for multiple different values
-            describeInstancesRequest.withFilters(
-                new Filter("tag:" + tagFilter.getKey()).withValues(tagFilter.getValue())
-            );
+            describeInstancesRequest.withFilters(new Filter("tag:" + tagFilter.getKey()).withValues(tagFilter.getValue()));
         }

         if (!availabilityZones.isEmpty()) {
             // OR relationship amongst multiple values of the availability-zone filter
-            describeInstancesRequest.withFilters(
-                new Filter("availability-zone").withValues(availabilityZones)
-            );
+            describeInstancesRequest.withFilters(new Filter("availability-zone").withValues(availabilityZones));
         }

         return describeInstancesRequest;
@@ -235,7 +248,7 @@ private DescribeInstancesRequest buildDescribeInstancesRequest() {
     private final class TransportAddressesCache extends SingleObjectCache<List<TransportAddress>> {

         protected TransportAddressesCache(TimeValue refreshInterval) {
-            super(refreshInterval,  new ArrayList<>());
+            super(refreshInterval, new ArrayList<>());
         }

         @Override
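`buildDescribeInstancesRequest()` above chains `withFilters` calls; in the AWS SDK v1 the varargs `withFilters` appends to the existing filter list, so the state filter and the per-tag filters accumulate. A compilable sketch under that assumption — the tag keys and values here are made up:

```java
import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
import com.amazonaws.services.ec2.model.Filter;
import java.util.List;
import java.util.Map;

public class DescribeInstancesSketch {
    public static void main(String[] args) {
        // Only running or pending instances are eligible seed hosts.
        DescribeInstancesRequest request = new DescribeInstancesRequest().withFilters(
            new Filter("instance-state-name").withValues("running", "pending")
        );

        // Per tag key the values OR together, mirroring buildDescribeInstancesRequest above.
        Map<String, List<String>> tags = Map.of("stage", List.of("dev", "staging"));
        for (Map.Entry<String, List<String>> tagFilter : tags.entrySet()) {
            request.withFilters(new Filter("tag:" + tagFilter.getKey()).withValues(tagFilter.getValue()));
        }
        System.out.println(request.getFilters());
    }
}
```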
diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2Service.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2Service.java
index 77f0eecbfc774..c3ed4340467bd 100644
--- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2Service.java
+++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2Service.java
@@ -59,8 +59,12 @@ class HostType {
      * XXXX refers to a name of a tag configured for all EC2 instances. Instances which don't
      * have this tag set will be ignored by the discovery process. Defaults to private_ip.
      */
-    Setting<String> HOST_TYPE_SETTING =
-        new Setting<>("discovery.ec2.host_type", HostType.PRIVATE_IP, Function.identity(), Property.NodeScope);
+    Setting<String> HOST_TYPE_SETTING = new Setting<>(
+        "discovery.ec2.host_type",
+        HostType.PRIVATE_IP,
+        Function.identity(),
+        Property.NodeScope
+    );
     /**
      * discovery.ec2.any_group: If set to false, will require all security groups to be present for the instance to be used for the
      * discovery. Defaults to true.
@@ -70,19 +74,30 @@ class HostType {
      * discovery.ec2.groups: Either a comma separated list or array based list of (security) groups. Only instances with the provided
      * security groups will be used in the cluster discovery. (NOTE: You could provide either group NAME or group ID.)
      */
-    Setting<List<String>> GROUPS_SETTING = Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(),
-        Property.NodeScope);
+    Setting<List<String>> GROUPS_SETTING = Setting.listSetting(
+        "discovery.ec2.groups",
+        new ArrayList<>(),
+        s -> s.toString(),
+        Property.NodeScope
+    );
     /**
     * discovery.ec2.availability_zones: Either a comma separated list or array based list of availability zones. Only instances within
     * the provided availability zones will be used in the cluster discovery.
     */
-    Setting<List<String>> AVAILABILITY_ZONES_SETTING = Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(),
-        s -> s.toString(), Property.NodeScope);
+    Setting<List<String>> AVAILABILITY_ZONES_SETTING = Setting.listSetting(
+        "discovery.ec2.availability_zones",
+        Collections.emptyList(),
+        s -> s.toString(),
+        Property.NodeScope
+    );
    /**
     * discovery.ec2.node_cache_time: How long the list of hosts is cached to prevent further requests to the AWS API. Defaults to 10s.
     */
-    Setting<TimeValue> NODE_CACHE_TIME_SETTING = Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10),
-        Property.NodeScope);
+    Setting<TimeValue> NODE_CACHE_TIME_SETTING = Setting.timeSetting(
+        "discovery.ec2.node_cache_time",
+        TimeValue.timeValueSeconds(10),
+        Property.NodeScope
+    );

    /**
     * discovery.ec2.tag.*: The ec2 discovery can filter machines to include in the cluster based on tags (and not just groups).
@@ -90,8 +105,10 @@ class HostType {
     * instances with a tag key set to stage, and a value of dev. Several tags set will require all of those tags to be set for the
     * instance to be included.
     */
-    Setting.AffixSetting<List<String>> TAG_SETTING = Setting.prefixKeySetting("discovery.ec2.tag.",
-        key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope));
+    Setting.AffixSetting<List<String>> TAG_SETTING = Setting.prefixKeySetting(
+        "discovery.ec2.tag.",
+        key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope)
+    );

     /**
      * Builds then caches an {@code AmazonEC2} client using the current client
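`TAG_SETTING` above is a prefix setting: every concrete `discovery.ec2.tag.<name>` key materializes as its own list setting. A small sketch of declaring and resolving one such key, assuming the same OpenSearch `Setting` API the file uses:

```java
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Setting.Property;
import org.opensearch.common.settings.Settings;

public class TagSettingSketch {
    // Same construction as TAG_SETTING above: every discovery.ec2.tag.<name> key
    // becomes its own list setting.
    static final Setting.AffixSetting<List<String>> TAG_SETTING = Setting.prefixKeySetting(
        "discovery.ec2.tag.",
        key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope)
    );

    public static void main(String[] args) {
        Settings settings = Settings.builder().putList("discovery.ec2.tag.stage", "dev").build();
        // getConcreteSetting resolves the full key back to a usable Setting instance.
        List<String> values = TAG_SETTING.getConcreteSetting("discovery.ec2.tag.stage").get(settings);
        System.out.println(values); // -> [dev]
    }
}
```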
diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java
index 61ffdb8620cc9..7566a7094e635 100644
--- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java
+++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java
@@ -53,8 +53,7 @@ class AwsEc2ServiceImpl implements AwsEc2Service {

     private static final Logger logger = LogManager.getLogger(AwsEc2ServiceImpl.class);

-    private final AtomicReference<LazyInitializable<AmazonEc2Reference, OpenSearchException>> lazyClientReference =
-        new AtomicReference<>();
+    private final AtomicReference<LazyInitializable<AmazonEc2Reference, OpenSearchException>> lazyClientReference = new AtomicReference<>();

     private AmazonEC2 buildClient(Ec2ClientSettings clientSettings) {
         final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings);
@@ -64,7 +63,8 @@ private AmazonEC2 buildClient(Ec2ClientSettings clientSettings) {

     // proxy for testing
     AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration, String endpoint) {
-        final AmazonEC2ClientBuilder builder = AmazonEC2ClientBuilder.standard().withCredentials(credentials)
+        final AmazonEC2ClientBuilder builder = AmazonEC2ClientBuilder.standard()
+            .withCredentials(credentials)
             .withClientConfiguration(configuration);
         if (Strings.hasText(endpoint)) {
             logger.debug("using explicit ec2 endpoint [{}]", endpoint);
@@ -122,8 +122,10 @@ public AmazonEc2Reference client() {
     @Override
     public void refreshAndClearCache(Ec2ClientSettings clientSettings) {
         final LazyInitializable<AmazonEc2Reference, OpenSearchException> newClient = new LazyInitializable<>(
-            () -> new AmazonEc2Reference(buildClient(clientSettings)), clientReference -> clientReference.incRef(),
-            clientReference -> clientReference.decRef());
+            () -> new AmazonEc2Reference(buildClient(clientSettings)),
+            clientReference -> clientReference.incRef(),
+            clientReference -> clientReference.decRef()
+        );
         final LazyInitializable<AmazonEc2Reference, OpenSearchException> oldClient = this.lazyClientReference.getAndSet(newClient);
         if (oldClient != null) {
             oldClient.reset();
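`refreshAndClearCache()` above swaps a lazily built client into an `AtomicReference` and resets the old one so in-flight users drain through ref-counting. A deliberately simplified, self-contained miniature of the swap — without the ref-counting that the real `LazyInitializable` wiring provides:

```java
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

public class LazyClientRefSketch<T> {
    // Miniature of the AtomicReference-of-lazy-client pattern above.
    private final AtomicReference<Supplier<T>> ref = new AtomicReference<>();

    void refresh(Supplier<T> factory) {
        // Publish a new factory; subsequent client() calls see the new settings.
        ref.getAndSet(factory);
    }

    T client() {
        Supplier<T> current = ref.get();
        if (current == null) {
            throw new IllegalStateException("Missing ec2 client configs");
        }
        return current.get();
    }

    public static void main(String[] args) {
        LazyClientRefSketch<String> clients = new LazyClientRefSketch<>();
        clients.refresh(() -> "client-v1");
        System.out.println(clients.client());
        clients.refresh(() -> "client-v2"); // e.g. after secure settings are reloaded
        System.out.println(clients.client());
    }
}
```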
diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2ClientSettings.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2ClientSettings.java
index 913ca163a7ef2..499ad01ff359d 100644
--- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2ClientSettings.java
+++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2ClientSettings.java
@@ -71,12 +71,20 @@ final class Ec2ClientSettings {
     static final Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", 80, 0, 1 << 16, Property.NodeScope);

     /** An override for the ec2 endpoint to connect to. */
-    static final Setting<String> ENDPOINT_SETTING = new Setting<>("discovery.ec2.endpoint", "", s -> s.toLowerCase(Locale.ROOT),
-        Property.NodeScope);
+    static final Setting<String> ENDPOINT_SETTING = new Setting<>(
+        "discovery.ec2.endpoint",
+        "",
+        s -> s.toLowerCase(Locale.ROOT),
+        Property.NodeScope
+    );

     /** The protocol to use to connect to ec2. */
-    static final Setting<Protocol> PROTOCOL_SETTING = new Setting<>("discovery.ec2.protocol", "https",
-        s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope);
+    static final Setting<Protocol> PROTOCOL_SETTING = new Setting<>(
+        "discovery.ec2.protocol",
+        "https",
+        s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)),
+        Property.NodeScope
+    );

     /** The username of a proxy to connect to ec2 through. */
     static final Setting<SecureString> PROXY_USERNAME_SETTING = SecureSetting.secureString("discovery.ec2.proxy.username", null);
@@ -85,8 +93,11 @@ final class Ec2ClientSettings {
     static final Setting<SecureString> PROXY_PASSWORD_SETTING = SecureSetting.secureString("discovery.ec2.proxy.password", null);

     /** The socket timeout for connecting to ec2. */
-    static final Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout",
-        TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope);
+    static final Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting(
+        "discovery.ec2.read_timeout",
+        TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT),
+        Property.NodeScope
+    );

     private static final Logger logger = LogManager.getLogger(Ec2ClientSettings.class);

@@ -122,8 +133,16 @@ final class Ec2ClientSettings {
     /** The read timeout for the ec2 client. */
     final int readTimeoutMillis;

-    protected Ec2ClientSettings(AWSCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort,
-                                String proxyUsername, String proxyPassword, int readTimeoutMillis) {
+    protected Ec2ClientSettings(
+        AWSCredentials credentials,
+        String endpoint,
+        Protocol protocol,
+        String proxyHost,
+        int proxyPort,
+        String proxyUsername,
+        String proxyPassword,
+        int readTimeoutMillis
+    ) {
         this.credentials = credentials;
         this.endpoint = endpoint;
         this.protocol = protocol;
@@ -135,27 +154,39 @@ protected Ec2ClientSettings(AWSCredentials credentials, String endpoint, Protoco
     }

     static AWSCredentials loadCredentials(Settings settings) {
-        try (SecureString key = ACCESS_KEY_SETTING.get(settings);
-             SecureString secret = SECRET_KEY_SETTING.get(settings);
-             SecureString sessionToken = SESSION_TOKEN_SETTING.get(settings)) {
+        try (
+            SecureString key = ACCESS_KEY_SETTING.get(settings);
+            SecureString secret = SECRET_KEY_SETTING.get(settings);
+            SecureString sessionToken = SESSION_TOKEN_SETTING.get(settings)
+        ) {
             if (key.length() == 0 && secret.length() == 0) {
                 if (sessionToken.length() > 0) {
-                    throw new SettingsException("Setting [{}] is set but [{}] and [{}] are not",
-                        SESSION_TOKEN_SETTING.getKey(), ACCESS_KEY_SETTING.getKey(), SECRET_KEY_SETTING.getKey());
+                    throw new SettingsException(
+                        "Setting [{}] is set but [{}] and [{}] are not",
+                        SESSION_TOKEN_SETTING.getKey(),
+                        ACCESS_KEY_SETTING.getKey(),
+                        SECRET_KEY_SETTING.getKey()
+                    );
                 }

                 logger.debug("Using either environment variables, system properties or instance profile credentials");
                 return null;
             } else {
                 if (key.length() == 0) {
-                    deprecationLogger.deprecate("ec2_invalid_settings",
+                    deprecationLogger.deprecate(
+                        "ec2_invalid_settings",
                         "Setting [{}] is set but [{}] is not, which will be unsupported in future",
-                        SECRET_KEY_SETTING.getKey(), ACCESS_KEY_SETTING.getKey());
+                        SECRET_KEY_SETTING.getKey(),
+                        ACCESS_KEY_SETTING.getKey()
+                    );
                 }
                 if (secret.length() == 0) {
-                    deprecationLogger.deprecate("ec2_invalid_settings",
+                    deprecationLogger.deprecate(
+                        "ec2_invalid_settings",
                         "Setting [{}] is set but [{}] is not, which will be unsupported in future",
-                        ACCESS_KEY_SETTING.getKey(), SECRET_KEY_SETTING.getKey());
+                        ACCESS_KEY_SETTING.getKey(),
+                        SECRET_KEY_SETTING.getKey()
+                    );
                 }

                 final AWSCredentials credentials;
@@ -175,8 +206,10 @@ static AWSCredentials loadCredentials(Settings settings) {
     /** Parse settings for a single client. */
     static Ec2ClientSettings getClientSettings(Settings settings) {
         final AWSCredentials credentials = loadCredentials(settings);
-        try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings);
-             SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)) {
+        try (
+            SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings);
+            SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)
+        ) {
             return new Ec2ClientSettings(
                 credentials,
                 ENDPOINT_SETTING.get(settings),
@@ -185,7 +218,8 @@ static Ec2ClientSettings getClientSettings(Settings settings) {
                 PROXY_PORT_SETTING.get(settings),
                 proxyUsername.toString(),
                 proxyPassword.toString(),
-                (int)READ_TIMEOUT_SETTING.get(settings).millis());
+                (int) READ_TIMEOUT_SETTING.get(settings).millis()
+            );
         }
     }
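`loadCredentials()` and `getClientSettings()` above read secure settings inside try-with-resources because `SecureString` is `Closeable` and is wiped on close. A minimal sketch using `MockSecureSettings`, the same test helper this diff already touches:

```java
import org.opensearch.common.settings.MockSecureSettings;
import org.opensearch.common.settings.SecureSetting;
import org.opensearch.common.settings.SecureString;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;

public class SecureSettingSketch {
    static final Setting<SecureString> ACCESS_KEY = SecureSetting.secureString("discovery.ec2.access_key", null);

    public static void main(String[] args) {
        MockSecureSettings secure = new MockSecureSettings();
        secure.setString("discovery.ec2.access_key", "ec2_access");
        Settings settings = Settings.builder().setSecureSettings(secure).build();

        // try-with-resources zeroes the secure value after use, which is why
        // loadCredentials() above reads all three keys in a single try block.
        try (SecureString key = ACCESS_KEY.get(settings)) {
            System.out.println(key.length() > 0 ? "access key present" : "no access key");
        }
    }
}
```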
diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2DiscoveryPlugin.java
index 71a843a45a801..ef89a351e5767 100644
--- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2DiscoveryPlugin.java
+++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2DiscoveryPlugin.java
@@ -109,33 +109,33 @@ public NetworkService.CustomNameResolver getCustomNameResolver(Settings settings
     }

     @Override
-    public Map<String, Supplier<SeedHostsProvider>> getSeedHostProviders(TransportService transportService,
-                                                                         NetworkService networkService) {
+    public Map<String, Supplier<SeedHostsProvider>> getSeedHostProviders(TransportService transportService, NetworkService networkService) {
         return Collections.singletonMap(EC2, () -> new AwsEc2SeedHostsProvider(settings, transportService, ec2Service));
     }

     @Override
     public List<Setting<?>> getSettings() {
         return Arrays.asList(
-                // Register EC2 discovery settings: discovery.ec2
-                Ec2ClientSettings.ACCESS_KEY_SETTING,
-                Ec2ClientSettings.SECRET_KEY_SETTING,
-                Ec2ClientSettings.SESSION_TOKEN_SETTING,
-                Ec2ClientSettings.ENDPOINT_SETTING,
-                Ec2ClientSettings.PROTOCOL_SETTING,
-                Ec2ClientSettings.PROXY_HOST_SETTING,
-                Ec2ClientSettings.PROXY_PORT_SETTING,
-                Ec2ClientSettings.PROXY_USERNAME_SETTING,
-                Ec2ClientSettings.PROXY_PASSWORD_SETTING,
-                Ec2ClientSettings.READ_TIMEOUT_SETTING,
-                AwsEc2Service.HOST_TYPE_SETTING,
-                AwsEc2Service.ANY_GROUP_SETTING,
-                AwsEc2Service.GROUPS_SETTING,
-                AwsEc2Service.AVAILABILITY_ZONES_SETTING,
-                AwsEc2Service.NODE_CACHE_TIME_SETTING,
-                AwsEc2Service.TAG_SETTING,
-                // Register cloud node settings: cloud.node
-                AwsEc2Service.AUTO_ATTRIBUTE_SETTING);
+            // Register EC2 discovery settings: discovery.ec2
+            Ec2ClientSettings.ACCESS_KEY_SETTING,
+            Ec2ClientSettings.SECRET_KEY_SETTING,
+            Ec2ClientSettings.SESSION_TOKEN_SETTING,
+            Ec2ClientSettings.ENDPOINT_SETTING,
+            Ec2ClientSettings.PROTOCOL_SETTING,
+            Ec2ClientSettings.PROXY_HOST_SETTING,
+            Ec2ClientSettings.PROXY_PORT_SETTING,
+            Ec2ClientSettings.PROXY_USERNAME_SETTING,
+            Ec2ClientSettings.PROXY_PASSWORD_SETTING,
+            Ec2ClientSettings.READ_TIMEOUT_SETTING,
+            AwsEc2Service.HOST_TYPE_SETTING,
+            AwsEc2Service.ANY_GROUP_SETTING,
+            AwsEc2Service.GROUPS_SETTING,
+            AwsEc2Service.AVAILABILITY_ZONES_SETTING,
+            AwsEc2Service.NODE_CACHE_TIME_SETTING,
+            AwsEc2Service.TAG_SETTING,
+            // Register cloud node settings: cloud.node
+            AwsEc2Service.AUTO_ATTRIBUTE_SETTING
+        );
     }

     @Override
@@ -169,8 +169,10 @@ static Settings getAvailabilityZoneNodeAttributes(Settings settings, String azMe
             throw new UncheckedIOException(e);
         }

-        try (InputStream in = SocketAccess.doPrivilegedIOException(urlConnection::getInputStream);
-             BufferedReader urlReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
+        try (
+            InputStream in = SocketAccess.doPrivilegedIOException(urlConnection::getInputStream);
+            BufferedReader urlReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))
+        ) {

             final String metadataResult = urlReader.readLine();
             if ((metadataResult == null) || (metadataResult.length() == 0)) {
diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2NameResolver.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2NameResolver.java
index 49a11683557a0..9fa479e90c956 100644
--- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2NameResolver.java
+++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2NameResolver.java
@@ -67,7 +67,7 @@
  * @author Paul_Loy (keteracel)
  */
 class Ec2NameResolver implements CustomNameResolver {
-
+
     private static final Logger logger = LogManager.getLogger(Ec2NameResolver.class);

     /**
@@ -129,7 +129,7 @@ public InetAddress[] resolve(Ec2HostnameType type) throws IOException {
     @Override
     public InetAddress[] resolveDefault() {
         return null; // using this, one has to explicitly specify _ec2_ in network setting
-//        return resolve(Ec2HostnameType.DEFAULT, false);
+        // return resolve(Ec2HostnameType.DEFAULT, false);
     }

     @Override
diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java
index b25bf1f410801..3150f96443695 100644
--- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java
+++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java
@@ -49,8 +49,10 @@ public class AwsEc2ServiceImplTests extends OpenSearchTestCase {

     public void testAWSCredentialsWithSystemProviders() {
-        final AWSCredentialsProvider credentialsProvider = AwsEc2ServiceImpl.buildCredentials(logger,
-            Ec2ClientSettings.getClientSettings(Settings.EMPTY));
+        final AWSCredentialsProvider credentialsProvider = AwsEc2ServiceImpl.buildCredentials(
+            logger,
+            Ec2ClientSettings.getClientSettings(Settings.EMPTY)
+        );
         assertThat(credentialsProvider, instanceOf(DefaultAWSCredentialsProviderChain.class));
     }

@@ -58,8 +60,10 @@ public void testAWSCredentialsWithOpenSearchAwsSettings() {
         final MockSecureSettings secureSettings = new MockSecureSettings();
         secureSettings.setString("discovery.ec2.access_key", "aws_key");
         secureSettings.setString("discovery.ec2.secret_key", "aws_secret");
-        final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger,
-            Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())).getCredentials();
+        final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(
+            logger,
+            Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())
+        ).getCredentials();
         assertThat(credentials.getAWSAccessKeyId(), is("aws_key"));
         assertThat(credentials.getAWSSecretKey(), is("aws_secret"));
     }

@@ -69,8 +73,10 @@ public void testAWSSessionCredentialsWithOpenSearchAwsSettings() {
         secureSettings.setString("discovery.ec2.access_key", "aws_key");
         secureSettings.setString("discovery.ec2.secret_key", "aws_secret");
         secureSettings.setString("discovery.ec2.session_token", "aws_session_token");
-        final BasicSessionCredentials credentials = (BasicSessionCredentials) AwsEc2ServiceImpl.buildCredentials(logger,
-            Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())).getCredentials();
+        final BasicSessionCredentials credentials = (BasicSessionCredentials) AwsEc2ServiceImpl.buildCredentials(
+            logger,
+            Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())
+        ).getCredentials();
         assertThat(credentials.getAWSAccessKeyId(), is("aws_key"));
         assertThat(credentials.getAWSSecretKey(), is("aws_secret"));
         assertThat(credentials.getSessionToken(), is("aws_session_token"));
@@ -79,37 +85,51 @@ public void testAWSSessionCredentialsWithOpenSearchAwsSettings() {
     public void testDeprecationOfLoneAccessKey() {
         final MockSecureSettings secureSettings = new MockSecureSettings();
         secureSettings.setString("discovery.ec2.access_key", "aws_key");
-        final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger,
-            Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())).getCredentials();
+        final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(
+            logger,
+            Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())
+        ).getCredentials();
         assertThat(credentials.getAWSAccessKeyId(), is("aws_key"));
         assertThat(credentials.getAWSSecretKey(), is(""));
-        assertSettingDeprecationsAndWarnings(new String[]{},
-            "Setting [discovery.ec2.access_key] is set but [discovery.ec2.secret_key] is not, which will be unsupported in future");
+        assertSettingDeprecationsAndWarnings(
+            new String[] {},
+            "Setting [discovery.ec2.access_key] is set but [discovery.ec2.secret_key] is not, which will be unsupported in future"
+        );
     }

     public void testDeprecationOfLoneSecretKey() {
         final MockSecureSettings secureSettings = new MockSecureSettings();
         secureSettings.setString("discovery.ec2.secret_key", "aws_secret");
-        final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger,
-            Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())).getCredentials();
+        final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(
+            logger,
+            Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())
+        ).getCredentials();
         assertThat(credentials.getAWSAccessKeyId(), is(""));
         assertThat(credentials.getAWSSecretKey(), is("aws_secret"));
-        assertSettingDeprecationsAndWarnings(new String[]{},
-            "Setting [discovery.ec2.secret_key] is set but [discovery.ec2.access_key] is not, which will be unsupported in future");
+        assertSettingDeprecationsAndWarnings(
+            new String[] {},
+            "Setting [discovery.ec2.secret_key] is set but [discovery.ec2.access_key] is not, which will be unsupported in future"
+        );
     }
     public void testRejectionOfLoneSessionToken() {
         final MockSecureSettings secureSettings = new MockSecureSettings();
         secureSettings.setString("discovery.ec2.session_token", "aws_session_token");
-        SettingsException e = expectThrows(SettingsException.class, () -> AwsEc2ServiceImpl.buildCredentials(logger,
-            Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())));
-        assertThat(e.getMessage(), is(
-            "Setting [discovery.ec2.session_token] is set but [discovery.ec2.access_key] and [discovery.ec2.secret_key] are not"));
+        SettingsException e = expectThrows(
+            SettingsException.class,
+            () -> AwsEc2ServiceImpl.buildCredentials(
+                logger,
+                Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())
+            )
+        );
+        assertThat(
+            e.getMessage(),
+            is("Setting [discovery.ec2.session_token] is set but [discovery.ec2.access_key] and [discovery.ec2.secret_key] are not")
+        );
     }

     public void testAWSDefaultConfiguration() {
-        launchAWSConfigurationTest(Settings.EMPTY, Protocol.HTTPS, null, -1, null, null,
-            ClientConfiguration.DEFAULT_SOCKET_TIMEOUT);
+        launchAWSConfigurationTest(Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, ClientConfiguration.DEFAULT_SOCKET_TIMEOUT);
     }

     public void testAWSConfigurationWithAwsSettings() {
@@ -126,15 +146,19 @@ public void testAWSConfigurationWithAwsSettings() {
         launchAWSConfigurationTest(settings, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username", "aws_proxy_password", 10000);
     }

-    protected void launchAWSConfigurationTest(Settings settings,
-                                              Protocol expectedProtocol,
-                                              String expectedProxyHost,
-                                              int expectedProxyPort,
-                                              String expectedProxyUsername,
-                                              String expectedProxyPassword,
-                                              int expectedReadTimeout) {
-        final ClientConfiguration configuration = AwsEc2ServiceImpl.buildConfiguration(logger,
-            Ec2ClientSettings.getClientSettings(settings));
+    protected void launchAWSConfigurationTest(
+        Settings settings,
+        Protocol expectedProtocol,
+        String expectedProxyHost,
+        int expectedProxyPort,
+        String expectedProxyUsername,
+        String expectedProxyPassword,
+        int expectedReadTimeout
+    ) {
+        final ClientConfiguration configuration = AwsEc2ServiceImpl.buildConfiguration(
+            logger,
+            Ec2ClientSettings.getClientSettings(settings)
+        );

         assertThat(configuration.getResponseMetadataCacheSize(), is(0));
         assertThat(configuration.getProtocol(), is(expectedProtocol));
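`launchAWSConfigurationTest()` above asserts on a `ClientConfiguration` built from the node settings. A sketch of assembling an equivalent configuration by hand with the AWS SDK v1 fluent setters, using the test's fake proxy values:

```java
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;

public class ClientConfigSketch {
    public static void main(String[] args) {
        // Roughly the shape buildConfiguration() is expected to produce for
        // the proxy test above; all values here come from that test.
        ClientConfiguration configuration = new ClientConfiguration()
            .withProtocol(Protocol.HTTP)
            .withProxyHost("aws_proxy_host")
            .withProxyPort(8080)
            .withProxyUsername("aws_proxy_username")
            .withProxyPassword("aws_proxy_password")
            .withSocketTimeout(10000);
        configuration.setResponseMetadataCacheSize(0);

        System.out.println(configuration.getProtocol() + " via " + configuration.getProxyHost() + ":" + configuration.getProxyPort());
    }
}
```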
diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/EC2RetriesTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/EC2RetriesTests.java
index 4c609d32a3fd2..9443eed5efae9 100644
--- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/EC2RetriesTests.java
+++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/EC2RetriesTests.java
@@ -68,9 +68,21 @@ public class EC2RetriesTests extends AbstractEC2MockAPITestCase {

     @Override
     protected MockTransportService createTransportService() {
-        return new MockTransportService(Settings.EMPTY, new MockNioTransport(Settings.EMPTY, Version.CURRENT, threadPool, networkService,
-            PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()),
-            new NoneCircuitBreakerService()), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
+        return new MockTransportService(
+            Settings.EMPTY,
+            new MockNioTransport(
+                Settings.EMPTY,
+                Version.CURRENT,
+                threadPool,
+                networkService,
+                PageCacheRecycler.NON_RECYCLING_INSTANCE,
+                new NamedWriteableRegistry(Collections.emptyList()),
+                new NoneCircuitBreakerService()
+            ),
+            threadPool,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            null
+        );
     }

     public void testEC2DiscoveryRetriesOnRateLimiting() throws IOException {
@@ -88,8 +100,10 @@ public void testEC2DiscoveryRetriesOnRateLimiting() throws IOException {
                 if (auth == null || auth.contains(accessKey) == false) {
                     throw new IllegalArgumentException("wrong access key: " + auth);
                 }
-                if (failedRequests.compute(exchange.getRequestHeaders().getFirst("Amz-sdk-invocation-id"),
-                    (requestId, count) -> (count == null ? 0 : count) + 1) < maxRetries) {
+                if (failedRequests.compute(
+                    exchange.getRequestHeaders().getFirst("Amz-sdk-invocation-id"),
+                    (requestId, count) -> (count == null ? 0 : count) + 1
+                ) < maxRetries) {
                     exchange.sendResponseHeaders(HttpStatus.SC_SERVICE_UNAVAILABLE, -1);
                     return;
                 }
@@ -97,8 +111,9 @@ public void testEC2DiscoveryRetriesOnRateLimiting() throws IOException {
                 byte[] responseBody = null;
                 for (NameValuePair parse : URLEncodedUtils.parse(request, UTF_8)) {
                     if ("Action".equals(parse.getName())) {
-                        responseBody = generateDescribeInstancesResponse(hosts.stream().map(
-                            address -> new Instance().withPublicIpAddress(address)).collect(Collectors.toList()));
+                        responseBody = generateDescribeInstancesResponse(
+                            hosts.stream().map(address -> new Instance().withPublicIpAddress(address)).collect(Collectors.toList())
+                        );
                         break;
                     }
                 }
diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java
index 7e72b408360d1..be6261583bdd1 100644
--- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java
+++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java
@@ -56,9 +56,7 @@ public class Ec2DiscoveryPluginTests extends OpenSearchTestCase {

     private Settings getNodeAttributes(Settings settings, String url) {
-        final Settings realSettings = Settings.builder()
-            .put(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), true)
-            .put(settings).build();
+        final Settings realSettings = Settings.builder().put(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), true).put(settings).build();
         return Ec2DiscoveryPlugin.getAvailabilityZoneNodeAttributes(realSettings, url);
     }

@@ -72,8 +70,7 @@ private void assertNodeAttributes(Settings settings, String url, String expected
     }

     public void testNodeAttributesDisabled() {
-        final Settings settings = Settings.builder()
-            .put(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), false).build();
+        final Settings settings = Settings.builder().put(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), false).build();
         assertNodeAttributes(settings, "bogus", null);
     }

@@ -84,9 +81,7 @@ public void testNodeAttributes() throws Exception {
     }

     public void testNodeAttributesBogusUrl() {
-        final UncheckedIOException e = expectThrows(UncheckedIOException.class, () ->
-            getNodeAttributes(Settings.EMPTY, "bogus")
-        );
+        final UncheckedIOException e = expectThrows(UncheckedIOException.class, () -> getNodeAttributes(Settings.EMPTY, "bogus"));
         assertNotNull(e.getCause());
         final String msg = e.getCause().getMessage();
         assertTrue(msg, msg.contains("no protocol: bogus"));
@@ -94,8 +89,9 @@ public void testNodeAttributesBogusUrl() {

     public void testNodeAttributesEmpty() throws Exception {
         final Path zoneUrl = createTempFile();
-        final IllegalStateException e = expectThrows(IllegalStateException.class, () ->
-            getNodeAttributes(Settings.EMPTY, zoneUrl.toUri().toURL().toString())
+        final IllegalStateException e = expectThrows(
+            IllegalStateException.class,
+            () -> getNodeAttributes(Settings.EMPTY, zoneUrl.toUri().toURL().toString())
         );
         assertTrue(e.getMessage(), e.getMessage().contains("no ec2 metadata returned"));
     }
@@ -131,11 +127,11 @@ public void testClientSettingsReInit() throws IOException {
         mockSecure1.setString(Ec2ClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_1");
         mockSecure1.setString(Ec2ClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_1");
         final Settings settings1 = Settings.builder()
-                .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_1")
-                .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 881)
-                .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_1")
-                .setSecureSettings(mockSecure1)
-                .build();
+            .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_1")
+            .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 881)
+            .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_1")
+            .setSecureSettings(mockSecure1)
+            .build();
         final MockSecureSettings mockSecure2 = new MockSecureSettings();
         mockSecure2.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), "ec2_access_2");
         mockSecure2.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret_2");
@@ -146,11 +142,11 @@ public void testClientSettingsReInit() throws IOException {
         mockSecure2.setString(Ec2ClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_2");
         mockSecure2.setString(Ec2ClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_2");
         final Settings settings2 = Settings.builder()
-                .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_2")
-                .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 882)
-                .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_2")
-                .setSecureSettings(mockSecure2)
-                .build();
+            .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_2")
+            .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 882)
+            .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_2")
+            .setSecureSettings(mockSecure2)
+            .build();
credentials, ClientConfiguration configuration, - String endpoint) { + AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration, String endpoint) { return new AmazonEC2Mock(credentials, configuration, endpoint); } }); @@ -232,7 +227,6 @@ private static class AmazonEC2Mock extends AbstractAmazonEC2 { } @Override - public void shutdown() { - } + public void shutdown() {} } } diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java index b45c7ab182e55..f1870a1c487e0 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java @@ -84,13 +84,19 @@ public class Ec2DiscoveryTests extends AbstractEC2MockAPITestCase { private Map poorMansDNS = new ConcurrentHashMap<>(); protected MockTransportService createTransportService() { - final Transport transport = new MockNioTransport(Settings.EMPTY, Version.CURRENT, threadPool, - new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, writableRegistry(), - new NoneCircuitBreakerService()) { + final Transport transport = new MockNioTransport( + Settings.EMPTY, + Version.CURRENT, + threadPool, + new NetworkService(Collections.emptyList()), + PageCacheRecycler.NON_RECYCLING_INSTANCE, + writableRegistry(), + new NoneCircuitBreakerService() + ) { @Override public TransportAddress[] addressesFromString(String address) { // we just need to ensure we don't resolve DNS here - return new TransportAddress[] {poorMansDNS.getOrDefault(address, buildNewFakeTransportAddress())}; + return new TransportAddress[] { poorMansDNS.getOrDefault(address, buildNewFakeTransportAddress()) }; } }; return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); @@ -116,21 +122,20 @@ protected List buildDynamicHosts(Settings nodeSettings, int no // Simulate an EC2 DescribeInstancesResponse final Map> tagsIncluded = new HashMap<>(); final String[] params = request.split("&"); - Arrays.stream(params).filter(entry -> entry.startsWith("Filter.") && entry.contains("=tag%3A")) - .forEach(entry -> { - final int startIndex = "Filter.".length(); - final int filterId = Integer.parseInt(entry.substring(startIndex, entry.indexOf(".", startIndex))); - tagsIncluded.put(entry.substring(entry.indexOf("=tag%3A") + "=tag%3A".length()), - Arrays.stream(params) - .filter(param -> param.startsWith("Filter." + filterId + ".Value.")) - .map(param -> param.substring(param.indexOf("=") + 1)) - .collect(Collectors.toList())); - } + Arrays.stream(params).filter(entry -> entry.startsWith("Filter.") && entry.contains("=tag%3A")).forEach(entry -> { + final int startIndex = "Filter.".length(); + final int filterId = Integer.parseInt(entry.substring(startIndex, entry.indexOf(".", startIndex))); + tagsIncluded.put( + entry.substring(entry.indexOf("=tag%3A") + "=tag%3A".length()), + Arrays.stream(params) + .filter(param -> param.startsWith("Filter." 
+ filterId + ".Value.")) + .map(param -> param.substring(param.indexOf("=") + 1)) + .collect(Collectors.toList()) ); + }); final List instances = IntStream.range(1, nodes + 1).mapToObj(node -> { final String instanceId = "node" + node; - final Instance instance = new Instance() - .withInstanceId(instanceId) + final Instance instance = new Instance().withInstanceId(instanceId) .withState(new InstanceState().withName(InstanceStateName.Running)) .withPrivateDnsName(PREFIX_PRIVATE_DNS + instanceId + SUFFIX_PRIVATE_DNS) .withPublicDnsName(PREFIX_PUBLIC_DNS + instanceId + SUFFIX_PUBLIC_DNS) @@ -140,12 +145,19 @@ protected List buildDynamicHosts(Settings nodeSettings, int no instance.setTags(tagsList.get(node - 1)); } return instance; - }).filter(instance -> - tagsIncluded.entrySet().stream().allMatch(entry -> instance.getTags().stream() - .filter(t -> t.getKey().equals(entry.getKey())) - .map(Tag::getValue) - .collect(Collectors.toList()) - .containsAll(entry.getValue()))) + }) + .filter( + instance -> tagsIncluded.entrySet() + .stream() + .allMatch( + entry -> instance.getTags() + .stream() + .filter(t -> t.getKey().equals(entry.getKey())) + .map(Tag::getValue) + .collect(Collectors.toList()) + .containsAll(entry.getValue()) + ) + ) .collect(Collectors.toList()); for (NameValuePair parse : URLEncodedUtils.parse(request, UTF_8)) { if ("Action".equals(parse.getName())) { @@ -171,8 +183,7 @@ protected List buildDynamicHosts(Settings nodeSettings, int no public void testDefaultSettings() throws InterruptedException { int nodes = randomInt(10); - Settings nodeSettings = Settings.builder() - .build(); + Settings nodeSettings = Settings.builder().build(); List discoveryNodes = buildDynamicHosts(nodeSettings, nodes); assertThat(discoveryNodes, hasSize(nodes)); } @@ -180,11 +191,9 @@ public void testDefaultSettings() throws InterruptedException { public void testPrivateIp() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { - poorMansDNS.put(PREFIX_PRIVATE_IP + (i+1), buildNewFakeTransportAddress()); + poorMansDNS.put(PREFIX_PRIVATE_IP + (i + 1), buildNewFakeTransportAddress()); } - Settings nodeSettings = Settings.builder() - .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_ip") - .build(); + Settings nodeSettings = Settings.builder().put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_ip").build(); List transportAddresses = buildDynamicHosts(nodeSettings, nodes); assertThat(transportAddresses, hasSize(nodes)); // We check that we are using here expected address @@ -198,11 +207,9 @@ public void testPrivateIp() throws InterruptedException { public void testPublicIp() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { - poorMansDNS.put(PREFIX_PUBLIC_IP + (i+1), buildNewFakeTransportAddress()); + poorMansDNS.put(PREFIX_PUBLIC_IP + (i + 1), buildNewFakeTransportAddress()); } - Settings nodeSettings = Settings.builder() - .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_ip") - .build(); + Settings nodeSettings = Settings.builder().put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_ip").build(); List dynamicHosts = buildDynamicHosts(nodeSettings, nodes); assertThat(dynamicHosts, hasSize(nodes)); // We check that we are using here expected address @@ -216,21 +223,17 @@ public void testPublicIp() throws InterruptedException { public void testPrivateDns() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { - String instanceId = "node" + (i+1); - 
poorMansDNS.put(PREFIX_PRIVATE_DNS + instanceId + - SUFFIX_PRIVATE_DNS, buildNewFakeTransportAddress()); + String instanceId = "node" + (i + 1); + poorMansDNS.put(PREFIX_PRIVATE_DNS + instanceId + SUFFIX_PRIVATE_DNS, buildNewFakeTransportAddress()); } - Settings nodeSettings = Settings.builder() - .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_dns") - .build(); + Settings nodeSettings = Settings.builder().put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_dns").build(); List dynamicHosts = buildDynamicHosts(nodeSettings, nodes); assertThat(dynamicHosts, hasSize(nodes)); // We check that we are using here expected address int node = 1; for (TransportAddress address : dynamicHosts) { String instanceId = "node" + node++; - TransportAddress expected = poorMansDNS.get( - PREFIX_PRIVATE_DNS + instanceId + SUFFIX_PRIVATE_DNS); + TransportAddress expected = poorMansDNS.get(PREFIX_PRIVATE_DNS + instanceId + SUFFIX_PRIVATE_DNS); assertEquals(address, expected); } } @@ -238,41 +241,31 @@ public void testPrivateDns() throws InterruptedException { public void testPublicDns() throws InterruptedException { int nodes = randomInt(10); for (int i = 0; i < nodes; i++) { - String instanceId = "node" + (i+1); - poorMansDNS.put(PREFIX_PUBLIC_DNS + instanceId - + SUFFIX_PUBLIC_DNS, buildNewFakeTransportAddress()); + String instanceId = "node" + (i + 1); + poorMansDNS.put(PREFIX_PUBLIC_DNS + instanceId + SUFFIX_PUBLIC_DNS, buildNewFakeTransportAddress()); } - Settings nodeSettings = Settings.builder() - .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_dns") - .build(); + Settings nodeSettings = Settings.builder().put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_dns").build(); List dynamicHosts = buildDynamicHosts(nodeSettings, nodes); assertThat(dynamicHosts, hasSize(nodes)); // We check that we are using here expected address int node = 1; for (TransportAddress address : dynamicHosts) { String instanceId = "node" + node++; - TransportAddress expected = poorMansDNS.get( - PREFIX_PUBLIC_DNS + instanceId + SUFFIX_PUBLIC_DNS); + TransportAddress expected = poorMansDNS.get(PREFIX_PUBLIC_DNS + instanceId + SUFFIX_PUBLIC_DNS); assertEquals(address, expected); } } public void testInvalidHostType() throws InterruptedException { - Settings nodeSettings = Settings.builder() - .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "does_not_exist") - .build(); + Settings nodeSettings = Settings.builder().put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "does_not_exist").build(); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { - buildDynamicHosts(nodeSettings, 1); - }); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { buildDynamicHosts(nodeSettings, 1); }); assertThat(exception.getMessage(), containsString("does_not_exist is unknown for discovery.ec2.host_type")); } public void testFilterByTags() throws InterruptedException { int nodes = randomIntBetween(5, 10); - Settings nodeSettings = Settings.builder() - .put(AwsEc2Service.TAG_SETTING.getKey() + "stage", "prod") - .build(); + Settings nodeSettings = Settings.builder().put(AwsEc2Service.TAG_SETTING.getKey() + "stage", "prod").build(); int prodInstances = 0; List> tagsList = new ArrayList<>(); @@ -295,9 +288,7 @@ public void testFilterByTags() throws InterruptedException { public void testFilterByMultipleTags() throws InterruptedException { int nodes = randomIntBetween(5, 10); - Settings nodeSettings = Settings.builder() - .putList(AwsEc2Service.TAG_SETTING.getKey() + 
"stage", "prod", "preprod") - .build(); + Settings nodeSettings = Settings.builder().putList(AwsEc2Service.TAG_SETTING.getKey() + "stage", "prod", "preprod").build(); int prodInstances = 0; List> tagsList = new ArrayList<>(); @@ -334,9 +325,7 @@ public void testReadHostFromTag() throws UnknownHostException { poorMansDNS.put("node" + (node + 1), new TransportAddress(InetAddress.getByName(addresses[node]), 9300)); } - Settings nodeSettings = Settings.builder() - .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "tag:foo") - .build(); + Settings nodeSettings = Settings.builder().put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "tag:foo").build(); List> tagsList = new ArrayList<>(); @@ -358,6 +347,7 @@ public void testReadHostFromTag() throws UnknownHostException { abstract static class DummyEc2SeedHostsProvider extends AwsEc2SeedHostsProvider { public int fetchCount = 0; + DummyEc2SeedHostsProvider(Settings settings, TransportService transportService, AwsEc2Service service) { super(settings, transportService, service); } diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java index 627369bd4b32f..482afb748229e 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java @@ -76,7 +76,7 @@ public class Ec2NetworkTests extends OpenSearchTestCase { public static void startHttp() throws Exception { httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); - BiConsumer registerContext = (path, v) ->{ + BiConsumer registerContext = (path, v) -> { final byte[] message = v.getBytes(UTF_8); httpServer.createContext(path, (s) -> { s.sendResponseHeaders(RestStatus.OK.getStatus(), message.length); @@ -85,10 +85,10 @@ public static void startHttp() throws Exception { responseBody.close(); }); }; - registerContext.accept("/latest/meta-data/local-ipv4","127.0.0.1"); - registerContext.accept("/latest/meta-data/public-ipv4","165.168.10.2"); - registerContext.accept("/latest/meta-data/public-hostname","165.168.10.3"); - registerContext.accept("/latest/meta-data/local-hostname","10.10.10.5"); + registerContext.accept("/latest/meta-data/local-ipv4", "127.0.0.1"); + registerContext.accept("/latest/meta-data/public-ipv4", "165.168.10.2"); + registerContext.accept("/latest/meta-data/public-hostname", "165.168.10.3"); + registerContext.accept("/latest/meta-data/local-hostname", "10.10.10.5"); httpServer.start(); } @@ -96,8 +96,12 @@ public static void startHttp() throws Exception { @Before public void setup() { // redirect EC2 metadata service to httpServer - AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY, - "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort())); + AccessController.doPrivileged( + (PrivilegedAction) () -> System.setProperty( + EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY, + "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort() + ) + ); } @AfterClass @@ -118,14 +122,17 @@ public void testNetworkHostEc2() throws IOException { */ public void testNetworkHostUnableToResolveEc2() { // redirect EC2 metadata service to unknown location - AccessController.doPrivileged((PrivilegedAction) () -> 
System.setProperty(EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY, - "http://127.0.0.1/")); + AccessController.doPrivileged( + (PrivilegedAction) () -> System.setProperty(EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY, "http://127.0.0.1/") + ); try { resolveEc2("_ec2_", (InetAddress[]) null); } catch (IOException e) { - assertThat(e.getMessage(), - equalTo("IOException caught when fetching InetAddress from [http://127.0.0.1//latest/meta-data/local-ipv4]")); + assertThat( + e.getMessage(), + equalTo("IOException caught when fetching InetAddress from [http://127.0.0.1//latest/meta-data/local-ipv4]") + ); } } @@ -171,15 +178,14 @@ public void testNetworkHostEc2PublicDns() throws IOException { resolveEc2("_ec2:publicDns_", InetAddress.getByName("165.168.10.3")); } - private InetAddress[] resolveEc2(String host, InetAddress ... expected) throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", host) - .build(); + private InetAddress[] resolveEc2(String host, InetAddress... expected) throws IOException { + Settings nodeSettings = Settings.builder().put("network.host", host).build(); NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver())); InetAddress[] addresses = networkService.resolveBindHostAddresses( - NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY)); + NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY) + ); if (expected == null) { fail("We should get an IOException, resolved addressed:" + Arrays.toString(addresses)); } diff --git a/plugins/discovery-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/CloudAwsClientYamlTestSuiteIT.java b/plugins/discovery-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/CloudAwsClientYamlTestSuiteIT.java index 03b8a4d348b24..02749eee986bd 100644 --- a/plugins/discovery-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/CloudAwsClientYamlTestSuiteIT.java +++ b/plugins/discovery-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/CloudAwsClientYamlTestSuiteIT.java @@ -49,4 +49,3 @@ public static Iterable parameters() throws Exception { return OpenSearchClientYamlSuiteTestCase.createParameters(); } } - diff --git a/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEFixture.java b/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEFixture.java index 77bef1cbfe240..f1eba6a635504 100644 --- a/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEFixture.java +++ b/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEFixture.java @@ -127,54 +127,66 @@ private PathTrie defaultHandlers() { }; // https://cloud.google.com/compute/docs/storing-retrieving-metadata - handlers.insert(nonAuthPath(HttpGet.METHOD_NAME, "/computeMetadata/v1/project/project-id"), - request -> simpleValue.apply(PROJECT_ID)); - handlers.insert(nonAuthPath(HttpGet.METHOD_NAME, "/computeMetadata/v1/project/attributes/google-compute-default-zone"), - request -> simpleValue.apply(ZONE)); + handlers.insert( + nonAuthPath(HttpGet.METHOD_NAME, "/computeMetadata/v1/project/project-id"), + request -> simpleValue.apply(PROJECT_ID) + ); + handlers.insert( + nonAuthPath(HttpGet.METHOD_NAME, "/computeMetadata/v1/project/attributes/google-compute-default-zone"), + request -> simpleValue.apply(ZONE) + ); // https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances - 
handlers.insert(nonAuthPath(HttpGet.METHOD_NAME, "/computeMetadata/v1/instance/service-accounts/default/token"), - request -> jsonValue.apply(Strings.toString(jsonBuilder() - .startObject() - .field("access_token", TOKEN) - .field("expires_in", TimeUnit.HOURS.toSeconds(1)) - .field("token_type", TOKEN_TYPE) - .endObject()))); + handlers.insert( + nonAuthPath(HttpGet.METHOD_NAME, "/computeMetadata/v1/instance/service-accounts/default/token"), + request -> jsonValue.apply( + Strings.toString( + jsonBuilder().startObject() + .field("access_token", TOKEN) + .field("expires_in", TimeUnit.HOURS.toSeconds(1)) + .field("token_type", TOKEN_TYPE) + .endObject() + ) + ) + ); // https://cloud.google.com/compute/docs/reference/rest/v1/instances - handlers.insert(authPath(HttpGet.METHOD_NAME, "/compute/v1/projects/{project}/zones/{zone}/instances"), - request -> { - final List> items = new ArrayList<>(); - int count = 0; - for (String address : Files.readAllLines(nodes)) { - count++; - items.add(MapBuilder.newMapBuilder() + handlers.insert(authPath(HttpGet.METHOD_NAME, "/compute/v1/projects/{project}/zones/{zone}/instances"), request -> { + final List> items = new ArrayList<>(); + int count = 0; + for (String address : Files.readAllLines(nodes)) { + count++; + items.add( + MapBuilder.newMapBuilder() .put("id", Long.toString(9309873766405L + count)) .put("description", "ES node" + count) .put("name", "test" + count) .put("kind", "compute#instance") .put("machineType", "n1-standard-1") - .put("networkInterfaces", - Collections.singletonList(MapBuilder.newMapBuilder() - .put("accessConfigs", Collections.emptyList()) - .put("name", "nic0") - .put("network", "default") - .put("networkIP", address) - .immutableMap())) + .put( + "networkInterfaces", + Collections.singletonList( + MapBuilder.newMapBuilder() + .put("accessConfigs", Collections.emptyList()) + .put("name", "nic0") + .put("network", "default") + .put("networkIP", address) + .immutableMap() + ) + ) .put("status", "RUNNING") .put("zone", ZONE) - .immutableMap()); - } - - final String json = Strings.toString(jsonBuilder() - .startObject() - .field("id", "test-instances") - .field("items", items) - .endObject()); - - final byte[] responseAsBytes = json.getBytes(StandardCharsets.UTF_8); - final Map headers = new HashMap<>(JSON_CONTENT_TYPE); - commonHeaderConsumer.accept(headers); - return new Response(RestStatus.OK.getStatus(), headers, responseAsBytes); + .immutableMap() + ); + } + + final String json = Strings.toString( + jsonBuilder().startObject().field("id", "test-instances").field("items", items).endObject() + ); + + final byte[] responseAsBytes = json.getBytes(StandardCharsets.UTF_8); + final Map headers = new HashMap<>(JSON_CONTENT_TYPE); + commonHeaderConsumer.accept(headers); + return new Response(RestStatus.OK.getStatus(), headers, responseAsBytes); }); return handlers; } @@ -201,22 +213,29 @@ protected Response handle(final Request request) throws IOException { } private static Response newError(final RestStatus status, final String code, final String message) throws IOException { - final String response = Strings.toString(jsonBuilder() - .startObject() - .field("error", MapBuilder.newMapBuilder() - .put("errors", Collections.singletonList( + final String response = Strings.toString( + jsonBuilder().startObject() + .field( + "error", MapBuilder.newMapBuilder() - .put("domain", "global") - .put("reason", "required") - .put("message", message) - .put("locationType", "header") - .put("location", code) - .immutableMap() - )) - .put("code", 
status.getStatus()) - .put("message", message) - .immutableMap()) - .endObject()); + .put( + "errors", + Collections.singletonList( + MapBuilder.newMapBuilder() + .put("domain", "global") + .put("reason", "required") + .put("message", message) + .put("locationType", "header") + .put("location", code) + .immutableMap() + ) + ) + .put("code", status.getStatus()) + .put("message", message) + .immutableMap() + ) + .endObject() + ); return new Response(status.getStatus(), JSON_CONTENT_TYPE, response.getBytes(UTF_8)); } diff --git a/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java index 7e820d2c599b6..815537b534586 100644 --- a/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java +++ b/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java @@ -77,10 +77,10 @@ protected Collection> nodePlugins() { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "gce") - .put("cloud.gce.project_id", "test") - .put("cloud.gce.zone", "test") + .put(super.nodeSettings(nodeOrdinal)) + .put(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "gce") + .put("cloud.gce.project_id", "test") + .put("cloud.gce.zone", "test") .build(); } @@ -89,22 +89,26 @@ public void testJoin() { final String masterNode = internalCluster().startMasterOnlyNode(); registerGceNode(masterNode); - ClusterStateResponse clusterStateResponse = client(masterNode).admin().cluster().prepareState() - .setMasterNodeTimeout("1s") - .clear() - .setNodes(true) - .get(); + ClusterStateResponse clusterStateResponse = client(masterNode).admin() + .cluster() + .prepareState() + .setMasterNodeTimeout("1s") + .clear() + .setNodes(true) + .get(); assertNotNull(clusterStateResponse.getState().nodes().getMasterNodeId()); // start another node final String secondNode = internalCluster().startNode(); registerGceNode(secondNode); - clusterStateResponse = client(secondNode).admin().cluster().prepareState() - .setMasterNodeTimeout("1s") - .clear() - .setNodes(true) - .setLocal(true) - .get(); + clusterStateResponse = client(secondNode).admin() + .cluster() + .prepareState() + .setMasterNodeTimeout("1s") + .clear() + .setNodes(true) + .setLocal(true) + .get(); assertNotNull(clusterStateResponse.getState().nodes().getMasterNodeId()); // wait for the cluster to form @@ -187,8 +191,7 @@ public List zones() { } @Override - public void close() throws IOException { - } + public void close() throws IOException {} }; } } diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesService.java b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesService.java index 43f8fe6632e6d..2d0b38c53a9a7 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesService.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesService.java @@ -60,15 +60,22 @@ public interface GceInstancesService extends Closeable { /** * cloud.gce.zone: Google Compute Engine zones */ - Setting> ZONE_SETTING = - Setting.listSetting("cloud.gce.zone", Collections.emptyList(), Function.identity(), Property.NodeScope); + Setting> ZONE_SETTING = Setting.listSetting( + "cloud.gce.zone", + Collections.emptyList(), + Function.identity(), + 
Property.NodeScope + ); /** * cloud.gce.refresh_interval: How long the list of hosts is cached to prevent further requests to the AWS API. 0 disables caching. * A negative value will cause infinite caching. Defaults to 0s. */ - Setting REFRESH_SETTING = - Setting.timeSetting("cloud.gce.refresh_interval", TimeValue.timeValueSeconds(0), Property.NodeScope); + Setting REFRESH_SETTING = Setting.timeSetting( + "cloud.gce.refresh_interval", + TimeValue.timeValueSeconds(0), + Property.NodeScope + ); /** * cloud.gce.retry: Should we retry calling GCE API in case of error? Defaults to true. @@ -80,8 +87,7 @@ public interface GceInstancesService extends Closeable { * It's a total time since the initial call is made. * A negative value will retry indefinitely. Defaults to `-1s` (retry indefinitely). */ - Setting MAX_WAIT_SETTING = - Setting.timeSetting("cloud.gce.max_wait", TimeValue.timeValueSeconds(-1), Property.NodeScope); + Setting MAX_WAIT_SETTING = Setting.timeSetting("cloud.gce.max_wait", TimeValue.timeValueSeconds(-1), Property.NodeScope); /** * Return a collection of running instances within the same GCE project diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesServiceImpl.java index 58a8e1c8678c9..f25faaf415140 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesServiceImpl.java @@ -67,14 +67,21 @@ import java.util.function.Function; public class GceInstancesServiceImpl implements GceInstancesService { - + private static final Logger logger = LogManager.getLogger(GceInstancesServiceImpl.class); // all settings just used for testing - not registered by default - public static final Setting GCE_VALIDATE_CERTIFICATES = - Setting.boolSetting("cloud.gce.validate_certificates", true, Property.NodeScope); - public static final Setting GCE_ROOT_URL = - new Setting<>("cloud.gce.root_url", "https://www.googleapis.com", Function.identity(), Property.NodeScope); + public static final Setting GCE_VALIDATE_CERTIFICATES = Setting.boolSetting( + "cloud.gce.validate_certificates", + true, + Property.NodeScope + ); + public static final Setting GCE_ROOT_URL = new Setting<>( + "cloud.gce.root_url", + "https://www.googleapis.com", + Function.identity(), + Property.NodeScope + ); private final String project; private final List zones; @@ -91,8 +98,9 @@ public Collection instances() { return list.execute(); }); // assist type inference - return instanceList.isEmpty() || instanceList.getItems() == null ? - Collections.emptyList() : instanceList.getItems(); + return instanceList.isEmpty() || instanceList.getItems() == null + ? 
Collections.emptyList() + : instanceList.getItems(); } catch (IOException e) { logger.warn((Supplier) () -> new ParameterizedMessage("Problem fetching instance list for zone {}", zoneId), e); logger.debug("Full exception:", e); @@ -151,8 +159,9 @@ private List resolveZones() { } try { - final String defaultZone = - getAppEngineValueFromMetadataServer("/computeMetadata/v1/project/attributes/google-compute-default-zone"); + final String defaultZone = getAppEngineValueFromMetadataServer( + "/computeMetadata/v1/project/attributes/google-compute-default-zone" + ); return Collections.singletonList(defaultZone); } catch (Exception e) { logger.warn("unable to resolve default zone from metadata server for GCE discovery service", e); @@ -194,8 +203,7 @@ protected synchronized HttpTransport getGceHttpTransport() throws GeneralSecurit public synchronized Compute client() { if (refreshInterval != null && refreshInterval.millis() != 0) { - if (client != null && - (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) { + if (client != null && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) { if (logger.isTraceEnabled()) logger.trace("using cache to retrieve client"); return client; } @@ -207,13 +215,13 @@ public synchronized Compute client() { logger.info("starting GCE discovery service"); // Forcing Google Token API URL as set in GCE SDK to - // http://metadata/computeMetadata/v1/instance/service-accounts/default/token + // http://metadata/computeMetadata/v1/instance/service-accounts/default/token // See https://developers.google.com/compute/docs/metadata#metadataserver - String tokenServerEncodedUrl = GceMetadataService.GCE_HOST.get(settings) + - "/computeMetadata/v1/instance/service-accounts/default/token"; - ComputeCredential credential = new ComputeCredential.Builder(getGceHttpTransport(), gceJsonFactory) - .setTokenServerEncodedUrl(tokenServerEncodedUrl) - .build(); + String tokenServerEncodedUrl = GceMetadataService.GCE_HOST.get(settings) + + "/computeMetadata/v1/instance/service-accounts/default/token"; + ComputeCredential credential = new ComputeCredential.Builder(getGceHttpTransport(), gceJsonFactory).setTokenServerEncodedUrl( + tokenServerEncodedUrl + ).build(); // hack around code messiness in GCE code // TODO: get this fixed @@ -224,7 +232,6 @@ public synchronized Compute client() { refreshInterval = TimeValue.timeValueSeconds(credential.getExpiresInSeconds() - 1); } - Compute.Builder builder = new Compute.Builder(getGceHttpTransport(), gceJsonFactory, null).setApplicationName(VERSION) .setRootUrl(GCE_ROOT_URL.get(settings)); diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceMetadataService.java b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceMetadataService.java index b43189243cb76..4873cb6dcbf7a 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceMetadataService.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceMetadataService.java @@ -54,11 +54,15 @@ public class GceMetadataService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(GceMetadataService.class); // Forcing Google Token API URL as set in GCE SDK to - // http://metadata/computeMetadata/v1/instance/service-accounts/default/token + // http://metadata/computeMetadata/v1/instance/service-accounts/default/token // See https://developers.google.com/compute/docs/metadata#metadataserver // all 
settings just used for testing - not registered by default - public static final Setting GCE_HOST = - new Setting<>("cloud.gce.host", "http://metadata.google.internal", Function.identity(), Setting.Property.NodeScope); + public static final Setting GCE_HOST = new Setting<>( + "cloud.gce.host", + "http://metadata.google.internal", + Function.identity(), + Setting.Property.NodeScope + ); private final Settings settings; @@ -78,7 +82,7 @@ protected synchronized HttpTransport getGceHttpTransport() throws GeneralSecurit public String metadata(String metadataPath) throws IOException, URISyntaxException { // Forcing Google Token API URL as set in GCE SDK to - // http://metadata/computeMetadata/v1/instance/service-accounts/default/token + // http://metadata/computeMetadata/v1/instance/service-accounts/default/token // See https://developers.google.com/compute/docs/metadata#metadataserver final URI urlMetadataNetwork = new URI(GCE_HOST.get(settings)).resolve("/computeMetadata/v1/instance/").resolve(metadataPath); logger.debug("get metadata from [{}]", urlMetadataNetwork); @@ -91,11 +95,9 @@ public String metadata(String metadataPath) throws IOException, URISyntaxExcepti // This is needed to query meta data: https://cloud.google.com/compute/docs/metadata headers.put("Metadata-Flavor", "Google"); - HttpResponse response = Access.doPrivilegedIOException(() -> - getGceHttpTransport().createRequestFactory() - .buildGetRequest(genericUrl) - .setHeaders(headers) - .execute()); + HttpResponse response = Access.doPrivilegedIOException( + () -> getGceHttpTransport().createRequestFactory().buildGetRequest(genericUrl).setHeaders(headers).execute() + ); String metadata = response.parseAsString(); logger.debug("metadata found [{}]", metadata); return metadata; diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/network/GceNameResolver.java b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/network/GceNameResolver.java index dfca14fb59ecc..7482dfac401ef 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/network/GceNameResolver.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/network/GceNameResolver.java @@ -111,8 +111,12 @@ private InetAddress[] resolve(String value) throws IOException { // We replace network placeholder with network interface value gceMetadataPath = Strings.replace(GceAddressResolverType.PRIVATE_IP.gceName, "{{network}}", network); } else { - throw new IllegalArgumentException("[" + value + "] is not one of the supported GCE network.host setting. " + - "Expecting _gce_, _gce:privateIp:X_, _gce:hostname_"); + throw new IllegalArgumentException( + "[" + + value + + "] is not one of the supported GCE network.host setting. 
" + + "Expecting _gce_, _gce:privateIp:X_, _gce:hostname_" + ); } try { diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java index b329b0f3e8a42..1401f7ca26ce6 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java @@ -65,8 +65,7 @@ public static void doPrivilegedVoid(final Runnable action) { }); } - public static T doPrivilegedIOException(final PrivilegedExceptionAction operation) - throws IOException { + public static T doPrivilegedIOException(final PrivilegedExceptionAction operation) throws IOException { SpecialPermission.check(); try { return AccessController.doPrivileged(operation); diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java b/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java index 785786b7f5286..19247a7bb536d 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java @@ -68,8 +68,12 @@ public class GceSeedHostsProvider implements SeedHostsProvider { /** * discovery.gce.tags: The gce discovery can filter machines to include in the cluster based on tags. */ - public static final Setting> TAGS_SETTING = - Setting.listSetting("discovery.gce.tags", emptyList(), Function.identity(), Property.NodeScope); + public static final Setting> TAGS_SETTING = Setting.listSetting( + "discovery.gce.tags", + emptyList(), + Function.identity(), + Property.NodeScope + ); static final class Status { private static final String TERMINATED = "TERMINATED"; @@ -88,9 +92,12 @@ static final class Status { private long lastRefresh; private List cachedDynamicHosts; - public GceSeedHostsProvider(Settings settings, GceInstancesService gceInstancesService, - TransportService transportService, - NetworkService networkService) { + public GceSeedHostsProvider( + Settings settings, + GceInstancesService gceInstancesService, + TransportService transportService, + NetworkService networkService + ) { this.settings = settings; this.gceInstancesService = gceInstancesService; this.transportService = transportService; @@ -114,14 +121,19 @@ public GceSeedHostsProvider(Settings settings, GceInstancesService gceInstancesS public List getSeedAddresses(HostsResolver hostsResolver) { // We check that needed properties have been set if (this.project == null || this.project.isEmpty() || this.zones == null || this.zones.isEmpty()) { - throw new IllegalArgumentException("one or more gce discovery settings are missing. " + - "Check opensearch.yml file. Should have [" + GceInstancesService.PROJECT_SETTING.getKey() + - "] and [" + GceInstancesService.ZONE_SETTING.getKey() + "]."); + throw new IllegalArgumentException( + "one or more gce discovery settings are missing. " + + "Check opensearch.yml file. Should have [" + + GceInstancesService.PROJECT_SETTING.getKey() + + "] and [" + + GceInstancesService.ZONE_SETTING.getKey() + + "]." 
+ ); } if (refreshInterval.millis() != 0) { - if (cachedDynamicHosts != null && - (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) { + if (cachedDynamicHosts != null + && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) { if (logger.isTraceEnabled()) logger.trace("using cache to retrieve node list"); return cachedDynamicHosts; } @@ -133,7 +145,8 @@ public List getSeedAddresses(HostsResolver hostsResolver) { String ipAddress = null; try { InetAddress inetAddress = networkService.resolvePublishHostAddresses( - NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY)); + NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY) + ); if (inetAddress != null) { ipAddress = NetworkAddress.format(inetAddress); } @@ -168,8 +181,10 @@ public List getSeedAddresses(HostsResolver hostsResolver) { boolean filterByTag = false; if (tags.isEmpty() == false) { logger.trace("start filtering instance {} with tags {}.", name, tags); - if (instance.getTags() == null || instance.getTags().isEmpty() - || instance.getTags().getItems() == null || instance.getTags().getItems().isEmpty()) { + if (instance.getTags() == null + || instance.getTags().isEmpty() + || instance.getTags().getItems() == null + || instance.getTags().getItems().isEmpty()) { // If this instance have no tag, we filter it logger.trace("no tags for this instance but we asked for tags. {} won't be part of the cluster.", name); filterByTag = true; @@ -192,8 +207,12 @@ public List getSeedAddresses(HostsResolver hostsResolver) { } } if (filterByTag) { - logger.trace("filtering out instance {} based tags {}, not part of {}", name, tags, - instance.getTags() == null || instance.getTags().getItems() == null ? "" : instance.getTags()); + logger.trace( + "filtering out instance {} based tags {}, not part of {}", + name, + tags, + instance.getTags() == null || instance.getTags().getItems() == null ? "" : instance.getTags() + ); continue; } else { logger.trace("instance {} with tags {} is added to discovery", name, tags); @@ -249,8 +268,14 @@ public List getSeedAddresses(HostsResolver hostsResolver) { TransportAddress[] addresses = transportService.addressesFromString(address); for (TransportAddress transportAddress : addresses) { - logger.trace("adding {}, type {}, address {}, transport_address {}, status {}", name, type, - ip_private, transportAddress, status); + logger.trace( + "adding {}, type {}, address {}, transport_address {}, status {}", + name, + type, + ip_private, + transportAddress, + status + ); cachedDynamicHosts.add(transportAddress); } } diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapper.java b/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapper.java index d8c7752214ebf..26b8215bed7ff 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapper.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapper.java @@ -73,8 +73,7 @@ public RetryHttpInitializerWrapper(Credential wrappedCredential, TimeValue maxWa } // Use only for testing. 
- RetryHttpInitializerWrapper( - Credential wrappedCredential, Sleeper sleeper, TimeValue maxWait) { + RetryHttpInitializerWrapper(Credential wrappedCredential, Sleeper sleeper, TimeValue maxWait) { this.wrappedCredential = Objects.requireNonNull(wrappedCredential); this.sleeper = sleeper; this.maxWait = maxWait; @@ -88,45 +87,35 @@ static MockGoogleCredential.Builder newMockCredentialBuilder() { @Override public void initialize(HttpRequest httpRequest) { - final HttpUnsuccessfulResponseHandler backoffHandler = - new HttpBackOffUnsuccessfulResponseHandler( - new ExponentialBackOff.Builder() - .setMaxElapsedTimeMillis(((int) maxWait.getMillis())) - .build()) - .setSleeper(sleeper); + final HttpUnsuccessfulResponseHandler backoffHandler = new HttpBackOffUnsuccessfulResponseHandler( + new ExponentialBackOff.Builder().setMaxElapsedTimeMillis(((int) maxWait.getMillis())).build() + ).setSleeper(sleeper); httpRequest.setInterceptor(wrappedCredential); - httpRequest.setUnsuccessfulResponseHandler( - new HttpUnsuccessfulResponseHandler() { - int retry = 0; - - @Override - public boolean handleResponse(HttpRequest request, HttpResponse response, boolean supportsRetry) throws IOException { - if (wrappedCredential.handleResponse( - request, response, supportsRetry)) { - // If credential decides it can handle it, - // the return code or message indicated - // something specific to authentication, - // and no backoff is desired. - return true; - } else if (backoffHandler.handleResponse( - request, response, supportsRetry)) { - // Otherwise, we defer to the judgement of - // our internal backoff handler. - logger.debug("Retrying [{}] times : [{}]", retry, request.getUrl()); - return true; - } else { - return false; - } - } - }); + httpRequest.setUnsuccessfulResponseHandler(new HttpUnsuccessfulResponseHandler() { + int retry = 0; + + @Override + public boolean handleResponse(HttpRequest request, HttpResponse response, boolean supportsRetry) throws IOException { + if (wrappedCredential.handleResponse(request, response, supportsRetry)) { + // If credential decides it can handle it, + // the return code or message indicated + // something specific to authentication, + // and no backoff is desired. + return true; + } else if (backoffHandler.handleResponse(request, response, supportsRetry)) { + // Otherwise, we defer to the judgement of + // our internal backoff handler. + logger.debug("Retrying [{}] times : [{}]", retry, request.getUrl()); + return true; + } else { + return false; + } + } + }); httpRequest.setIOExceptionHandler( - new HttpBackOffIOExceptionHandler( - new ExponentialBackOff.Builder() - .setMaxElapsedTimeMillis(((int) maxWait.getMillis())) - .build()) - .setSleeper(sleeper) + new HttpBackOffIOExceptionHandler(new ExponentialBackOff.Builder().setMaxElapsedTimeMillis(((int) maxWait.getMillis())).build()) + .setSleeper(sleeper) ); } } - diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/opensearch/plugin/discovery/gce/GceDiscoveryPlugin.java index f9c78b71e74a3..6d015f54ffb29 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -65,8 +65,9 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable { /** Determines whether settings those reroutes GCE call should be allowed (for testing purposes only). 
*/ - private static final boolean ALLOW_REROUTE_GCE_SETTINGS = - Booleans.parseBoolean(System.getProperty("opensearch.allow_reroute_gce_settings", "false")); + private static final boolean ALLOW_REROUTE_GCE_SETTINGS = Booleans.parseBoolean( + System.getProperty("opensearch.allow_reroute_gce_settings", "false") + ); public static final String GCE = "gce"; protected final Settings settings; @@ -83,7 +84,7 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close * our plugin permissions don't allow core to "reach through" plugins to * change the permission. Because that'd be silly. */ - Access.doPrivilegedVoid( () -> ClassInfo.of(HttpHeaders.class, true)); + Access.doPrivilegedVoid(() -> ClassInfo.of(HttpHeaders.class, true)); } public GceDiscoveryPlugin(Settings settings) { @@ -97,8 +98,7 @@ protected GceInstancesService createGceInstancesService() { } @Override - public Map> getSeedHostProviders(TransportService transportService, - NetworkService networkService) { + public Map> getSeedHostProviders(TransportService transportService, NetworkService networkService) { return Collections.singletonMap(GCE, () -> { gceInstancesService.set(createGceInstancesService()); return new GceSeedHostsProvider(settings, gceInstancesService.get(), transportService, networkService); @@ -121,7 +121,8 @@ public List> getSettings() { GceSeedHostsProvider.TAGS_SETTING, GceInstancesService.REFRESH_SETTING, GceInstancesService.RETRY_SETTING, - GceInstancesService.MAX_WAIT_SETTING) + GceInstancesService.MAX_WAIT_SETTING + ) ); if (ALLOW_REROUTE_GCE_SETTINGS) { @@ -131,8 +132,6 @@ public List> getSettings() { return Collections.unmodifiableList(settings); } - - @Override public void close() throws IOException { IOUtils.close(gceInstancesService.get()); diff --git a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java index ad1c4fdc46c2e..2ca1234bb8a04 100644 --- a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java @@ -120,8 +120,12 @@ public void stopGceComputeService() throws IOException { } protected List buildDynamicNodes(GceInstancesServiceImpl gceInstancesService, Settings nodeSettings) { - GceSeedHostsProvider provider = new GceSeedHostsProvider(nodeSettings, gceInstancesService, - transportService, new NetworkService(Collections.emptyList())); + GceSeedHostsProvider provider = new GceSeedHostsProvider( + nodeSettings, + gceInstancesService, + transportService, + new NetworkService(Collections.emptyList()) + ); List dynamicHosts = provider.getSeedAddresses(null); logger.info("--> addresses found: {}", dynamicHosts); @@ -130,9 +134,9 @@ protected List buildDynamicNodes(GceInstancesServiceImpl gceIn public void testNodesWithDifferentTagsAndNoTagSet() { Settings nodeSettings = Settings.builder() - .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) - .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") - .build(); + .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) + .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") + .build(); mock = new GceInstancesServiceMock(nodeSettings); List dynamicHosts = buildDynamicNodes(mock, nodeSettings); assertThat(dynamicHosts, hasSize(2)); @@ -140,10 +144,10 @@ public void testNodesWithDifferentTagsAndNoTagSet() { public void 
testNodesWithDifferentTagsAndOneTagSet() { Settings nodeSettings = Settings.builder() - .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) - .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") - .putList(GceSeedHostsProvider.TAGS_SETTING.getKey(), "opensearch") - .build(); + .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) + .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") + .putList(GceSeedHostsProvider.TAGS_SETTING.getKey(), "opensearch") + .build(); mock = new GceInstancesServiceMock(nodeSettings); List dynamicHosts = buildDynamicNodes(mock, nodeSettings); assertThat(dynamicHosts, hasSize(1)); @@ -151,10 +155,10 @@ public void testNodesWithDifferentTagsAndOneTagSet() { public void testNodesWithDifferentTagsAndTwoTagSet() { Settings nodeSettings = Settings.builder() - .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) - .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") - .putList(GceSeedHostsProvider.TAGS_SETTING.getKey(), "opensearch", "dev") - .build(); + .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) + .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") + .putList(GceSeedHostsProvider.TAGS_SETTING.getKey(), "opensearch", "dev") + .build(); mock = new GceInstancesServiceMock(nodeSettings); List dynamicHosts = buildDynamicNodes(mock, nodeSettings); assertThat(dynamicHosts, hasSize(1)); @@ -162,9 +166,9 @@ public void testNodesWithDifferentTagsAndTwoTagSet() { public void testNodesWithSameTagsAndNoTagSet() { Settings nodeSettings = Settings.builder() - .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) - .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") - .build(); + .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) + .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") + .build(); mock = new GceInstancesServiceMock(nodeSettings); List dynamicHosts = buildDynamicNodes(mock, nodeSettings); assertThat(dynamicHosts, hasSize(2)); @@ -172,10 +176,10 @@ public void testNodesWithSameTagsAndNoTagSet() { public void testNodesWithSameTagsAndOneTagSet() { Settings nodeSettings = Settings.builder() - .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) - .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") - .putList(GceSeedHostsProvider.TAGS_SETTING.getKey(), "opensearch") - .build(); + .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) + .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") + .putList(GceSeedHostsProvider.TAGS_SETTING.getKey(), "opensearch") + .build(); mock = new GceInstancesServiceMock(nodeSettings); List dynamicHosts = buildDynamicNodes(mock, nodeSettings); assertThat(dynamicHosts, hasSize(2)); @@ -183,10 +187,10 @@ public void testNodesWithSameTagsAndOneTagSet() { public void testNodesWithSameTagsAndTwoTagsSet() { Settings nodeSettings = Settings.builder() - .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) - .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") - .putList(GceSeedHostsProvider.TAGS_SETTING.getKey(), "opensearch", "dev") - .build(); + .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) + .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") + .putList(GceSeedHostsProvider.TAGS_SETTING.getKey(), "opensearch", "dev") + .build(); mock = new GceInstancesServiceMock(nodeSettings); List dynamicHosts = 
buildDynamicNodes(mock, nodeSettings); assertThat(dynamicHosts, hasSize(2)); @@ -194,9 +198,9 @@ public void testNodesWithSameTagsAndTwoTagsSet() { public void testMultipleZonesAndTwoNodesInSameZone() { Settings nodeSettings = Settings.builder() - .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) - .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "us-central1-a", "europe-west1-b") - .build(); + .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) + .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "us-central1-a", "europe-west1-b") + .build(); mock = new GceInstancesServiceMock(nodeSettings); List dynamicHosts = buildDynamicNodes(mock, nodeSettings); assertThat(dynamicHosts, hasSize(2)); @@ -204,9 +208,9 @@ public void testMultipleZonesAndTwoNodesInSameZone() { public void testMultipleZonesAndTwoNodesInDifferentZones() { Settings nodeSettings = Settings.builder() - .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) - .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "us-central1-a", "europe-west1-b") - .build(); + .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) + .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "us-central1-a", "europe-west1-b") + .build(); mock = new GceInstancesServiceMock(nodeSettings); List dynamicHosts = buildDynamicNodes(mock, nodeSettings); assertThat(dynamicHosts, hasSize(2)); @@ -217,9 +221,9 @@ public void testMultipleZonesAndTwoNodesInDifferentZones() { */ public void testZeroNode43() { Settings nodeSettings = Settings.builder() - .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) - .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "us-central1-a", "us-central1-b") - .build(); + .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName) + .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "us-central1-a", "us-central1-b") + .build(); mock = new GceInstancesServiceMock(nodeSettings); List dynamicHosts = buildDynamicNodes(mock, nodeSettings); assertThat(dynamicHosts, hasSize(0)); diff --git a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceNetworkTests.java b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceNetworkTests.java index be87e900b3ef0..a1d7613bf2ba4 100644 --- a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceNetworkTests.java +++ b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceNetworkTests.java @@ -94,8 +94,10 @@ public void testNetworkHostPrivateIpInterface() throws IOException { * network.host: _local_ */ public void networkHostCoreLocal() throws IOException { - resolveGce("_local_", new NetworkService(Collections.emptyList()) - .resolveBindHostAddresses(new String[] { NetworkService.DEFAULT_NETWORK_HOST })); + resolveGce( + "_local_", + new NetworkService(Collections.emptyList()).resolveBindHostAddresses(new String[] { NetworkService.DEFAULT_NETWORK_HOST }) + ); } /** @@ -105,7 +107,7 @@ public void networkHostCoreLocal() throws IOException { * @throws IOException Well... If something goes wrong :) */ private void resolveGce(String gceNetworkSetting, InetAddress expected) throws IOException { - resolveGce(gceNetworkSetting, expected == null ? null : new InetAddress [] { expected }); + resolveGce(gceNetworkSetting, expected == null ? null : new InetAddress[] { expected }); } /** @@ -115,15 +117,14 @@ private void resolveGce(String gceNetworkSetting, InetAddress expected) throws I * @throws IOException Well... 
If something goes wrong :) */ private void resolveGce(String gceNetworkSetting, InetAddress[] expected) throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", gceNetworkSetting) - .build(); + Settings nodeSettings = Settings.builder().put("network.host", gceNetworkSetting).build(); GceMetadataServiceMock mock = new GceMetadataServiceMock(nodeSettings); NetworkService networkService = new NetworkService(Collections.singletonList(new GceNameResolver(mock))); try { InetAddress[] addresses = networkService.resolveBindHostAddresses( - NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY)); + NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY) + ); if (expected == null) { fail("We should get a IllegalArgumentException when setting network.host: _gce:doesnotexist_"); } diff --git a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapperTests.java b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapperTests.java index c4e4bdcc04bc7..c56719972acd8 100644 --- a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapperTests.java +++ b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapperTests.java @@ -107,24 +107,24 @@ public LowLevelHttpRequest buildRequest(String method, String url) { } public void testSimpleRetry() throws Exception { - FailThenSuccessBackoffTransport fakeTransport = - new FailThenSuccessBackoffTransport(HttpStatusCodes.STATUS_CODE_SERVER_ERROR, 3); + FailThenSuccessBackoffTransport fakeTransport = new FailThenSuccessBackoffTransport(HttpStatusCodes.STATUS_CODE_SERVER_ERROR, 3); - MockGoogleCredential credential = RetryHttpInitializerWrapper.newMockCredentialBuilder() - .build(); + MockGoogleCredential credential = RetryHttpInitializerWrapper.newMockCredentialBuilder().build(); MockSleeper mockSleeper = new MockSleeper(); - RetryHttpInitializerWrapper retryHttpInitializerWrapper = new RetryHttpInitializerWrapper(credential, mockSleeper, - TimeValue.timeValueSeconds(5)); + RetryHttpInitializerWrapper retryHttpInitializerWrapper = new RetryHttpInitializerWrapper( + credential, + mockSleeper, + TimeValue.timeValueSeconds(5) + ); - Compute client = new Compute.Builder(fakeTransport, new JacksonFactory(), null) - .setHttpRequestInitializer(retryHttpInitializerWrapper) - .setApplicationName("test") - .build(); + Compute client = new Compute.Builder(fakeTransport, new JacksonFactory(), null).setHttpRequestInitializer( + retryHttpInitializerWrapper + ).setApplicationName("test").build(); // TODO (URL) replace w/ opensearch url - HttpRequest request = client.getRequestFactory().buildRequest( - "Get", new GenericUrl("https://github.com/opensearch-project/OpenSearch"), null); + HttpRequest request = client.getRequestFactory() + .buildRequest("Get", new GenericUrl("https://github.com/opensearch-project/OpenSearch"), null); HttpResponse response = request.execute(); assertThat(mockSleeper.getCount(), equalTo(3)); @@ -135,11 +135,12 @@ public void testRetryWaitTooLong() throws Exception { TimeValue maxWaitTime = TimeValue.timeValueMillis(10); int maxRetryTimes = 50; - FailThenSuccessBackoffTransport fakeTransport = - new FailThenSuccessBackoffTransport(HttpStatusCodes.STATUS_CODE_SERVER_ERROR, maxRetryTimes); + FailThenSuccessBackoffTransport fakeTransport = new FailThenSuccessBackoffTransport( + 
HttpStatusCodes.STATUS_CODE_SERVER_ERROR, + maxRetryTimes + ); JsonFactory jsonFactory = new JacksonFactory(); - MockGoogleCredential credential = RetryHttpInitializerWrapper.newMockCredentialBuilder() - .build(); + MockGoogleCredential credential = RetryHttpInitializerWrapper.newMockCredentialBuilder().build(); MockSleeper oneTimeSleeper = new MockSleeper() { @Override @@ -151,14 +152,13 @@ public void sleep(long millis) throws InterruptedException { RetryHttpInitializerWrapper retryHttpInitializerWrapper = new RetryHttpInitializerWrapper(credential, oneTimeSleeper, maxWaitTime); - Compute client = new Compute.Builder(fakeTransport, jsonFactory, null) - .setHttpRequestInitializer(retryHttpInitializerWrapper) - .setApplicationName("test") - .build(); + Compute client = new Compute.Builder(fakeTransport, jsonFactory, null).setHttpRequestInitializer(retryHttpInitializerWrapper) + .setApplicationName("test") + .build(); // TODO (URL) replace w/ opensearch URL - HttpRequest request1 = client.getRequestFactory().buildRequest("Get", new GenericUrl( - "https://github.com/opensearch-project/OpenSearch"), null); + HttpRequest request1 = client.getRequestFactory() + .buildRequest("Get", new GenericUrl("https://github.com/opensearch-project/OpenSearch"), null); try { request1.execute(); fail("Request should fail if wait too long"); @@ -170,23 +170,27 @@ public void sleep(long millis) throws InterruptedException { } public void testIOExceptionRetry() throws Exception { - FailThenSuccessBackoffTransport fakeTransport = - new FailThenSuccessBackoffTransport(HttpStatusCodes.STATUS_CODE_SERVER_ERROR, 1, true); + FailThenSuccessBackoffTransport fakeTransport = new FailThenSuccessBackoffTransport( + HttpStatusCodes.STATUS_CODE_SERVER_ERROR, + 1, + true + ); - MockGoogleCredential credential = RetryHttpInitializerWrapper.newMockCredentialBuilder() - .build(); + MockGoogleCredential credential = RetryHttpInitializerWrapper.newMockCredentialBuilder().build(); MockSleeper mockSleeper = new MockSleeper(); - RetryHttpInitializerWrapper retryHttpInitializerWrapper = new RetryHttpInitializerWrapper(credential, mockSleeper, - TimeValue.timeValueSeconds(30L)); + RetryHttpInitializerWrapper retryHttpInitializerWrapper = new RetryHttpInitializerWrapper( + credential, + mockSleeper, + TimeValue.timeValueSeconds(30L) + ); - Compute client = new Compute.Builder(fakeTransport, new JacksonFactory(), null) - .setHttpRequestInitializer(retryHttpInitializerWrapper) - .setApplicationName("test") - .build(); + Compute client = new Compute.Builder(fakeTransport, new JacksonFactory(), null).setHttpRequestInitializer( + retryHttpInitializerWrapper + ).setApplicationName("test").build(); // TODO (URL) replace w/ opensearch URL - HttpRequest request = client.getRequestFactory().buildRequest("Get", new GenericUrl( - "https://github.com/opensearch-project/OpenSearch"), null); + HttpRequest request = client.getRequestFactory() + .buildRequest("Get", new GenericUrl("https://github.com/opensearch-project/OpenSearch"), null); HttpResponse response = request.execute(); assertThat(mockSleeper.getCount(), equalTo(1)); diff --git a/plugins/discovery-gce/src/yamlRestTest/java/org/opensearch/discovery/gce/DiscoveryGceClientYamlTestSuiteIT.java b/plugins/discovery-gce/src/yamlRestTest/java/org/opensearch/discovery/gce/DiscoveryGceClientYamlTestSuiteIT.java index 65fbee003d75b..6afcecf9e6d6f 100644 --- a/plugins/discovery-gce/src/yamlRestTest/java/org/opensearch/discovery/gce/DiscoveryGceClientYamlTestSuiteIT.java +++ 
b/plugins/discovery-gce/src/yamlRestTest/java/org/opensearch/discovery/gce/DiscoveryGceClientYamlTestSuiteIT.java @@ -49,4 +49,3 @@ public static Iterable parameters() throws Exception { return OpenSearchClientYamlSuiteTestCase.createParameters(); } } - diff --git a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/AttachmentProcessor.java b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/AttachmentProcessor.java index a7506816db0e7..0eb864a2d9ac0 100644 --- a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/AttachmentProcessor.java +++ b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/AttachmentProcessor.java @@ -71,8 +71,16 @@ public final class AttachmentProcessor extends AbstractProcessor { private final boolean ignoreMissing; private final String indexedCharsField; - AttachmentProcessor(String tag, String description, String field, String targetField, Set properties, - int indexedChars, boolean ignoreMissing, String indexedCharsField) { + AttachmentProcessor( + String tag, + String description, + String field, + String targetField, + Set properties, + int indexedChars, + boolean ignoreMissing, + String indexedCharsField + ) { super(tag, description); this.field = field; this.targetField = targetField; @@ -208,8 +216,12 @@ public static final class Factory implements Processor.Factory { static final Set DEFAULT_PROPERTIES = EnumSet.allOf(Property.class); @Override - public AttachmentProcessor create(Map registry, String processorTag, - String description, Map config) throws Exception { + public AttachmentProcessor create( + Map registry, + String processorTag, + String description, + Map config + ) throws Exception { String field = readStringProperty(TYPE, processorTag, config, "field"); String targetField = readStringProperty(TYPE, processorTag, config, "target_field", "attachment"); List propertyNames = readOptionalList(TYPE, processorTag, config, "properties"); @@ -224,16 +236,28 @@ public AttachmentProcessor create(Map registry, Strin try { properties.add(Property.parse(fieldName)); } catch (Exception e) { - throw newConfigurationException(TYPE, processorTag, "properties", "illegal field option [" + - fieldName + "]. valid values are " + Arrays.toString(Property.values())); + throw newConfigurationException( + TYPE, + processorTag, + "properties", + "illegal field option [" + fieldName + "]. 
valid values are " + Arrays.toString(Property.values()) + ); } } } else { properties = DEFAULT_PROPERTIES; } - return new AttachmentProcessor(processorTag, description, field, targetField, properties, indexedChars, ignoreMissing, - indexedCharsField); + return new AttachmentProcessor( + processorTag, + description, + field, + targetField, + properties, + indexedChars, + ignoreMissing, + indexedCharsField + ); } } diff --git a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java index d8b9661e21a70..2451eee8e984b 100644 --- a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java @@ -77,15 +77,17 @@ final class TikaImpl { /** Exclude some formats */ - private static final Set EXCLUDES = new HashSet<>(Arrays.asList( - MediaType.application("vnd.ms-visio.drawing"), - MediaType.application("vnd.ms-visio.drawing.macroenabled.12"), - MediaType.application("vnd.ms-visio.stencil"), - MediaType.application("vnd.ms-visio.stencil.macroenabled.12"), - MediaType.application("vnd.ms-visio.template"), - MediaType.application("vnd.ms-visio.template.macroenabled.12"), - MediaType.application("vnd.ms-visio.drawing") - )); + private static final Set EXCLUDES = new HashSet<>( + Arrays.asList( + MediaType.application("vnd.ms-visio.drawing"), + MediaType.application("vnd.ms-visio.drawing.macroenabled.12"), + MediaType.application("vnd.ms-visio.stencil"), + MediaType.application("vnd.ms-visio.stencil.macroenabled.12"), + MediaType.application("vnd.ms-visio.template"), + MediaType.application("vnd.ms-visio.template.macroenabled.12"), + MediaType.application("vnd.ms-visio.drawing") + ) + ); /** subset of parsers for types we support */ private static final Parser PARSERS[] = new Parser[] { @@ -100,8 +102,7 @@ final class TikaImpl { new org.apache.tika.parser.odf.OpenDocumentParser(), new org.apache.tika.parser.iwork.IWorkPackageParser(), new org.apache.tika.parser.xml.DcXMLParser(), - new org.apache.tika.parser.epub.EpubParser(), - }; + new org.apache.tika.parser.epub.EpubParser(), }; /** autodetector based on this subset */ private static final AutoDetectParser PARSER_INSTANCE = new AutoDetectParser(PARSERS); @@ -117,8 +118,10 @@ static String parse(final byte content[], final Metadata metadata, final int lim SpecialPermission.check(); try { - return AccessController.doPrivileged((PrivilegedExceptionAction) - () -> TIKA_INSTANCE.parseToString(new ByteArrayInputStream(content), metadata, limit), RESTRICTED_CONTEXT); + return AccessController.doPrivileged( + (PrivilegedExceptionAction) () -> TIKA_INSTANCE.parseToString(new ByteArrayInputStream(content), metadata, limit), + RESTRICTED_CONTEXT + ); } catch (PrivilegedActionException e) { // checked exception from tika: unbox it Throwable cause = e.getCause(); @@ -135,9 +138,7 @@ static String parse(final byte content[], final Metadata metadata, final int lim // apply additional containment for parsers, this is intersected with the current permissions // its hairy, but worth it so we don't have some XML flaw reading random crap from the FS private static final AccessControlContext RESTRICTED_CONTEXT = new AccessControlContext( - new ProtectionDomain[] { - new ProtectionDomain(null, getRestrictedPermissions()) - } + new ProtectionDomain[] { new ProtectionDomain(null, getRestrictedPermissions()) } ); // compute some minimal 
permissions for parsers. they only get r/w access to the java temp directory, @@ -155,7 +156,7 @@ static PermissionCollection getRestrictedPermissions() { addReadPermissions(perms, JarHell.parseClassPath()); // plugin jars if (TikaImpl.class.getClassLoader() instanceof URLClassLoader) { - URL[] urls = ((URLClassLoader)TikaImpl.class.getClassLoader()).getURLs(); + URL[] urls = ((URLClassLoader) TikaImpl.class.getClassLoader()).getURLs(); Set set = new LinkedHashSet<>(Arrays.asList(urls)); if (set.size() != urls.length) { throw new AssertionError("duplicate jars: " + Arrays.toString(urls)); @@ -163,8 +164,13 @@ static PermissionCollection getRestrictedPermissions() { addReadPermissions(perms, set); } // jvm's java.io.tmpdir (needs read/write) - FilePermissionUtils.addDirectoryPath(perms, "java.io.tmpdir", PathUtils.get(System.getProperty("java.io.tmpdir")), - "read,readlink,write,delete", false); + FilePermissionUtils.addDirectoryPath( + perms, + "java.io.tmpdir", + PathUtils.get(System.getProperty("java.io.tmpdir")), + "read,readlink,write,delete", + false + ); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/AttachmentProcessorTests.java b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/AttachmentProcessorTests.java index c10ec76beea92..e9c87ae3a6109 100644 --- a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/AttachmentProcessorTests.java +++ b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/AttachmentProcessorTests.java @@ -69,8 +69,16 @@ public class AttachmentProcessorTests extends OpenSearchTestCase { @Before public void createStandardProcessor() { - processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field", - "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 10000, false, null); + processor = new AttachmentProcessor( + randomAlphaOfLength(10), + null, + "source_field", + "target_field", + EnumSet.allOf(AttachmentProcessor.Property.class), + 10000, + false, + null + ); } public void testEnglishTextDocument() throws Exception { @@ -84,9 +92,10 @@ public void testEnglishTextDocument() throws Exception { } public void testHtmlDocumentWithRandomFields() throws Exception { - //date is not present in the html doc - ArrayList fieldsList = new ArrayList<>(EnumSet.complementOf(EnumSet.of - (AttachmentProcessor.Property.DATE))); + // date is not present in the html doc + ArrayList fieldsList = new ArrayList<>( + EnumSet.complementOf(EnumSet.of(AttachmentProcessor.Property.DATE)) + ); Set selectedProperties = new HashSet<>(); int numFields = randomIntBetween(1, fieldsList.size()); @@ -102,8 +111,16 @@ public void testHtmlDocumentWithRandomFields() throws Exception { if (randomBoolean()) { selectedProperties.add(AttachmentProcessor.Property.DATE); } - processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field", - "target_field", selectedProperties, 10000, false, null); + processor = new AttachmentProcessor( + randomAlphaOfLength(10), + null, + "source_field", + "target_field", + selectedProperties, + 10000, + false, + null + ); Map attachmentData = parseDocument("htmlWithEmptyDateMeta.html", processor); assertThat(attachmentData.keySet(), hasSize(selectedFieldNames.length)); @@ -133,49 +150,51 @@ public void testEmptyTextDocument() throws Exception { public void testWordDocument() throws Exception { Map attachmentData = parseDocument("issue-104.docx", processor); 
- assertThat(attachmentData.keySet(), containsInAnyOrder("content", "language", "date", "author", "content_type", - "content_length")); + assertThat(attachmentData.keySet(), containsInAnyOrder("content", "language", "date", "author", "content_type", "content_length")); assertThat(attachmentData.get("content"), is(notNullValue())); assertThat(attachmentData.get("language"), is("en")); assertThat(attachmentData.get("date"), is("2012-10-12T11:17:00Z")); assertThat(attachmentData.get("author"), is("Windows User")); assertThat(attachmentData.get("content_length"), is(notNullValue())); - assertThat(attachmentData.get("content_type").toString(), - is("application/vnd.openxmlformats-officedocument.wordprocessingml.document")); + assertThat( + attachmentData.get("content_type").toString(), + is("application/vnd.openxmlformats-officedocument.wordprocessingml.document") + ); } public void testWordDocumentWithVisioSchema() throws Exception { Map attachmentData = parseDocument("issue-22077.docx", processor); - assertThat(attachmentData.keySet(), containsInAnyOrder("content", "language", "date", "author", "content_type", - "content_length")); + assertThat(attachmentData.keySet(), containsInAnyOrder("content", "language", "date", "author", "content_type", "content_length")); assertThat(attachmentData.get("content").toString(), containsString("Table of Contents")); assertThat(attachmentData.get("language"), is("en")); assertThat(attachmentData.get("date"), is("2015-01-06T18:07:00Z")); assertThat(attachmentData.get("author"), is(notNullValue())); assertThat(attachmentData.get("content_length"), is(notNullValue())); - assertThat(attachmentData.get("content_type").toString(), - is("application/vnd.openxmlformats-officedocument.wordprocessingml.document")); + assertThat( + attachmentData.get("content_type").toString(), + is("application/vnd.openxmlformats-officedocument.wordprocessingml.document") + ); } public void testLegacyWordDocumentWithVisioSchema() throws Exception { Map attachmentData = parseDocument("issue-22077.doc", processor); - assertThat(attachmentData.keySet(), containsInAnyOrder("content", "language", "date", "author", "content_type", - "content_length")); + assertThat(attachmentData.keySet(), containsInAnyOrder("content", "language", "date", "author", "content_type", "content_length")); assertThat(attachmentData.get("content").toString(), containsString("Table of Contents")); assertThat(attachmentData.get("language"), is("en")); assertThat(attachmentData.get("date"), is("2016-12-16T15:04:00Z")); assertThat(attachmentData.get("author"), is(notNullValue())); assertThat(attachmentData.get("content_length"), is(notNullValue())); - assertThat(attachmentData.get("content_type").toString(), - is("application/msword")); + assertThat(attachmentData.get("content_type").toString(), is("application/msword")); } public void testPdf() throws Exception { Map attachmentData = parseDocument("test.pdf", processor); - assertThat(attachmentData.get("content"), - is("This is a test, with umlauts, from München\n\nAlso contains newlines for testing.\n\nAnd one more.")); + assertThat( + attachmentData.get("content"), + is("This is a test, with umlauts, from München\n\nAlso contains newlines for testing.\n\nAnd one more.") + ); assertThat(attachmentData.get("content_type").toString(), is("application/pdf")); assertThat(attachmentData.get("content_length"), is(notNullValue())); } @@ -195,8 +214,10 @@ public void testEncryptedPdf() throws Exception { public void testHtmlDocument() throws Exception { Map attachmentData 
= parseDocument("htmlWithEmptyDateMeta.html", processor); - assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "author", "keywords", "title", "content_type", - "content_length")); + assertThat( + attachmentData.keySet(), + containsInAnyOrder("language", "content", "author", "keywords", "title", "content_type", "content_length") + ); assertThat(attachmentData.get("language"), is("en")); assertThat(attachmentData.get("content"), is(notNullValue())); assertThat(attachmentData.get("content_length"), is(notNullValue())); @@ -216,8 +237,10 @@ public void testXHtmlDocument() throws Exception { public void testEpubDocument() throws Exception { Map attachmentData = parseDocument("testEPUB.epub", processor); - assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "author", "title", "content_type", "content_length", - "date", "keywords")); + assertThat( + attachmentData.keySet(), + containsInAnyOrder("language", "content", "author", "title", "content_type", "content_length", "date", "keywords") + ); assertThat(attachmentData.get("content_type").toString(), containsString("application/epub+zip")); } @@ -259,8 +282,10 @@ public void testParseAsBytesArray() throws Exception { } public void testNullValueWithIgnoreMissing() throws Exception { - IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), - Collections.singletonMap("source_field", null)); + IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument( + random(), + Collections.singletonMap("source_field", null) + ); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field", "randomTarget", null, 10, true, null); processor.execute(ingestDocument); @@ -276,8 +301,10 @@ public void testNonExistentWithIgnoreMissing() throws Exception { } public void testNullWithoutIgnoreMissing() throws Exception { - IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), - Collections.singletonMap("source_field", null)); + IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument( + random(), + Collections.singletonMap("source_field", null) + ); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field", "randomTarget", null, 10, false, null); Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument)); @@ -311,8 +338,16 @@ private Map parseDocument(String file, AttachmentProcessor proce } public void testIndexedChars() throws Exception { - processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field", - "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 19, false, null); + processor = new AttachmentProcessor( + randomAlphaOfLength(10), + null, + "source_field", + "target_field", + EnumSet.allOf(AttachmentProcessor.Property.class), + 19, + false, + null + ); Map attachmentData = parseDocument("text-in-english.txt", processor); @@ -322,8 +357,16 @@ public void testIndexedChars() throws Exception { assertThat(attachmentData.get("content_type").toString(), containsString("text/plain")); assertThat(attachmentData.get("content_length"), is(19L)); - processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field", - "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 19, false, "max_length"); + 
processor = new AttachmentProcessor( + randomAlphaOfLength(10), + null, + "source_field", + "target_field", + EnumSet.allOf(AttachmentProcessor.Property.class), + 19, + false, + "max_length" + ); attachmentData = parseDocument("text-in-english.txt", processor); diff --git a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java index 54ebe166eef91..61848b6419685 100644 --- a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java +++ b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaDocTests.java @@ -65,8 +65,8 @@ public void testFiles() throws Exception { try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmp)) { for (Path doc : stream) { - logger.debug("parsing: {}", doc); - assertParseable(doc); + logger.debug("parsing: {}", doc); + assertParseable(doc); } } } diff --git a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaImplTests.java b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaImplTests.java index a237b803f9dff..bd19453319e29 100644 --- a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaImplTests.java +++ b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/TikaImplTests.java @@ -36,8 +36,8 @@ public class TikaImplTests extends OpenSearchTestCase { - public void testTikaLoads() throws Exception { - Class.forName("org.opensearch.ingest.attachment.TikaImpl"); - } + public void testTikaLoads() throws Exception { + Class.forName("org.opensearch.ingest.attachment.TikaImpl"); + } } diff --git a/plugins/ingest-attachment/src/yamlRestTest/java/org/opensearch/ingest/attachment/IngestAttachmentClientYamlTestSuiteIT.java b/plugins/ingest-attachment/src/yamlRestTest/java/org/opensearch/ingest/attachment/IngestAttachmentClientYamlTestSuiteIT.java index 72e6f2e22754d..c1cf9440eb379 100644 --- a/plugins/ingest-attachment/src/yamlRestTest/java/org/opensearch/ingest/attachment/IngestAttachmentClientYamlTestSuiteIT.java +++ b/plugins/ingest-attachment/src/yamlRestTest/java/org/opensearch/ingest/attachment/IngestAttachmentClientYamlTestSuiteIT.java @@ -49,4 +49,3 @@ public static Iterable<Object[]> parameters() throws Exception { return OpenSearchClientYamlSuiteTestCase.createParameters(); } } - diff --git a/plugins/mapper-annotated-text/src/internalClusterTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java b/plugins/mapper-annotated-text/src/internalClusterTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java index cf0cd7d8d4288..87933ab3df6be 100644 --- a/plugins/mapper-annotated-text/src/internalClusterTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java +++ b/plugins/mapper-annotated-text/src/internalClusterTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java @@ -106,16 +106,14 @@ protected void assertParseMaximalWarnings() { protected void registerParameters(ParameterChecker checker) throws IOException { checker.registerUpdateCheck(b -> { - b.field("analyzer", "default"); - b.field("search_analyzer", "keyword"); - }, - m -> assertEquals("keyword", m.fieldType().getTextSearchInfo().getSearchAnalyzer().name())); + b.field("analyzer", "default"); + b.field("search_analyzer", "keyword"); + }, m -> assertEquals("keyword",
m.fieldType().getTextSearchInfo().getSearchAnalyzer().name())); checker.registerUpdateCheck(b -> { - b.field("analyzer", "default"); - b.field("search_analyzer", "keyword"); - b.field("search_quote_analyzer", "keyword"); - }, - m -> assertEquals("keyword", m.fieldType().getTextSearchInfo().getSearchQuoteAnalyzer().name())); + b.field("analyzer", "default"); + b.field("search_analyzer", "keyword"); + b.field("search_quote_analyzer", "keyword"); + }, m -> assertEquals("keyword", m.fieldType().getTextSearchInfo().getSearchQuoteAnalyzer().name())); checker.registerConflictCheck("store", b -> b.field("store", true)); checker.registerConflictCheck("index_options", b -> b.field("index_options", "docs")); @@ -126,26 +124,20 @@ protected void registerParameters(ParameterChecker checker) throws IOException { checker.registerConflictCheck("position_increment_gap", b -> b.field("position_increment_gap", 10)); // norms can be set from true to false, but not vice versa - checker.registerConflictCheck("norms", - fieldMapping(b -> { - b.field("type", "annotated_text"); - b.field("norms", false); - }), - fieldMapping(b -> { - b.field("type", "annotated_text"); - b.field("norms", true); - })); - checker.registerUpdateCheck( - b -> { - b.field("type", "annotated_text"); - b.field("norms", true); - }, - b -> { - b.field("type", "annotated_text"); - b.field("norms", false); - }, - m -> assertFalse(m.fieldType().getTextSearchInfo().hasNorms()) - ); + checker.registerConflictCheck("norms", fieldMapping(b -> { + b.field("type", "annotated_text"); + b.field("norms", false); + }), fieldMapping(b -> { + b.field("type", "annotated_text"); + b.field("norms", true); + })); + checker.registerUpdateCheck(b -> { + b.field("type", "annotated_text"); + b.field("norms", true); + }, b -> { + b.field("type", "annotated_text"); + b.field("norms", false); + }, m -> assertFalse(m.fieldType().getTextSearchInfo().hasNorms())); checker.registerUpdateCheck(b -> b.field("boost", 2.0), m -> assertEquals(m.fieldType().boost(), 2.0, 0)); } @@ -221,7 +213,6 @@ public void testAnnotationInjection() throws IOException { assertEquals(0, postings.nextDoc()); assertEquals(2, postings.nextPosition()); - assertTrue(terms.seekExact(new BytesRef("hush"))); postings = terms.postings(null, PostingsEnum.POSITIONS); assertEquals(0, postings.nextDoc()); @@ -270,8 +261,7 @@ public void testIndexedTermVectors() throws IOException { })); String text = "the quick [brown](Color) fox jumped over the lazy dog"; - ParsedDocument doc - = mapperService.documentMapper().parse(source(b -> b.field("field", text))); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.field("field", text))); withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), reader -> { LeafReader leaf = reader.leaves().get(0).reader(); @@ -282,7 +272,7 @@ public void testIndexedTermVectors() throws IOException { while ((term = iterator.next()) != null) { foundTerms.add(term.utf8ToString()); } - //Check we have both text and annotation tokens + // Check we have both text and annotation tokens assertTrue(foundTerms.contains("brown")); assertTrue(foundTerms.contains("Color")); assertTrue(foundTerms.contains("fox")); @@ -406,62 +396,92 @@ public void testPositionIncrementGap() throws IOException { } public void testSearchAnalyzerSerialization() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") + String mapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + 
.startObject("_doc") .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "standard") - .field("search_analyzer", "keyword") - .endObject() - .endObject().endObject().endObject()); + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "standard") + .field("search_analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + ); DocumentMapper mapper = createDocumentMapper("_doc", mapping); - assertEquals(mapping, mapper.mappingSource().toString()); + assertEquals(mapping, mapper.mappingSource().toString()); // special case: default index analyzer - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") + mapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "default") - .field("search_analyzer", "keyword") - .endObject() - .endObject().endObject().endObject()); + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "default") + .field("search_analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + ); mapper = createDocumentMapper("_doc", mapping); - assertEquals(mapping, mapper.mappingSource().toString()); + assertEquals(mapping, mapper.mappingSource().toString()); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") - .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "keyword") - .endObject() - .endObject().endObject().endObject()); + mapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + ); mapper = createDocumentMapper("_doc", mapping); - assertEquals(mapping, mapper.mappingSource().toString()); + assertEquals(mapping, mapper.mappingSource().toString()); // special case: default search analyzer - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") - .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "keyword") - .field("search_analyzer", "default") - .endObject() - .endObject().endObject().endObject()); + mapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "keyword") + .field("search_analyzer", "default") + .endObject() + .endObject() + .endObject() + .endObject() + ); mapper = createDocumentMapper("_doc", mapping); - assertEquals(mapping, mapper.mappingSource().toString()); - - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") - .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "keyword") - .endObject() - .endObject().endObject().endObject()); + assertEquals(mapping, mapper.mappingSource().toString()); + + mapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + ); 
mapper = createDocumentMapper("_doc", mapping); XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -476,32 +496,44 @@ public void testSearchAnalyzerSerialization() throws IOException { } public void testSearchQuoteAnalyzerSerialization() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") + String mapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") .startObject("properties") - .startObject("field") - .field("type","annotated_text") - .field("analyzer", "standard") - .field("search_analyzer", "standard") - .field("search_quote_analyzer", "keyword") - .endObject() - .endObject().endObject().endObject()); + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "standard") + .field("search_analyzer", "standard") + .field("search_quote_analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + ); DocumentMapper mapper = createDocumentMapper("_doc", mapping); - assertEquals(mapping, mapper.mappingSource().toString()); + assertEquals(mapping, mapper.mappingSource().toString()); // special case: default index/search analyzer - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") + mapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "default") - .field("search_analyzer", "default") - .field("search_quote_analyzer", "keyword") - .endObject() - .endObject().endObject().endObject()); + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "default") + .field("search_analyzer", "default") + .field("search_quote_analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + ); mapper = createDocumentMapper("_doc", mapping); - assertEquals(mapping, mapper.mappingSource().toString()); + assertEquals(mapping, mapper.mappingSource().toString()); } public void testTermVectors() throws IOException { @@ -578,8 +610,7 @@ public void testAnalyzedFieldPositionIncrementWithoutPositions() { b.field("index_options", indexOptions); b.field("position_increment_gap", 0); }))); - assertThat(e.getMessage(), - containsString("Cannot set position_increment_gap on field [field] without positions enabled")); + assertThat(e.getMessage(), containsString("Cannot set position_increment_gap on field [field] without positions enabled")); } } diff --git a/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index ba5f1d0711c7e..e377a7c030f37 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -90,7 +90,7 @@ public class AnnotatedTextFieldMapper extends ParametrizedFieldMapper { private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1; private static Builder builder(FieldMapper in) { - return ((AnnotatedTextFieldMapper)in).builder; + return ((AnnotatedTextFieldMapper) in).builder; } public static class Builder extends ParametrizedFieldMapper.Builder { @@ -98,20 +98,22 @@ public static class Builder extends ParametrizedFieldMapper.Builder { 
private final Parameter<Boolean> store = Parameter.storeParam(m -> builder(m).store.getValue(), false); final TextParams.Analyzers analyzers; - final Parameter<SimilarityProvider> similarity - = TextParams.similarity(m -> builder(m).similarity.getValue()); + final Parameter<SimilarityProvider> similarity = TextParams.similarity(m -> builder(m).similarity.getValue()); final Parameter<String> indexOptions = TextParams.indexOptions(m -> builder(m).indexOptions.getValue()); final Parameter<Boolean> norms = TextParams.norms(true, m -> builder(m).norms.getValue()); final Parameter<String> termVectors = TextParams.termVectors(m -> builder(m).termVectors.getValue()); - final Parameter<Integer> positionIncrementGap = Parameter.intParam("position_increment_gap", false, - m -> builder(m).positionIncrementGap.getValue(), POSITION_INCREMENT_GAP_USE_ANALYZER) - .setValidator(v -> { - if (v != POSITION_INCREMENT_GAP_USE_ANALYZER && v < 0) { - throw new MapperParsingException("[positions_increment_gap] must be positive, got [" + v + "]"); - } - }); + final Parameter<Integer> positionIncrementGap = Parameter.intParam( + "position_increment_gap", + false, + m -> builder(m).positionIncrementGap.getValue(), + POSITION_INCREMENT_GAP_USE_ANALYZER + ).setValidator(v -> { + if (v != POSITION_INCREMENT_GAP_USE_ANALYZER && v < 0) { + throw new MapperParsingException("[positions_increment_gap] must be positive, got [" + v + "]"); + } + }); private final Parameter<Float> boost = Parameter.boostParam(); private final Parameter<Map<String, String>> meta = Parameter.metaParam(); @@ -123,14 +125,23 @@ public Builder(String name, IndexAnalyzers indexAnalyzers) { @Override protected List<Parameter<?>> getParameters() { - return Arrays.asList(store, indexOptions, norms, termVectors, similarity, - analyzers.indexAnalyzer, analyzers.searchAnalyzer, analyzers.searchQuoteAnalyzer, positionIncrementGap, - boost, meta); + return Arrays.asList( + store, + indexOptions, + norms, + termVectors, + similarity, + analyzers.indexAnalyzer, + analyzers.searchAnalyzer, + analyzers.searchQuoteAnalyzer, + positionIncrementGap, + boost, + meta + ); } private NamedAnalyzer wrapAnalyzer(NamedAnalyzer in, int positionIncrementGap) { - return new NamedAnalyzer(in.name(), AnalyzerScope.INDEX, - new AnnotationAnalyzerWrapper(in.analyzer()), positionIncrementGap); + return new NamedAnalyzer(in.name(), AnalyzerScope.INDEX, new AnnotationAnalyzerWrapper(in.analyzer()), positionIncrementGap); } private AnnotatedTextFieldType buildFieldType(FieldType fieldType, BuilderContext context) { @@ -139,8 +150,9 @@ private AnnotatedTextFieldType buildFieldType(FieldType fieldType, BuilderContex posGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP; } else { if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) { - throw new IllegalArgumentException("Cannot set position_increment_gap on field [" + name() - + "] without positions enabled"); + throw new IllegalArgumentException( + "Cannot set position_increment_gap on field [" + name() + "] without positions enabled" + ); } posGap = positionIncrementGap.get(); } @@ -148,12 +160,9 @@ private AnnotatedTextFieldType buildFieldType(FieldType fieldType, BuilderContex fieldType, similarity.get(), wrapAnalyzer(analyzers.getSearchAnalyzer(), posGap), - wrapAnalyzer(analyzers.getSearchQuoteAnalyzer(), posGap)); - AnnotatedTextFieldType ft = new AnnotatedTextFieldType( - buildFullName(context), - store.getValue(), - tsi, - meta.getValue()); + wrapAnalyzer(analyzers.getSearchQuoteAnalyzer(), posGap) + ); + AnnotatedTextFieldType ft = new AnnotatedTextFieldType(buildFullName(context), store.getValue(), tsi,
meta.getValue()); ft.setIndexAnalyzer(wrapAnalyzer(analyzers.getIndexAnalyzer(), posGap)); ft.setBoost(boost.getValue()); return ft; @@ -162,12 +171,17 @@ private AnnotatedTextFieldType buildFieldType(FieldType fieldType, BuilderContex @Override public AnnotatedTextFieldMapper build(BuilderContext context) { FieldType fieldType = TextParams.buildFieldType(() -> true, store, indexOptions, norms, termVectors); - if (fieldType.indexOptions() == IndexOptions.NONE ) { + if (fieldType.indexOptions() == IndexOptions.NONE) { throw new IllegalArgumentException("[" + CONTENT_TYPE + "] fields must be indexed"); } return new AnnotatedTextFieldMapper( - name, fieldType, buildFieldType(fieldType, context), - multiFieldsBuilder.build(this, context), copyTo.build(), this); + name, + fieldType, + buildFieldType(fieldType, context), + multiFieldsBuilder.build(this, context), + copyTo.build(), + this + ); } } @@ -183,16 +197,16 @@ public static final class AnnotatedText { List<AnnotationToken> annotations; // Format is markdown-like syntax for URLs eg: - // "New mayor is [John Smith](type=person&value=John%20Smith) " + // "New mayor is [John Smith](type=person&value=John%20Smith) " static Pattern markdownPattern = Pattern.compile("\\[([^]\\[]*)]\\(([^)(]*)\\)"); - public static AnnotatedText parse (String textPlusMarkup) { - List<AnnotationToken> annotations =new ArrayList<>(); + public static AnnotatedText parse(String textPlusMarkup) { + List<AnnotationToken> annotations = new ArrayList<>(); Matcher m = markdownPattern.matcher(textPlusMarkup); int lastPos = 0; StringBuilder sb = new StringBuilder(); - while(m.find()){ - if(m.start() > lastPos){ + while (m.find()) { + if (m.start() > lastPos) { sb.append(textPlusMarkup, lastPos, m.start()); } @@ -210,9 +224,9 @@ public static AnnotatedText parse (String textPlusMarkup) { throw new OpenSearchParseException("key=value pairs are not supported in annotations"); } if (kv.length == 1) { - //Check "=" sign wasn't in the pair string + // Check "=" sign wasn't in the pair string if (kv[0].length() == pair.length()) { - //untyped value + // untyped value value = URLDecoder.decode(kv[0], "UTF-8"); } } @@ -224,7 +238,7 @@ public static AnnotatedText parse (String textPlusMarkup) { } } } - if(lastPos < textPlusMarkup.length()){ + if (lastPos < textPlusMarkup.length()) { sb.append(textPlusMarkup.substring(lastPos)); } return new AnnotatedText(sb.toString(), textPlusMarkup, annotations); @@ -241,19 +255,22 @@ public static final class AnnotationToken { public final int endOffset; public final String value; + public AnnotationToken(int offset, int endOffset, String value) { this.offset = offset; this.endOffset = endOffset; this.value = value; } + @Override public String toString() { - return value +" ("+offset+" - "+endOffset+")"; + return value + " (" + offset + " - " + endOffset + ")"; } public boolean intersects(int start, int end) { - return (start <= offset && end >= offset) || (start <= endOffset && end >= endOffset) - || (start >= offset && end <= endOffset); + return (start <= offset && end >= offset) + || (start <= endOffset && end >= endOffset) + || (start >= offset && end <= endOffset); } @Override @@ -268,29 +285,27 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; AnnotationToken other = (AnnotationToken) obj; - return Objects.equals(endOffset,
other.endOffset) && Objects.equals(offset, other.offset) - && Objects.equals(value, other.value); + return Objects.equals(endOffset, other.endOffset) + && Objects.equals(offset, other.offset) + && Objects.equals(value, other.value); } } @Override public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(textMinusMarkup); - sb.append("\n"); - annotations.forEach(a -> { - sb.append(a); - sb.append("\n"); - }); - return sb.toString(); + StringBuilder sb = new StringBuilder(); + sb.append(textMinusMarkup); + sb.append("\n"); + annotations.forEach(a -> { + sb.append(a); + sb.append("\n"); + }); + return sb.toString(); } public int numAnnotations() { @@ -311,14 +326,14 @@ public static final class AnnotatedHighlighterAnalyzer extends AnalyzerWrapper { private final Analyzer delegate; private AnnotatedText[] annotations; - public AnnotatedHighlighterAnalyzer(Analyzer delegate){ + public AnnotatedHighlighterAnalyzer(Analyzer delegate) { super(delegate.getReuseStrategy()); this.delegate = delegate; } @Override public Analyzer getWrappedAnalyzer(String fieldName) { - return delegate; + return delegate; } public void setAnnotations(AnnotatedText[] annotations) { @@ -344,13 +359,13 @@ public static final class AnnotationAnalyzerWrapper extends AnalyzerWrapper { private final Analyzer delegate; public AnnotationAnalyzerWrapper(Analyzer delegate) { - super(delegate.getReuseStrategy()); - this.delegate = delegate; + super(delegate.getReuseStrategy()); + this.delegate = delegate; } @Override public Analyzer getWrappedAnalyzer(String fieldName) { - return delegate; + return delegate; } @Override @@ -383,7 +398,6 @@ static String readToString(Reader reader) { } } - public static final class AnnotationsInjector extends TokenFilter { private AnnotatedText annotatedText; @@ -400,17 +414,17 @@ public static final class AnnotationsInjector extends TokenFilter { private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class); public AnnotationsInjector(TokenStream in) { - super(in); + super(in); } public void setAnnotations(AnnotatedText annotatedText) { - this.annotatedText = annotatedText; - currentAnnotationIndex = 0; - if(annotatedText!=null && annotatedText.numAnnotations()>0){ - nextAnnotationForInjection = annotatedText.getAnnotation(0); - } else { - nextAnnotationForInjection = null; - } + this.annotatedText = annotatedText; + currentAnnotationIndex = 0; + if (annotatedText != null && annotatedText.numAnnotations() > 0) { + nextAnnotationForInjection = annotatedText.getAnnotation(0); + } else { + nextAnnotationForInjection = null; + } } @Override @@ -423,17 +437,17 @@ public void reset() throws IOException { // Abstracts if we are pulling from some pre-cached buffer of // text tokens or directly from the wrapped TokenStream - private boolean internalNextToken() throws IOException{ - if (pendingStatePos < pendingStates.size()){ + private boolean internalNextToken() throws IOException { + if (pendingStatePos < pendingStates.size()) { restoreState(pendingStates.get(pendingStatePos)); - pendingStatePos ++; - if(pendingStatePos >=pendingStates.size()){ - pendingStatePos =0; + pendingStatePos++; + if (pendingStatePos >= pendingStates.size()) { + pendingStatePos = 0; pendingStates.clear(); } return true; } - if(inputExhausted) { + if (inputExhausted) { return false; } return input.incrementToken(); @@ -458,7 +472,7 @@ public boolean incrementToken() throws IOException { // Buffer up all the other tokens spanned by this annotation to determine length. 
if (input.incrementToken()) { if (textOffsetAtt.endOffset() <= nextAnnotationForInjection.endOffset - && textOffsetAtt.startOffset() < nextAnnotationForInjection.endOffset) { + && textOffsetAtt.startOffset() < nextAnnotationForInjection.endOffset) { annotationPosLen += posAtt.getPositionIncrement(); } pendingStates.add(captureState()); @@ -479,7 +493,7 @@ public boolean incrementToken() throws IOException { } private void setType() { - //Default annotation type - in future AnnotationTokens may contain custom type info + // Default annotation type - in future AnnotationTokens may contain custom type info typeAtt.setType("annotation"); } @@ -494,22 +508,20 @@ private void emitAnnotation(int firstSpannedTextPosInc, int annotationPosLen) th final AnnotatedText.AnnotationToken firstAnnotationAtThisPos = nextAnnotationForInjection; while (nextAnnotationForInjection != null && nextAnnotationForInjection.offset == annotationOffset) { - setType(); termAtt.resizeBuffer(nextAnnotationForInjection.value.length()); termAtt.copyBuffer(nextAnnotationForInjection.value.toCharArray(), 0, nextAnnotationForInjection.value.length()); if (nextAnnotationForInjection == firstAnnotationAtThisPos) { posAtt.setPositionIncrement(firstSpannedTextPosInc); - //Put at the head of the queue of tokens to be emitted + // Put at the head of the queue of tokens to be emitted pendingStates.add(0, captureState()); } else { posAtt.setPositionIncrement(0); - //Put after the head of the queue of tokens to be emitted + // Put after the head of the queue of tokens to be emitted pendingStates.add(1, captureState()); } - // Flag the inject annotation as null to prevent re-injection. currentAnnotationIndex++; if (currentAnnotationIndex < annotatedText.numAnnotations()) { @@ -522,7 +534,7 @@ private void emitAnnotation(int firstSpannedTextPosInc, int annotationPosLen) th internalNextToken(); } - } + } public static final class AnnotatedTextFieldType extends TextFieldMapper.TextFieldType { @@ -543,8 +555,14 @@ public String typeName() { private final FieldType fieldType; private final Builder builder; - protected AnnotatedTextFieldMapper(String simpleName, FieldType fieldType, AnnotatedTextFieldType mappedFieldType, - MultiFields multiFields, CopyTo copyTo, Builder builder) { + protected AnnotatedTextFieldMapper( + String simpleName, + FieldType fieldType, + AnnotatedTextFieldType mappedFieldType, + MultiFields multiFields, + CopyTo copyTo, + Builder builder + ) { super(simpleName, mappedFieldType, multiFields, copyTo); assert fieldType.tokenized(); this.fieldType = fieldType; diff --git a/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java b/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java index f83cc12609632..8a94fea0ebbd4 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java @@ -52,7 +52,6 @@ */ public class AnnotatedPassageFormatter extends PassageFormatter { - public static final String SEARCH_HIT_TYPE = "_hit_term"; private final Encoder encoder; AnnotatedText[] annotations; @@ -70,72 +69,79 @@ static class MarkupPassage { int lastMarkupEnd = -1; public void addUnlessOverlapping(Markup newMarkup) { - + // Fast exit. 
- if(newMarkup.start > lastMarkupEnd) { + if (newMarkup.start > lastMarkupEnd) { markups.add(newMarkup); - lastMarkupEnd = newMarkup.end; + lastMarkupEnd = newMarkup.end; return; } - + // Check to see if this new markup overlaps with any prior - int index=0; - for (Markup existingMarkup: markups) { - if(existingMarkup.samePosition(newMarkup)) { + int index = 0; + for (Markup existingMarkup : markups) { + if (existingMarkup.samePosition(newMarkup)) { existingMarkup.merge(newMarkup); return; } - if(existingMarkup.overlaps(newMarkup)) { + if (existingMarkup.overlaps(newMarkup)) { // existing markup wins - we throw away the new markup that would span this position return; } - // markup list is in start offset order so we can insert at this position then shift others right - if(existingMarkup.isAfter(newMarkup)) { + // markup list is in start offset order so we can insert at this position then shift others right + if (existingMarkup.isAfter(newMarkup)) { markups.add(index, newMarkup); return; } index++; } markups.add(newMarkup); - lastMarkupEnd = newMarkup.end; + lastMarkupEnd = newMarkup.end; } - + } + static class Markup { int start; int end; String metadata; + Markup(int start, int end, String metadata) { super(); this.start = start; this.end = end; this.metadata = metadata; } + boolean isAfter(Markup other) { return start > other.end; } + void merge(Markup newMarkup) { - // metadata is key1=value&key2=value&.... syntax used for urls + // metadata is key1=value&key2=value&.... syntax used for urls assert samePosition(newMarkup); metadata += "&" + newMarkup.metadata; } + boolean samePosition(Markup other) { return this.start == other.start && this.end == other.end; } + boolean overlaps(Markup other) { - return (start<=other.start && end >= other.start) - || (start <= other.end && end >=other.end) - || (start>=other.start && end<=other.end); + return (start <= other.start && end >= other.start) + || (start <= other.end && end >= other.end) + || (start >= other.start && end <= other.end); } + @Override public String toString() { return "Markup [start=" + start + ", end=" + end + ", metadata=" + metadata + "]"; } - - + } + // Merge original annotations and search hits into a single set of markups for each passage - static MarkupPassage mergeAnnotations(AnnotationToken [] annotations, Passage passage){ + static MarkupPassage mergeAnnotations(AnnotationToken[] annotations, Passage passage) { try { MarkupPassage markupPassage = new MarkupPassage(); @@ -144,28 +150,31 @@ static MarkupPassage mergeAnnotations(AnnotationToken [] annotations, Passage pa int start = passage.getMatchStarts()[i]; int end = passage.getMatchEnds()[i]; String searchTerm = passage.getMatchTerms()[i].utf8ToString(); - Markup markup = new Markup(start, end, SEARCH_HIT_TYPE+"="+URLEncoder.encode(searchTerm, StandardCharsets.UTF_8.name())); + Markup markup = new Markup( + start, + end, + SEARCH_HIT_TYPE + "=" + URLEncoder.encode(searchTerm, StandardCharsets.UTF_8.name()) + ); markupPassage.addUnlessOverlapping(markup); } - + // Now add original text's annotations - ignoring any that might conflict with the search hits markup. 
- for (AnnotationToken token: annotations) { + for (AnnotationToken token : annotations) { int start = token.offset; int end = token.endOffset; - if(start >= passage.getStartOffset() && end<=passage.getEndOffset()) { + if (start >= passage.getStartOffset() && end <= passage.getEndOffset()) { String escapedValue = URLEncoder.encode(token.value, StandardCharsets.UTF_8.name()); Markup markup = new Markup(start, end, escapedValue); - markupPassage.addUnlessOverlapping(markup); + markupPassage.addUnlessOverlapping(markup); } } return markupPassage; - + } catch (UnsupportedEncodingException e) { // We should always have UTF-8 support throw new IllegalStateException(e); } } - @Override public Snippet[] format(Passage[] passages, String content) { @@ -174,13 +183,12 @@ public Snippet[] format(Passage[] passages, String content) { int pos; int j = 0; for (Passage passage : passages) { - AnnotationToken [] annotations = getIntersectingAnnotations(passage.getStartOffset(), - passage.getEndOffset()); + AnnotationToken[] annotations = getIntersectingAnnotations(passage.getStartOffset(), passage.getEndOffset()); MarkupPassage mergedMarkup = mergeAnnotations(annotations, passage); - + StringBuilder sb = new StringBuilder(); - pos = passage.getStartOffset(); - for(Markup markup: mergedMarkup.markups) { + pos = passage.getStartOffset(); + for (Markup markup : mergedMarkup.markups) { int start = markup.start; int end = markup.end; // its possible to have overlapping terms @@ -190,7 +198,7 @@ public Snippet[] format(Passage[] passages, String content) { if (end > pos) { sb.append("["); append(sb, content, Math.max(pos, start), end); - + sb.append("]("); sb.append(markup.metadata); sb.append(")"); @@ -199,38 +207,38 @@ public Snippet[] format(Passage[] passages, String content) { } // its possible a "term" from the analyzer could span a sentence boundary. 
append(sb, content, pos, Math.max(pos, passage.getEndOffset())); - //we remove the paragraph separator if present at the end of the snippet (we used it as separator between values) + // we remove the paragraph separator if present at the end of the snippet (we used it as separator between values) if (sb.charAt(sb.length() - 1) == HighlightUtils.PARAGRAPH_SEPARATOR) { sb.deleteCharAt(sb.length() - 1); } else if (sb.charAt(sb.length() - 1) == HighlightUtils.NULL_SEPARATOR) { sb.deleteCharAt(sb.length() - 1); } - //and we trim the snippets too + // and we trim the snippets too snippets[j++] = new Snippet(sb.toString().trim(), passage.getScore(), passage.getNumMatches() > 0); - } + } return snippets; } - + public AnnotationToken[] getIntersectingAnnotations(int start, int end) { List<AnnotationToken> intersectingAnnotations = new ArrayList<>(); - int fieldValueOffset =0; + int fieldValueOffset = 0; for (AnnotatedText fieldValueAnnotations : this.annotations) { - //This is called from a highlighter where all of the field values are concatenated - // so each annotation offset will need to be adjusted so that it takes into account + // This is called from a highlighter where all of the field values are concatenated + // so each annotation offset will need to be adjusted so that it takes into account // the previous values AND the MULTIVAL delimiter for (int i = 0; i < fieldValueAnnotations.numAnnotations(); i++) { AnnotationToken token = fieldValueAnnotations.getAnnotation(i); if (token.intersects(start - fieldValueOffset, end - fieldValueOffset)) { - intersectingAnnotations - .add(new AnnotationToken(token.offset + fieldValueOffset, token.endOffset + - fieldValueOffset, token.value)); + intersectingAnnotations.add( + new AnnotationToken(token.offset + fieldValueOffset, token.endOffset + fieldValueOffset, token.value) + ); } } - //add 1 for the fieldvalue separator character - fieldValueOffset +=fieldValueAnnotations.textMinusMarkup.length() +1; + // add 1 for the fieldvalue separator character + fieldValueOffset += fieldValueAnnotations.textMinusMarkup.length() + 1; } return intersectingAnnotations.toArray(new AnnotationToken[intersectingAnnotations.size()]); - } + } private void append(StringBuilder dest, String content, int start, int end) { dest.append(encoder.encodeText(content.substring(start, end))); diff --git a/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java b/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java index ab2243eb38ed5..af94bcfa79367 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java @@ -56,9 +56,9 @@ public void testIntervals() throws IOException { } public void testFetchSourceValue() throws IOException { - MappedFieldType fieldType = new AnnotatedTextFieldMapper.Builder("field", createDefaultIndexAnalyzers()) - .build(new Mapper.BuilderContext(Settings.EMPTY, new ContentPath())) - .fieldType(); + MappedFieldType fieldType = new AnnotatedTextFieldMapper.Builder("field", createDefaultIndexAnalyzers()).build( + new Mapper.BuilderContext(Settings.EMPTY, new ContentPath()) + ).fieldType(); assertEquals(Collections.singletonList("value"), fetchSourceValue(fieldType, "value")); assertEquals(Collections.singletonList("42"), fetchSourceValue(fieldType, 42L));
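Note on the hunks that follow: the AnnotatedTextParsingTests changes exercise the markdown-like annotation syntax handled by AnnotatedText.parse() (see the AnnotatedTextFieldMapper hunk above). The snippet below is a minimal, self-contained sketch of that behaviour only; AnnotationMarkupDemo and its main() driver are hypothetical illustration, not part of this change, and it reuses the markdownPattern regex verbatim while deliberately omitting the edge cases the tests cover (blank values, rejected key=value pairs).

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AnnotationMarkupDemo {
    // Same pattern as AnnotatedText.markdownPattern: matches "[text](value)" spans.
    private static final Pattern MARKDOWN = Pattern.compile("\\[([^]\\[]*)]\\(([^)(]*)\\)");

    public static void main(String[] args) {
        // "foo [bar](Y&B)" should yield plain text "foo bar" plus two annotations,
        // "Y" and "B", both covering offsets 4-7, as testMultiValueMarkup asserts.
        String markedUp = "foo [bar](Y&B)";
        StringBuilder plainText = new StringBuilder();
        int lastPos = 0;
        Matcher m = MARKDOWN.matcher(markedUp);
        while (m.find()) {
            plainText.append(markedUp, lastPos, m.start()); // copy text before the annotation
            int start = plainText.length();
            plainText.append(m.group(1));                   // keep the annotated text itself
            for (String value : m.group(2).split("&")) {    // '&' separates multiple values
                System.out.println("annotation '" + value + "' at [" + start + "," + plainText.length() + ")");
            }
            lastPos = m.end();
        }
        plainText.append(markedUp.substring(lastPos));
        System.out.println("plain text: '" + plainText + "'");
    }
}

Run as-is this prints both annotations at offsets [4,7) over the plain text "foo bar", matching the expected AnnotationToken(4, 7, ...) values in the tests below.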
diff --git a/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextParsingTests.java b/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextParsingTests.java index c608f7b0e2ffd..5fcb009d47180 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextParsingTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextParsingTests.java @@ -54,33 +54,35 @@ private void checkParsing(String markup, String expectedPlainText, AnnotationTok } public void testSingleValueMarkup() { - checkParsing("foo [bar](Y)", "foo bar", new AnnotationToken(4,7,"Y")); + checkParsing("foo [bar](Y)", "foo bar", new AnnotationToken(4, 7, "Y")); } public void testMultiValueMarkup() { - checkParsing("foo [bar](Y&B)", "foo bar", new AnnotationToken(4,7,"Y"), - new AnnotationToken(4,7,"B")); + checkParsing("foo [bar](Y&B)", "foo bar", new AnnotationToken(4, 7, "Y"), new AnnotationToken(4, 7, "B")); } public void testBlankTextAnnotation() { - checkParsing("It sounded like this:[](theSoundOfOneHandClapping)", "It sounded like this:", - new AnnotationToken(21,21,"theSoundOfOneHandClapping")); + checkParsing( + "It sounded like this:[](theSoundOfOneHandClapping)", + "It sounded like this:", + new AnnotationToken(21, 21, "theSoundOfOneHandClapping") + ); } public void testMissingBracket() { - checkParsing("[foo](MissingEndBracket bar", - "[foo](MissingEndBracket bar", new AnnotationToken[0]); + checkParsing("[foo](MissingEndBracket bar", "[foo](MissingEndBracket bar", new AnnotationToken[0]); } public void testAnnotationWithType() { - Exception expectedException = expectThrows(OpenSearchParseException.class, - () -> checkParsing("foo [bar](type=foo) baz", "foo bar baz", new AnnotationToken(4,7, "noType"))); - assertThat(expectedException.getMessage(), equalTo("key=value pairs are not supported in annotations")); + Exception expectedException = expectThrows( + OpenSearchParseException.class, + () -> checkParsing("foo [bar](type=foo) baz", "foo bar baz", new AnnotationToken(4, 7, "noType")) + ); + assertThat(expectedException.getMessage(), equalTo("key=value pairs are not supported in annotations")); } public void testMissingValue() { checkParsing("[foo]() bar", "foo bar", new AnnotationToken[0]); } - } diff --git a/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java index e4cd9a2d25ab9..dd2ee23355c1e 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java @@ -72,10 +72,15 @@ public class AnnotatedTextHighlighterTests extends OpenSearchTestCase { - private void assertHighlightOneDoc(String fieldName, String []markedUpInputs, - Query query, Locale locale, BreakIterator breakIterator, - int noMatchSize, String[] expectedPassages) throws Exception { - + private void assertHighlightOneDoc( + String fieldName, + String[] markedUpInputs, + Query query, + Locale locale, + BreakIterator breakIterator, + int noMatchSize, + String[] expectedPassages + ) throws Exception { // Annotated fields wrap the usual analyzer with one that 
injects extra tokens Analyzer wrapperAnalyzer = new AnnotationAnalyzerWrapper(new StandardAnalyzer()); @@ -144,7 +149,6 @@ private void assertHighlightOneDoc(String fieldName, String []markedUpInputs, dir.close(); } - public void testAnnotatedTextStructuredMatch() throws Exception { // Check that a structured token eg a URL can be highlighted in a query // on marked-up @@ -152,37 +156,49 @@ public void testAnnotatedTextStructuredMatch() throws Exception { String url = "https://en.wikipedia.org/wiki/Key_Word_in_Context"; String encodedUrl = URLEncoder.encode(url, "UTF-8"); String annotatedWord = "[highlighting](" + encodedUrl + ")"; - String highlightedAnnotatedWord = "[highlighting](" + AnnotatedPassageFormatter.SEARCH_HIT_TYPE + "=" + encodedUrl + "&" - + encodedUrl + ")"; - final String[] markedUpInputs = { "This is a test. Just a test1 " + annotatedWord + " from [annotated](bar) highlighter.", - "This is the second " + annotatedWord + " value to perform highlighting on a longer text that gets scored lower." }; + String highlightedAnnotatedWord = "[highlighting](" + + AnnotatedPassageFormatter.SEARCH_HIT_TYPE + + "=" + + encodedUrl + + "&" + + encodedUrl + + ")"; + final String[] markedUpInputs = { + "This is a test. Just a test1 " + annotatedWord + " from [annotated](bar) highlighter.", + "This is the second " + annotatedWord + " value to perform highlighting on a longer text that gets scored lower." }; String[] expectedPassages = { - "This is a test. Just a test1 " + highlightedAnnotatedWord + " from [annotated](bar) highlighter.", - "This is the second " + highlightedAnnotatedWord + " value to perform highlighting on a" - + " longer text that gets scored lower." }; + "This is a test. Just a test1 " + highlightedAnnotatedWord + " from [annotated](bar) highlighter.", + "This is the second " + + highlightedAnnotatedWord + + " value to perform highlighting on a" + + " longer text that gets scored lower." }; Query query = new TermQuery(new Term("text", url)); BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); } public void testAnnotatedTextOverlapsWithUnstructuredSearchTerms() throws Exception { - final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore", - "Donald duck is a [Disney](Disney+Inc) invention" }; + final String[] markedUpInputs = { + "[Donald Trump](Donald+Trump) visited Singapore", + "Donald duck is a [Disney](Disney+Inc) invention" }; - String[] expectedPassages = { "[Donald](_hit_term=donald) Trump visited Singapore", - "[Donald](_hit_term=donald) duck is a [Disney](Disney+Inc) invention" }; + String[] expectedPassages = { + "[Donald](_hit_term=donald) Trump visited Singapore", + "[Donald](_hit_term=donald) duck is a [Disney](Disney+Inc) invention" }; Query query = new TermQuery(new Term("text", "donald")); BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); } public void testAnnotatedTextMultiFieldWithBreakIterator() throws Exception { - final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore. 
Kim shook hands with Donald", - "Donald duck is a [Disney](Disney+Inc) invention" }; - String[] expectedPassages = { "[Donald](_hit_term=donald) Trump visited Singapore", - "Kim shook hands with [Donald](_hit_term=donald)", - "[Donald](_hit_term=donald) duck is a [Disney](Disney+Inc) invention" }; + final String[] markedUpInputs = { + "[Donald Trump](Donald+Trump) visited Singapore. Kim shook hands with Donald", + "Donald duck is a [Disney](Disney+Inc) invention" }; + String[] expectedPassages = { + "[Donald](_hit_term=donald) Trump visited Singapore", + "Kim shook hands with [Donald](_hit_term=donald)", + "[Donald](_hit_term=donald) duck is a [Disney](Disney+Inc) invention" }; Query query = new TermQuery(new Term("text", "donald")); BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); breakIterator = new SplittingBreakIterator(breakIterator, '.'); @@ -190,9 +206,10 @@ public void testAnnotatedTextMultiFieldWithBreakIterator() throws Exception { } public void testAnnotatedTextSingleFieldWithBreakIterator() throws Exception { - final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore. Kim shook hands with Donald"}; - String[] expectedPassages = { "[Donald](_hit_term=donald) Trump visited Singapore", - "Kim shook hands with [Donald](_hit_term=donald)"}; + final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore. Kim shook hands with Donald" }; + String[] expectedPassages = { + "[Donald](_hit_term=donald) Trump visited Singapore", + "Kim shook hands with [Donald](_hit_term=donald)" }; Query query = new TermQuery(new Term("text", "donald")); BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); breakIterator = new SplittingBreakIterator(breakIterator, '.'); @@ -200,17 +217,16 @@ public void testAnnotatedTextSingleFieldWithBreakIterator() throws Exception { } public void testAnnotatedTextSingleFieldWithPhraseQuery() throws Exception { - final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore", - "Donald Jr was with Melania Trump"}; - String[] expectedPassages = { "[Donald](_hit_term=donald) [Trump](_hit_term=trump) visited Singapore"}; + final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore", "Donald Jr was with Melania Trump" }; + String[] expectedPassages = { "[Donald](_hit_term=donald) [Trump](_hit_term=trump) visited Singapore" }; Query query = new PhraseQuery("text", "donald", "trump"); BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); } public void testBadAnnotation() throws Exception { - final String[] markedUpInputs = { "Missing bracket for [Donald Trump](Donald+Trump visited Singapore"}; - String[] expectedPassages = { "Missing bracket for [Donald Trump](Donald+Trump visited [Singapore](_hit_term=singapore)"}; + final String[] markedUpInputs = { "Missing bracket for [Donald Trump](Donald+Trump visited Singapore" }; + String[] expectedPassages = { "Missing bracket for [Donald Trump](Donald+Trump visited [Singapore](_hit_term=singapore)" }; Query query = new TermQuery(new Term("text", "singapore")); BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); diff --git 
a/plugins/mapper-annotated-text/src/yamlRestTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java b/plugins/mapper-annotated-text/src/yamlRestTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java index ad12a35c3060b..d834df09d1a7c 100644 --- a/plugins/mapper-annotated-text/src/yamlRestTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java +++ b/plugins/mapper-annotated-text/src/yamlRestTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java @@ -49,4 +49,3 @@ public static Iterable parameters() throws Exception { return createParameters(); } } - diff --git a/plugins/mapper-murmur3/src/main/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapper.java index 824e9dabdc0df..ffbb1905bcd98 100644 --- a/plugins/mapper-murmur3/src/main/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -96,7 +96,8 @@ public Murmur3FieldMapper build(BuilderContext context) { name, new Murmur3FieldType(buildFullName(context), stored.getValue(), meta.getValue()), multiFieldsBuilder.build(this, context), - copyTo.build()); + copyTo.build() + ); } } @@ -130,10 +131,7 @@ public Query termQuery(Object value, QueryShardContext context) { } } - protected Murmur3FieldMapper(String simpleName, - MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo) { + protected Murmur3FieldMapper(String simpleName, MappedFieldType mappedFieldType, MultiFields multiFields, CopyTo copyTo) { super(simpleName, mappedFieldType, multiFields, copyTo); } @@ -148,8 +146,7 @@ protected String contentType() { } @Override - protected void parseCreateField(ParseContext context) - throws IOException { + protected void parseCreateField(ParseContext context) throws IOException { final Object value; if (context.externalValueSet()) { value = context.externalValue(); diff --git a/plugins/mapper-murmur3/src/yamlRestTest/java/org/opensearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java b/plugins/mapper-murmur3/src/yamlRestTest/java/org/opensearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java index e2441fb8b9f49..df24a44b06937 100644 --- a/plugins/mapper-murmur3/src/yamlRestTest/java/org/opensearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java +++ b/plugins/mapper-murmur3/src/yamlRestTest/java/org/opensearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java @@ -49,4 +49,3 @@ public static Iterable parameters() throws Exception { return createParameters(); } } - diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java index 1204e3381351e..4811c7d12759c 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java @@ -64,19 +64,26 @@ public void testThatUpdatingMappingShouldNotRemoveSizeMappingConfiguration() thr String index = "foo"; String type = "mytype"; - XContentBuilder builder = - jsonBuilder().startObject().startObject("_size").field("enabled", true).endObject().endObject(); + XContentBuilder builder = 
jsonBuilder().startObject().startObject("_size").field("enabled", true).endObject().endObject(); assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder)); // check mapping again assertSizeMappingEnabled(index, type, true); // update some field in the mapping - XContentBuilder updateMappingBuilder = - jsonBuilder().startObject().startObject("properties").startObject("otherField").field("type", "text") - .endObject().endObject().endObject(); - AcknowledgedResponse putMappingResponse = - client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get(); + XContentBuilder updateMappingBuilder = jsonBuilder().startObject() + .startObject("properties") + .startObject("otherField") + .field("type", "text") + .endObject() + .endObject() + .endObject(); + AcknowledgedResponse putMappingResponse = client().admin() + .indices() + .preparePutMapping(index) + .setType(type) + .setSource(updateMappingBuilder) + .get(); assertAcked(putMappingResponse); // make sure size field is still in mapping @@ -87,18 +94,24 @@ public void testThatSizeCanBeSwitchedOnAndOff() throws Exception { String index = "foo"; String type = "mytype"; - XContentBuilder builder = - jsonBuilder().startObject().startObject("_size").field("enabled", true).endObject().endObject(); + XContentBuilder builder = jsonBuilder().startObject().startObject("_size").field("enabled", true).endObject().endObject(); assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder)); // check mapping again assertSizeMappingEnabled(index, type, true); // update some field in the mapping - XContentBuilder updateMappingBuilder = - jsonBuilder().startObject().startObject("_size").field("enabled", false).endObject().endObject(); - AcknowledgedResponse putMappingResponse = - client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get(); + XContentBuilder updateMappingBuilder = jsonBuilder().startObject() + .startObject("_size") + .field("enabled", false) + .endObject() + .endObject(); + AcknowledgedResponse putMappingResponse = client().admin() + .indices() + .preparePutMapping(index) + .setType(type) + .setSource(updateMappingBuilder) + .get(); assertAcked(putMappingResponse); // make sure size field is still in mapping @@ -106,10 +119,13 @@ public void testThatSizeCanBeSwitchedOnAndOff() throws Exception { } private void assertSizeMappingEnabled(String index, String type, boolean enabled) throws IOException { - String errMsg = String.format(Locale.ROOT, - "Expected size field mapping to be " + (enabled ? "enabled" : "disabled") + " for %s/%s", index, type); - GetMappingsResponse getMappingsResponse = - client().admin().indices().prepareGetMappings(index).addTypes(type).get(); + String errMsg = String.format( + Locale.ROOT, + "Expected size field mapping to be " + (enabled ? 
"enabled" : "disabled") + " for %s/%s", + index, + type + ); + GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes(type).get(); Map mappingSource = getMappingsResponse.getMappings().get(index).get(type).getSourceAsMap(); assertThat(errMsg, mappingSource, hasKey("_size")); String sizeAsString = mappingSource.get("_size").toString(); @@ -120,8 +136,7 @@ private void assertSizeMappingEnabled(String index, String type, boolean enabled public void testBasic() throws Exception { assertAcked(prepareCreate("test").addMapping("type", "_size", "enabled=true")); final String source = "{\"f\":10}"; - indexRandom(true, - client().prepareIndex("test", "type", "1").setSource(source, XContentType.JSON)); + indexRandom(true, client().prepareIndex("test", "type", "1").setSource(source, XContentType.JSON)); GetResponse getResponse = client().prepareGet("test", "type", "1").setStoredFields("_size").get(); assertNotNull(getResponse.getField("_size")); assertEquals(source.length(), (int) getResponse.getField("_size").getValue()); diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java index e64e03f90bac5..e39439c1a3b4f 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java @@ -65,11 +65,7 @@ public void testSizeEnabled() throws Exception { IndexService service = createIndex("test", Settings.EMPTY, "type", "_size", "enabled=true"); DocumentMapper docMapper = service.mapperService().documentMapper("type"); - BytesReference source = BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "value") - .endObject()); + BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()); ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON)); boolean stored = false; @@ -86,11 +82,7 @@ public void testSizeDisabled() throws Exception { IndexService service = createIndex("test", Settings.EMPTY, "type", "_size", "enabled=false"); DocumentMapper docMapper = service.mapperService().documentMapper("type"); - BytesReference source = BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "value") - .endObject()); + BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()); ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON)); assertThat(doc.rootDoc().getField("_size"), nullValue()); @@ -100,11 +92,7 @@ public void testSizeNotSet() throws Exception { IndexService service = createIndex("test", Settings.EMPTY, "type"); DocumentMapper docMapper = service.mapperService().documentMapper("type"); - BytesReference source = BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "value") - .endObject()); + BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()); ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON)); assertThat(doc.rootDoc().getField("_size"), nullValue()); @@ -115,11 +103,18 @@ public void 
testThatDisablingWorksWhenMerging() throws Exception { DocumentMapper docMapper = service.mapperService().documentMapper("type"); assertThat(docMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(true)); - String disabledMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_size").field("enabled", false).endObject() - .endObject().endObject()); - docMapper = service.mapperService().merge("type", new CompressedXContent(disabledMapping), - MapperService.MergeReason.MAPPING_UPDATE); + String disabledMapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_size") + .field("enabled", false) + .endObject() + .endObject() + .endObject() + ); + docMapper = service.mapperService() + .merge("type", new CompressedXContent(disabledMapping), MapperService.MergeReason.MAPPING_UPDATE); assertThat(docMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(false)); } diff --git a/plugins/mapper-size/src/main/java/org/opensearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/opensearch/index/mapper/size/SizeFieldMapper.java index 984a0c2f7f79e..a937b5358e366 100644 --- a/plugins/mapper-size/src/main/java/org/opensearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/opensearch/index/mapper/size/SizeFieldMapper.java @@ -54,8 +54,7 @@ private static SizeFieldMapper toType(FieldMapper in) { public static class Builder extends MetadataFieldMapper.Builder { - private final Parameter> enabled - = updateableBoolParam("enabled", m -> toType(m).enabled, false); + private final Parameter> enabled = updateableBoolParam("enabled", m -> toType(m).enabled, false); private Builder() { super(NAME); diff --git a/plugins/mapper-size/src/yamlRestTest/java/org/opensearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java b/plugins/mapper-size/src/yamlRestTest/java/org/opensearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java index 5ff61435baa01..2f0cc60ee0010 100644 --- a/plugins/mapper-size/src/yamlRestTest/java/org/opensearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java +++ b/plugins/mapper-size/src/yamlRestTest/java/org/opensearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java @@ -49,4 +49,3 @@ public static Iterable parameters() throws Exception { return createParameters(); } } - diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java index fe849187415ab..4b11f2e3305e6 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -65,7 +65,7 @@ public class AzureBlobStoreRepositoryTests extends OpenSearchMockAPIBasedReposit public static void shutdownSchedulers() { Schedulers.shutdownNow(); } - + @Override protected String repositoryType() { return AzureRepository.TYPE; @@ -125,10 +125,16 @@ AzureStorageService createAzureStoreService(final Settings settings) { return new AzureStorageService(settings) { @Override RequestRetryOptions createRetryPolicy(final AzureStorageSettings azureStorageSettings, String secondaryHost) { - return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, 
azureStorageSettings.getMaxRetries(), - 1, 100L, 500L, secondaryHost); + return new RequestRetryOptions( + RetryPolicyType.EXPONENTIAL, + azureStorageSettings.getMaxRetries(), + 1, + 100L, + 500L, + secondaryHost + ); } - + @Override ParallelTransferOptions getBlobRequestOptionsForWriteBlob() { return new ParallelTransferOptions().setMaxSingleUploadSizeLong(ByteSizeUnit.MB.toBytes(1)); diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 976d134f939e1..6d71a65a35a4c 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -65,7 +65,7 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi public static void shutdownSchedulers() { Schedulers.shutdownNow(); } - + @Override protected Collection> getPlugins() { return pluginList(AzureRepositoryPlugin.class); @@ -75,10 +75,7 @@ protected Collection> getPlugins() { protected Settings nodeSettings() { final String endpoint = System.getProperty("test.azure.endpoint_suffix"); if (Strings.hasText(endpoint)) { - return Settings.builder() - .put(super.nodeSettings()) - .put("azure.client.default.endpoint_suffix", endpoint) - .build(); + return Settings.builder().put(super.nodeSettings()).put("azure.client.default.endpoint_suffix", endpoint).build(); } return super.nodeSettings(); } @@ -107,12 +104,16 @@ protected SecureSettings credentials() { @Override protected void createRepository(String repoName) { - AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository(repoName) + AcknowledgedResponse putRepositoryResponse = client().admin() + .cluster() + .preparePutRepository(repoName) .setType("azure") - .setSettings(Settings.builder() - .put("container", System.getProperty("test.azure.container")) - .put("base_path", System.getProperty("test.azure.base")) - ).get(); + .setSettings( + Settings.builder() + .put("container", System.getProperty("test.azure.container")) + .put("base_path", System.getProperty("test.azure.base")) + ) + .get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); if (Strings.hasText(System.getProperty("test.azure.sas_token"))) { ensureSasTokenPermissions(); @@ -129,9 +130,12 @@ private void ensureSasTokenPermissions() { final BlobContainerClient blobContainer = client.v1().getBlobContainerClient(blobStore.toString()); try { SocketAccess.doPrivilegedException(() -> blobContainer.existsWithResponse(null, client.v2().get())); - future.onFailure(new RuntimeException( - "The SAS token used in this test allowed for checking container existence. This test only supports tokens " + - "that grant only the documented permission requirements for the Azure repository plugin.")); + future.onFailure( + new RuntimeException( + "The SAS token used in this test allowed for checking container existence. This test only supports tokens " + + "that grant only the documented permission requirements for the Azure repository plugin." 
+ ) + ); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_FORBIDDEN) { future.onResponse(null); diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobContainer.java index 1b62cb92c8563..d6fa72221f408 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobContainer.java @@ -95,7 +95,7 @@ private InputStream openInputStream(String blobName, long position, @Nullable Lo // On Azure, if the location path is a secondary location, and the blob does not // exist, instead of returning immediately from the getInputStream call below // with a 404 StorageException, Azure keeps trying and trying for a long timeout - // before throwing a storage exception. This can cause long delays in retrieving + // before throwing a storage exception. This can cause long delays in retrieving // snapshots, so we first check if the blob exists before trying to open an input // stream to it. throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); @@ -157,8 +157,10 @@ public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOExce if (blobNames.isEmpty()) { result.onResponse(null); } else { - final GroupedActionListener listener = - new GroupedActionListener<>(ActionListener.map(result, v -> null), blobNames.size()); + final GroupedActionListener listener = new GroupedActionListener<>( + ActionListener.map(result, v -> null), + blobNames.size() + ); final ExecutorService executor = threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME); // Executing deletes in parallel since Azure SDK 8 is using blocking IO while Azure does not provide a bulk delete API endpoint // TODO: Upgrade to newer non-blocking Azure SDK 11 and execute delete requests in parallel that way. diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java index deefe2d347e1b..d4f3acf0a5c66 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java @@ -121,7 +121,7 @@ public AzureBlobStore(RepositoryMetadata metadata, AzureStorageService service, if (response.getStatusCode() >= 300) { return; } - + final HttpMethod method = request.getHttpMethod(); if (method.equals(HttpMethod.HEAD)) { stats.headOperations.incrementAndGet(); @@ -137,7 +137,7 @@ public AzureBlobStore(RepositoryMetadata metadata, AzureStorageService service, } else if (method.equals(HttpMethod.PUT)) { final String query = request.getUrl().getQuery(); final String queryParams = (query == null) ? "" : query; - + // https://docs.microsoft.com/en-us/rest/api/storageservices/put-block // https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-list if (queryParams.contains("comp=block") && queryParams.contains("blockid=")) { @@ -200,13 +200,13 @@ public void deleteBlob(String blob) throws URISyntaxException, BlobStorageExcept final BlobClient azureBlob = blobContainer.getBlobClient(blob); logger.trace(() -> new ParameterizedMessage("container [{}]: blob [{}] found. 
removing.", container, blob)); final Response response = azureBlob.deleteWithResponse(null, null, timeout(), client.v2().get()); - logger.trace(() -> new ParameterizedMessage("container [{}]: blob [{}] deleted status [{}].", container, - blob, response.getStatusCode())); + logger.trace( + () -> new ParameterizedMessage("container [{}]: blob [{}] deleted status [{}].", container, blob, response.getStatusCode()) + ); }); } - public DeleteResult deleteBlobDirectory(String path, Executor executor) - throws URISyntaxException, BlobStorageException, IOException { + public DeleteResult deleteBlobDirectory(String path, Executor executor) throws URISyntaxException, BlobStorageException, IOException { final Tuple> client = client(); final BlobContainerClient blobContainer = client.v1().getBlobContainerClient(container); final Collection exceptions = Collections.synchronizedList(new ArrayList<>()); @@ -220,7 +220,7 @@ public DeleteResult deleteBlobDirectory(String path, Executor executor) for (final BlobItem blobItem : blobContainer.listBlobs(listBlobsOptions, timeout())) { // Skipping prefixes as those are not deletable and should not be there assert (blobItem.isPrefix() == null || !blobItem.isPrefix()) : "Only blobs (not prefixes) are expected"; - + outstanding.incrementAndGet(); executor.execute(new AbstractRunnable() { @Override @@ -228,11 +228,18 @@ protected void doRun() throws Exception { final long len = blobItem.getProperties().getContentLength(); final BlobClient azureBlob = blobContainer.getBlobClient(blobItem.getName()); - logger.trace(() -> new ParameterizedMessage("container [{}]: blob [{}] found. removing.", - container, blobItem.getName())); + logger.trace( + () -> new ParameterizedMessage("container [{}]: blob [{}] found. removing.", container, blobItem.getName()) + ); final Response response = azureBlob.deleteWithResponse(null, null, timeout(), client.v2().get()); - logger.trace(() -> new ParameterizedMessage("container [{}]: blob [{}] deleted status [{}].", container, - blobItem.getName(), response.getStatusCode())); + logger.trace( + () -> new ParameterizedMessage( + "container [{}]: blob [{}] deleted status [{}].", + container, + blobItem.getName(), + response.getStatusCode() + ) + ); blobsDeleted.incrementAndGet(); if (len >= 0) { @@ -281,8 +288,7 @@ public InputStream getInputStream(String blob, long position, @Nullable Long len }); } - public Map listBlobsByPrefix(String keyPath, String prefix) - throws URISyntaxException, BlobStorageException { + public Map listBlobsByPrefix(String keyPath, String prefix) throws URISyntaxException, BlobStorageException { final Map blobsBuilder = new HashMap(); final Tuple> client = client(); final BlobContainerClient blobContainer = client.v1().getBlobContainerClient(container); @@ -291,17 +297,16 @@ public Map listBlobsByPrefix(String keyPath, String prefix // NOTE: this should be here: if (prefix == null) prefix = ""; // however, this is really inefficient since deleteBlobsByPrefix enumerates everything and // then does a prefix match on the result; it should just call listBlobsByPrefix with the prefix! - final ListBlobsOptions listBlobsOptions = new ListBlobsOptions() - .setDetails(new BlobListDetails().setRetrieveMetadata(true)) + final ListBlobsOptions listBlobsOptions = new ListBlobsOptions().setDetails(new BlobListDetails().setRetrieveMetadata(true)) .setPrefix(keyPath + (prefix == null ? 
"" : prefix)); - + SocketAccess.doPrivilegedVoidException(() -> { - for (final BlobItem blobItem: blobContainer.listBlobsByHierarchy("/", listBlobsOptions, timeout())) { + for (final BlobItem blobItem : blobContainer.listBlobsByHierarchy("/", listBlobsOptions, timeout())) { // Skipping over the prefixes, only look for the blobs if (blobItem.isPrefix() != null && blobItem.isPrefix()) { continue; } - + final String name = getBlobName(blobItem.getName(), container, keyPath); logger.trace(() -> new ParameterizedMessage("blob name [{}]", name)); @@ -320,13 +325,12 @@ public Map children(BlobPath path) throws URISyntaxExcept final Tuple> client = client(); final BlobContainerClient blobContainer = client.v1().getBlobContainerClient(container); final String keyPath = path.buildAsString(); - - final ListBlobsOptions listBlobsOptions = new ListBlobsOptions() - .setDetails(new BlobListDetails().setRetrieveMetadata(true)) + + final ListBlobsOptions listBlobsOptions = new ListBlobsOptions().setDetails(new BlobListDetails().setRetrieveMetadata(true)) .setPrefix(keyPath); SocketAccess.doPrivilegedVoidException(() -> { - for (final BlobItem blobItem: blobContainer.listBlobsByHierarchy("/", listBlobsOptions, timeout())) { + for (final BlobItem blobItem : blobContainer.listBlobsByHierarchy("/", listBlobsOptions, timeout())) { // Skipping over the blobs, only look for prefixes if (blobItem.isPrefix() != null && blobItem.isPrefix()) { // Expecting name in the form /container/keyPath.* and we want to strip off the /container/ @@ -336,17 +340,20 @@ public Map children(BlobPath path) throws URISyntaxExcept logger.trace(() -> new ParameterizedMessage("blob name [{}]", name)); blobsBuilder.add(name); } - }; + } + ; }); - return Collections.unmodifiableMap(blobsBuilder.stream().collect( - Collectors.toMap(Function.identity(), name -> new AzureBlobContainer(path.add(name), this, threadPool)))); + return Collections.unmodifiableMap( + blobsBuilder.stream() + .collect(Collectors.toMap(Function.identity(), name -> new AzureBlobContainer(path.add(name), this, threadPool))) + ); } - public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) - throws URISyntaxException, BlobStorageException, IOException { - assert inputStream.markSupported() - : "Should not be used with non-mark supporting streams as their retry handling in the SDK is broken"; + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws URISyntaxException, + BlobStorageException, IOException { + assert inputStream + .markSupported() : "Should not be used with non-mark supporting streams as their retry handling in the SDK is broken"; logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize)); final Tuple> client = client(); final BlobContainerClient blobContainer = client.v1().getBlobContainerClient(container); @@ -356,39 +363,42 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize, b if (failIfAlreadyExists) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } - + SocketAccess.doPrivilegedVoidException(() -> { final Response response = blob.uploadWithResponse( - new BlobParallelUploadOptions(inputStream, blobSize) - .setRequestConditions(blobRequestConditions) - .setParallelTransferOptions(service.getBlobRequestOptionsForWriteBlob()), - timeout(), client.v2().get()); - logger.trace(() -> new ParameterizedMessage("upload({}, stream, {}) - status [{}]", - 
blobName, blobSize, response.getStatusCode())); + new BlobParallelUploadOptions(inputStream, blobSize).setRequestConditions(blobRequestConditions) + .setParallelTransferOptions(service.getBlobRequestOptionsForWriteBlob()), + timeout(), + client.v2().get() + ); + logger.trace( + () -> new ParameterizedMessage("upload({}, stream, {}) - status [{}]", blobName, blobSize, response.getStatusCode()) + ); }); } catch (final BlobStorageException se) { - if (failIfAlreadyExists && se.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && - BlobErrorCode.BLOB_ALREADY_EXISTS.equals(se.getErrorCode())) { + if (failIfAlreadyExists + && se.getStatusCode() == HttpURLConnection.HTTP_CONFLICT + && BlobErrorCode.BLOB_ALREADY_EXISTS.equals(se.getErrorCode())) { throw new FileAlreadyExistsException(blobName, null, se.getMessage()); } throw se; } catch (final RuntimeException ex) { // Since most of the logic is happening inside the reactive pipeline, the checked exceptions - // are swallowed and wrapped into runtime one (see please Exceptions.ReactiveException). + // are swallowed and wrapped into a runtime one (see Exceptions.ReactiveException). if (ex.getCause() != null) { Throwables.rethrow(ex.getCause()); } else { throw ex; } - } - + } + logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {}) - done", blobName, blobSize)); } private Tuple> client() { return service.client(clientName, metricsCollector); } - + private Duration timeout() { return service.getBlobRequestTimeout(clientName); } @@ -397,7 +407,7 @@ private Duration timeout() { public Map stats() { return stats.toMap(); } - + /** * Extracts the name of the blob from path or prefixed blob name * @param pathOrName prefixed blob name or blob path @@ -411,7 +421,7 @@ private String getBlobName(final String pathOrName, final String container, fina if (name.matches(".
+ container + ".")) { name = name.substring(1 + container.length() + 1); } - + if (name.startsWith(keyPath)) { name = name.substring(keyPath.length()); } @@ -434,12 +444,20 @@ private static class Stats { private final AtomicLong putBlockListOperations = new AtomicLong(); private Map toMap() { - return org.opensearch.common.collect.Map.of("GetBlob", getOperations.get(), - "ListBlobs", listOperations.get(), - "GetBlobProperties", headOperations.get(), - "PutBlob", putOperations.get(), - "PutBlock", putBlockOperations.get(), - "PutBlockList", putBlockListOperations.get()); + return org.opensearch.common.collect.Map.of( + "GetBlob", + getOperations.get(), + "ListBlobs", + listOperations.get(), + "GetBlobProperties", + headOperations.get(), + "PutBlob", + putOperations.get(), + "PutBlock", + putBlockOperations.get(), + "PutBlockList", + putBlockListOperations.get() + ); } } } diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java index 29fae7e42dfa1..a7799fef475f3 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java @@ -72,16 +72,34 @@ public class AzureRepository extends MeteredBlobStoreRepository { public static final class Repository { @Deprecated // Replaced by client - public static final Setting ACCOUNT_SETTING = new Setting<>("account", "default", Function.identity(), - Property.NodeScope, Property.Deprecated); + public static final Setting ACCOUNT_SETTING = new Setting<>( + "account", + "default", + Function.identity(), + Property.NodeScope, + Property.Deprecated + ); public static final Setting CLIENT_NAME = new Setting<>("client", ACCOUNT_SETTING, Function.identity()); - public static final Setting CONTAINER_SETTING = - new Setting<>("container", "opensearch-snapshots", Function.identity(), Property.NodeScope); + public static final Setting CONTAINER_SETTING = new Setting<>( + "container", + "opensearch-snapshots", + Function.identity(), + Property.NodeScope + ); public static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path", Property.NodeScope); - public static final Setting LOCATION_MODE_SETTING = new Setting<>("location_mode", - s -> LocationMode.PRIMARY_ONLY.toString(), s -> LocationMode.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); - public static final Setting CHUNK_SIZE_SETTING = - Setting.byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope); + public static final Setting LOCATION_MODE_SETTING = new Setting<>( + "location_mode", + s -> LocationMode.PRIMARY_ONLY.toString(), + s -> LocationMode.valueOf(s.toUpperCase(Locale.ROOT)), + Property.NodeScope + ); + public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting( + "chunk_size", + MAX_CHUNK_SIZE, + MIN_CHUNK_SIZE, + MAX_CHUNK_SIZE, + Property.NodeScope + ); public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); public static final Setting READONLY_SETTING = Setting.boolSetting("readonly", false, Property.NodeScope); } @@ -96,9 +114,16 @@ public AzureRepository( final NamedXContentRegistry namedXContentRegistry, final AzureStorageService storageService, final ClusterService clusterService, - final RecoverySettings recoverySettings) { - super(metadata, 
Repository.COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry, clusterService, - recoverySettings, buildLocation(metadata)); + final RecoverySettings recoverySettings + ) { + super( + metadata, + Repository.COMPRESS_SETTING.get(metadata.settings()), + namedXContentRegistry, + clusterService, + recoverySettings, + buildLocation(metadata) + ); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.storageService = storageService; @@ -106,7 +131,7 @@ public AzureRepository( if (Strings.hasLength(basePath)) { // Remove starting / if any BlobPath path = new BlobPath(); - for(final String elem : basePath.split("/")) { + for (final String elem : basePath.split("/")) { path = path.add(elem); } this.basePath = path; @@ -125,8 +150,12 @@ public AzureRepository( } private static Map buildLocation(RepositoryMetadata metadata) { - return org.opensearch.common.collect.Map.of("base_path", Repository.BASE_PATH_SETTING.get(metadata.settings()), - "container", Repository.CONTAINER_SETTING.get(metadata.settings())); + return org.opensearch.common.collect.Map.of( + "base_path", + Repository.BASE_PATH_SETTING.get(metadata.settings()), + "container", + Repository.CONTAINER_SETTING.get(metadata.settings()) + ); } @Override @@ -138,10 +167,16 @@ protected BlobStore getBlobStore() { protected AzureBlobStore createBlobStore() { final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService, threadPool); - logger.debug(() -> new ParameterizedMessage( - "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", - blobStore, chunkSize, isCompress(), basePath)); - + logger.debug( + () -> new ParameterizedMessage( + "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", + blobStore, + chunkSize, + isCompress(), + basePath + ) + ); + return blobStore; } diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepositoryPlugin.java index 48422d997f7ef..1f829852dfcf9 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepositoryPlugin.java @@ -73,11 +73,16 @@ AzureStorageService createAzureStoreService(final Settings settings) { } @Override - public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry, - ClusterService clusterService, RecoverySettings recoverySettings) { - return Collections.singletonMap(AzureRepository.TYPE, - (metadata) -> new AzureRepository(metadata, namedXContentRegistry, azureStoreService, clusterService, - recoverySettings)); + public Map getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + AzureRepository.TYPE, + (metadata) -> new AzureRepository(metadata, namedXContentRegistry, azureStoreService, clusterService, recoverySettings) + ); } @Override diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java index bdeed1231f211..7eaff6d6875d6 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java +++ 
b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java @@ -85,7 +85,7 @@ public class AzureStorageService implements AutoCloseable { private final ClientLogger logger = new ClientLogger(AzureStorageService.class); - + /** * Maximum blob's block size size */ @@ -94,8 +94,10 @@ public class AzureStorageService implements AutoCloseable { /** * Maximum allowed blob's block size in Azure blob store. */ - public static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(BlockBlobAsyncClient.MAX_STAGE_BLOCK_BYTES_LONG, - ByteSizeUnit.BYTES); + public static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue( + BlockBlobAsyncClient.MAX_STAGE_BLOCK_BYTES_LONG, + ByteSizeUnit.BYTES + ); // 'package' for testing volatile Map storageSettings = emptyMap(); @@ -106,25 +108,25 @@ public AzureStorageService(Settings settings) { final Map clientsSettings = AzureStorageSettings.load(settings); refreshAndClearCache(clientsSettings); } - + /** * Obtains a {@code BlobServiceClient} on each invocation using the current client - * settings. BlobServiceClient is thread safe and and could be cached but the settings + * settings. BlobServiceClient is thread safe and could be cached but the settings * can change, therefore the instance might be recreated from scratch. - * + * @param clientName client name * @return the {@code BlobServiceClient} instance and context */ public Tuple> client(String clientName) { return client(clientName, (request, response) -> {}); - + } /** * Obtains a {@code BlobServiceClient} on each invocation using the current client - * settings. BlobServiceClient is thread safe and and could be cached but the settings + * settings. BlobServiceClient is thread safe and could be cached but the settings * can change, therefore the instance might be recreated from scratch.
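As the javadoc notes, the v12 BlobServiceClient is thread safe, so the service keeps one per settings object and rebuilds it only when the settings change; the caching pattern visible in the client() hunk below boils down to a computeIfAbsent keyed by settings. A compact sketch of that pattern, with hypothetical names:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.function.Function;

    final class ClientCache<S, C> {
        private final ConcurrentMap<S, C> clients = new ConcurrentHashMap<>();

        // Builds the client at most once per distinct settings key.
        C get(S settings, Function<S, C> factory) {
            return clients.computeIfAbsent(settings, factory);
        }

        // A settings refresh drops the cache, so later lookups rebuild from scratch.
        void refreshAndClearCache() {
            clients.clear();
        }
    }
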
- + * @param clientName client name * @param statsCollector statistics collector * @return the {@code BlobServiceClient} instance and context @@ -135,10 +137,10 @@ public Tuple> client(String clientName, BiC throw new SettingsException("Unable to find client with name [" + clientName + "]"); } - // New Azure storage clients are thread-safe and do not hold any state so could be cached, see please: + // New Azure storage clients are thread-safe and do not hold any state so could be cached, see: // https://github.com/Azure/azure-storage-java/blob/master/V12%20Upgrade%20Story.md#v12-the-best-of-both-worlds ClientState state = clients.get(azureStorageSettings); - + if (state == null) { state = clients.computeIfAbsent(azureStorageSettings, key -> { try { @@ -148,48 +150,45 @@ } }); } - + return new Tuple<>(state.getClient(), () -> buildOperationContext(azureStorageSettings)); } private ClientState buildClient(AzureStorageSettings azureStorageSettings, BiConsumer statsCollector) - throws InvalidKeyException, URISyntaxException { + throws InvalidKeyException, URISyntaxException { final BlobServiceClientBuilder builder = createClientBuilder(azureStorageSettings); - + final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(new NioThreadFactory()); - final NettyAsyncHttpClientBuilder clientBuilder = new NettyAsyncHttpClientBuilder() - .eventLoopGroup(eventLoopGroup); + final NettyAsyncHttpClientBuilder clientBuilder = new NettyAsyncHttpClientBuilder().eventLoopGroup(eventLoopGroup); final Proxy proxy = azureStorageSettings.getProxy(); if (proxy != null) { - final Type type = Arrays - .stream(Type.values()) + final Type type = Arrays.stream(Type.values()) .filter(t -> t.toProxyType().equals(proxy.type())) .findFirst() .orElseThrow(() -> new IllegalArgumentException("Unsupported proxy type: " + proxy.type())); - clientBuilder.proxy(new ProxyOptions(type, (InetSocketAddress)proxy.address())); + clientBuilder.proxy(new ProxyOptions(type, (InetSocketAddress) proxy.address())); } builder.httpClient(clientBuilder.build()); // We define a default exponential retry policy return new ClientState( - applyLocationMode(builder, azureStorageSettings) - .addPolicy(new HttpStatsPolicy(statsCollector)) - .buildClient(), - eventLoopGroup); + applyLocationMode(builder, azureStorageSettings).addPolicy(new HttpStatsPolicy(statsCollector)).buildClient(), + eventLoopGroup + ); } - + /** - * The location mode is not there in v12 APIs anymore but it is possible to mimic its semantics using + * The location mode is not there in v12 APIs anymore but it is possible to mimic its semantics using * retry options and combination of primary / secondary endpoints.
Refer to migration guide for mode details: * https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous */ private BlobServiceClientBuilder applyLocationMode(final BlobServiceClientBuilder builder, final AzureStorageSettings settings) { final StorageConnectionString storageConnectionString = StorageConnectionString.create(settings.getConnectString(), logger); final StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); - + if (endpoint == null || endpoint.getPrimaryUri() == null) { throw new IllegalArgumentException("connectionString missing required settings to derive blob service primary endpoint."); } @@ -201,29 +200,25 @@ private BlobServiceClientBuilder applyLocationMode(final BlobServiceClientBuilde if (endpoint.getSecondaryUri() == null) { throw new IllegalArgumentException("connectionString missing required settings to derive blob service secondary endpoint."); } - - builder - .endpoint(endpoint.getSecondaryUri()) - .retryOptions(createRetryPolicy(settings, null)); + + builder.endpoint(endpoint.getSecondaryUri()).retryOptions(createRetryPolicy(settings, null)); } else if (locationMode == LocationMode.PRIMARY_THEN_SECONDARY) { builder.retryOptions(createRetryPolicy(settings, endpoint.getSecondaryUri())); } else if (locationMode == LocationMode.SECONDARY_THEN_PRIMARY) { if (endpoint.getSecondaryUri() == null) { throw new IllegalArgumentException("connectionString missing required settings to derive blob service secondary endpoint."); } - - builder - .endpoint(endpoint.getSecondaryUri()) - .retryOptions(createRetryPolicy(settings, endpoint.getPrimaryUri())); + + builder.endpoint(endpoint.getSecondaryUri()).retryOptions(createRetryPolicy(settings, endpoint.getPrimaryUri())); } else { throw new IllegalArgumentException("Unsupported location mode: " + locationMode); } - + return builder; } - private static BlobServiceClientBuilder createClientBuilder(AzureStorageSettings settings) - throws InvalidKeyException, URISyntaxException { + private static BlobServiceClientBuilder createClientBuilder(AzureStorageSettings settings) throws InvalidKeyException, + URISyntaxException { return new BlobServiceClientBuilder().connectionString(settings.getConnectString()); } @@ -239,8 +234,14 @@ private static Context buildOperationContext(AzureStorageSettings azureStorageSe // non-static, package private for testing RequestRetryOptions createRetryPolicy(final AzureStorageSettings azureStorageSettings, String secondaryHost) { // We define a default exponential retry policy{ - return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, azureStorageSettings.getMaxRetries(), - (Integer)null, null, null, secondaryHost); + return new RequestRetryOptions( + RetryPolicyType.EXPONENTIAL, + azureStorageSettings.getMaxRetries(), + (Integer) null, + null, + null, + secondaryHost + ); } /** @@ -255,7 +256,7 @@ public Map refreshAndClearCache(Map prevClients = new HashMap<>(this.clients); prevClients.values().forEach(this::closeInternally); prevClients.clear(); - + this.storageSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); this.clients.clear(); @@ -274,30 +275,30 @@ public Duration getBlobRequestTimeout(String clientName) { if (azureStorageSettings == null) { throw new SettingsException("Unable to find client with name [" + clientName + "]"); } - + // Set timeout option if the user sets cloud.azure.storage.timeout or // cloud.azure.storage.xxx.timeout (it's negative by default) final long timeout = 
azureStorageSettings.getTimeout().getMillis(); - + if (timeout > 0) { if (timeout > Integer.MAX_VALUE) { throw new IllegalArgumentException("Timeout [" + azureStorageSettings.getTimeout() + "] exceeds 2,147,483,647ms."); } - + return Duration.ofMillis(timeout); } - + return null; } - + ParallelTransferOptions getBlobRequestOptionsForWriteBlob() { return null; } - + private void closeInternally(ClientState state) { final Future shutdownFuture = state.getEventLoopGroup().shutdownGracefully(0, 5, TimeUnit.SECONDS); shutdownFuture.awaitUninterruptibly(); - + if (shutdownFuture.isSuccess() == false) { logger.warning("Error closing Netty Event Loop group", shutdownFuture.cause()); } @@ -309,7 +310,7 @@ private void closeInternally(ClientState state) { */ private static class HttpStatsPolicy implements HttpPipelinePolicy { private final BiConsumer statsCollector; - + HttpStatsPolicy(final BiConsumer statsCollector) { this.statsCollector = statsCollector; } @@ -317,9 +318,7 @@ private static class HttpStatsPolicy implements HttpPipelinePolicy { @Override public Mono process(HttpPipelineCallContext httpPipelineCallContext, HttpPipelineNextPolicy httpPipelineNextPolicy) { final HttpRequest request = httpPipelineCallContext.getHttpRequest(); - return httpPipelineNextPolicy - .process() - .doOnNext(response -> statsCollector.accept(request, response)); + return httpPipelineNextPolicy.process().doOnNext(response -> statsCollector.accept(request, response)); } @Override @@ -328,7 +327,7 @@ public HttpPipelinePosition getPipelinePosition() { return HttpPipelinePosition.PER_RETRY; } } - + /** * Helper class to hold the state of the cached clients and associated event groups to support * graceful shutdown logic. @@ -336,21 +335,21 @@ public HttpPipelinePosition getPipelinePosition() { private static class ClientState { private final BlobServiceClient client; private final EventLoopGroup eventLoopGroup; - + ClientState(final BlobServiceClient client, final EventLoopGroup eventLoopGroup) { this.client = client; this.eventLoopGroup = eventLoopGroup; } - + public BlobServiceClient getClient() { return client; } - + public EventLoopGroup getEventLoopGroup() { return eventLoopGroup; } } - + /** * The NIO thread factory which is aware of the SecurityManager */ @@ -363,9 +362,7 @@ private static class NioThreadFactory implements ThreadFactory { NioThreadFactory() { SecurityManager s = System.getSecurityManager(); group = (s != null) ? 
s.getThreadGroup() : Thread.currentThread().getThreadGroup(); - namePrefix = "reactor-nio-" + - poolNumber.getAndIncrement() + - "-thread-"; + namePrefix = "reactor-nio-" + poolNumber.getAndIncrement() + "-thread-"; } public Thread newThread(Runnable r) { @@ -377,15 +374,15 @@ public Thread newThread(Runnable r) { }); }; final Thread t = new Thread(group, priviledged, namePrefix + threadNumber.getAndIncrement(), 0); - + if (t.isDaemon()) { t.setDaemon(false); } - + if (t.getPriority() != Thread.NORM_PRIORITY) { t.setPriority(Thread.NORM_PRIORITY); } - + return t; } } diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java index 064f6c200c72c..3cc808a6fea57 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java @@ -58,39 +58,71 @@ final class AzureStorageSettings { private static final String AZURE_CLIENT_PREFIX_KEY = "azure.client."; /** Azure account name */ - public static final AffixSetting ACCOUNT_SETTING = - Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "account", key -> SecureSetting.secureString(key, null)); + public static final AffixSetting ACCOUNT_SETTING = Setting.affixKeySetting( + AZURE_CLIENT_PREFIX_KEY, + "account", + key -> SecureSetting.secureString(key, null) + ); /** Azure key */ - public static final AffixSetting KEY_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "key", - key -> SecureSetting.secureString(key, null)); + public static final AffixSetting KEY_SETTING = Setting.affixKeySetting( + AZURE_CLIENT_PREFIX_KEY, + "key", + key -> SecureSetting.secureString(key, null) + ); /** Azure SAS token */ - public static final AffixSetting SAS_TOKEN_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "sas_token", - key -> SecureSetting.secureString(key, null)); + public static final AffixSetting SAS_TOKEN_SETTING = Setting.affixKeySetting( + AZURE_CLIENT_PREFIX_KEY, + "sas_token", + key -> SecureSetting.secureString(key, null) + ); /** max_retries: Number of retries in case of Azure errors. Defaults to 3 (RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT). */ - public static final AffixSetting MAX_RETRIES_SETTING = - Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "max_retries", - (key) -> Setting.intSetting(key, 3, Setting.Property.NodeScope), - () -> ACCOUNT_SETTING, () -> KEY_SETTING); + public static final AffixSetting MAX_RETRIES_SETTING = Setting.affixKeySetting( + AZURE_CLIENT_PREFIX_KEY, + "max_retries", + (key) -> Setting.intSetting(key, 3, Setting.Property.NodeScope), + () -> ACCOUNT_SETTING, + () -> KEY_SETTING + ); /** * Azure endpoint suffix. Default to core.windows.net (CloudStorageAccount.DEFAULT_DNS). 
*/ - public static final AffixSetting ENDPOINT_SUFFIX_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "endpoint_suffix", - key -> Setting.simpleString(key, Property.NodeScope), () -> ACCOUNT_SETTING, () -> KEY_SETTING); + public static final AffixSetting ENDPOINT_SUFFIX_SETTING = Setting.affixKeySetting( + AZURE_CLIENT_PREFIX_KEY, + "endpoint_suffix", + key -> Setting.simpleString(key, Property.NodeScope), + () -> ACCOUNT_SETTING, + () -> KEY_SETTING + ); - public static final AffixSetting TIMEOUT_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "timeout", - (key) -> Setting.timeSetting(key, TimeValue.timeValueMinutes(-1), Property.NodeScope), () -> ACCOUNT_SETTING, () -> KEY_SETTING); + public static final AffixSetting TIMEOUT_SETTING = Setting.affixKeySetting( + AZURE_CLIENT_PREFIX_KEY, + "timeout", + (key) -> Setting.timeSetting(key, TimeValue.timeValueMinutes(-1), Property.NodeScope), + () -> ACCOUNT_SETTING, + () -> KEY_SETTING + ); /** The type of the proxy to connect to azure through. Can be direct (no proxy, default), http or socks */ - public static final AffixSetting PROXY_TYPE_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "proxy.type", - (key) -> new Setting<>(key, "direct", s -> Proxy.Type.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope) - , () -> ACCOUNT_SETTING, () -> KEY_SETTING); + public static final AffixSetting PROXY_TYPE_SETTING = Setting.affixKeySetting( + AZURE_CLIENT_PREFIX_KEY, + "proxy.type", + (key) -> new Setting<>(key, "direct", s -> Proxy.Type.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope), + () -> ACCOUNT_SETTING, + () -> KEY_SETTING + ); /** The host name of a proxy to connect to azure through. */ - public static final AffixSetting PROXY_HOST_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "proxy.host", - (key) -> Setting.simpleString(key, Property.NodeScope), () -> KEY_SETTING, () -> ACCOUNT_SETTING, () -> PROXY_TYPE_SETTING); + public static final AffixSetting PROXY_HOST_SETTING = Setting.affixKeySetting( + AZURE_CLIENT_PREFIX_KEY, + "proxy.host", + (key) -> Setting.simpleString(key, Property.NodeScope), + () -> KEY_SETTING, + () -> ACCOUNT_SETTING, + () -> PROXY_TYPE_SETTING + ); /** The port of a proxy to connect to azure through. 
*/ public static final Setting PROXY_PORT_SETTING = Setting.affixKeySetting( @@ -100,7 +132,8 @@ final class AzureStorageSettings { () -> ACCOUNT_SETTING, () -> KEY_SETTING, () -> PROXY_TYPE_SETTING, - () -> PROXY_HOST_SETTING); + () -> PROXY_HOST_SETTING + ); private final String account; private final String connectString; @@ -111,8 +144,15 @@ final class AzureStorageSettings { private final LocationMode locationMode; // copy-constructor - private AzureStorageSettings(String account, String connectString, String endpointSuffix, TimeValue timeout, int maxRetries, - Proxy proxy, LocationMode locationMode) { + private AzureStorageSettings( + String account, + String connectString, + String endpointSuffix, + TimeValue timeout, + int maxRetries, + Proxy proxy, + LocationMode locationMode + ) { this.account = account; this.connectString = connectString; this.endpointSuffix = endpointSuffix; @@ -122,8 +162,17 @@ private AzureStorageSettings(String account, String connectString, String endpoi this.locationMode = locationMode; } - private AzureStorageSettings(String account, String key, String sasToken, String endpointSuffix, TimeValue timeout, int maxRetries, - Proxy.Type proxyType, String proxyHost, Integer proxyPort) { + private AzureStorageSettings( + String account, + String key, + String sasToken, + String endpointSuffix, + TimeValue timeout, + int maxRetries, + Proxy.Type proxyType, + String proxyHost, + Integer proxyPort + ) { this.account = account; this.connectString = buildConnectString(account, key, sasToken, endpointSuffix); this.endpointSuffix = endpointSuffix; @@ -233,21 +282,26 @@ public static Map load(Settings settings) { // pkg private for tests /** Parse settings for a single client. */ private static AzureStorageSettings getClientSettings(Settings settings, String clientName) { - try (SecureString account = getConfigValue(settings, clientName, ACCOUNT_SETTING); - SecureString key = getConfigValue(settings, clientName, KEY_SETTING); - SecureString sasToken = getConfigValue(settings, clientName, SAS_TOKEN_SETTING)) { - return new AzureStorageSettings(account.toString(), key.toString(), sasToken.toString(), + try ( + SecureString account = getConfigValue(settings, clientName, ACCOUNT_SETTING); + SecureString key = getConfigValue(settings, clientName, KEY_SETTING); + SecureString sasToken = getConfigValue(settings, clientName, SAS_TOKEN_SETTING) + ) { + return new AzureStorageSettings( + account.toString(), + key.toString(), + sasToken.toString(), getValue(settings, clientName, ENDPOINT_SUFFIX_SETTING), getValue(settings, clientName, TIMEOUT_SETTING), getValue(settings, clientName, MAX_RETRIES_SETTING), getValue(settings, clientName, PROXY_TYPE_SETTING), getValue(settings, clientName, PROXY_HOST_SETTING), - getValue(settings, clientName, PROXY_PORT_SETTING)); + getValue(settings, clientName, PROXY_PORT_SETTING) + ); } } - private static T getConfigValue(Settings settings, String clientName, - Setting.AffixSetting clientSetting) { + private static T getConfigValue(Settings settings, String clientName, Setting.AffixSetting clientSetting) { final Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); return concreteSetting.get(settings); } @@ -258,13 +312,24 @@ private static T getValue(Settings settings, String groupName, Setting se return setting.getConcreteSetting(fullKey).get(settings); } - static Map overrideLocationMode(Map clientsSettings, - LocationMode locationMode) { + static Map overrideLocationMode( + Map clientsSettings, + LocationMode 
locationMode + ) { final MapBuilder mapBuilder = new MapBuilder<>(); for (final Map.Entry entry : clientsSettings.entrySet()) { - mapBuilder.put(entry.getKey(), - new AzureStorageSettings(entry.getValue().account, entry.getValue().connectString, entry.getValue().endpointSuffix, - entry.getValue().timeout, entry.getValue().maxRetries, entry.getValue().proxy, locationMode)); + mapBuilder.put( + entry.getKey(), + new AzureStorageSettings( + entry.getValue().account, + entry.getValue().connectString, + entry.getValue().endpointSuffix, + entry.getValue().timeout, + entry.getValue().maxRetries, + entry.getValue().proxy, + locationMode + ) + ); } return mapBuilder.immutableMap(); } diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/LocationMode.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/LocationMode.java index 965fc08795a71..df183935b2a1d 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/LocationMode.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/LocationMode.java @@ -6,7 +6,6 @@ * compatible open source license. */ - package org.opensearch.repositories.azure; /** @@ -35,4 +34,4 @@ public enum LocationMode { * primary location. */ SECONDARY_THEN_PRIMARY; -} \ No newline at end of file +} diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index 6b1a388b5ddd2..081533b2cd6ab 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -133,7 +133,7 @@ public void tearDown() throws Exception { service.close(); service = null; } - + httpServer.stop(0); super.tearDown(); ThreadPool.terminate(threadPool, 10L, TimeUnit.SECONDS); @@ -150,7 +150,10 @@ private BlobContainer createBlobContainer(final int maxRetries) { final InetSocketAddress address = httpServer.getAddress(); final String endpoint = "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://" - + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort() + "/"; + + InetAddresses.toUriString(address.getAddress()) + + ":" + + address.getPort() + + "/"; clientSettings.put(ENDPOINT_SUFFIX_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint); clientSettings.put(MAX_RETRIES_SETTING.getConcreteSettingForNamespace(clientName).getKey(), maxRetries); clientSettings.put(TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), TimeValue.timeValueMillis(2000)); @@ -164,42 +167,47 @@ private BlobContainer createBlobContainer(final int maxRetries) { service = new AzureStorageService(clientSettings.build()) { @Override RequestRetryOptions createRetryPolicy(final AzureStorageSettings azureStorageSettings, String secondaryHost) { - return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, azureStorageSettings.getMaxRetries(), - 1, 10L, 100L, secondaryHost); + return new RequestRetryOptions( + RetryPolicyType.EXPONENTIAL, + azureStorageSettings.getMaxRetries(), + 1, + 10L, + 100L, + secondaryHost + ); } - + @Override ParallelTransferOptions getBlobRequestOptionsForWriteBlob() { return new ParallelTransferOptions().setMaxSingleUploadSizeLong(ByteSizeUnit.MB.toBytes(1)); } }; - final RepositoryMetadata 
repositoryMetadata = new RepositoryMetadata("repository", AzureRepository.TYPE, - Settings.builder() - .put(CONTAINER_SETTING.getKey(), "container") - .put(ACCOUNT_SETTING.getKey(), clientName) - .build()); + final RepositoryMetadata repositoryMetadata = new RepositoryMetadata( + "repository", + AzureRepository.TYPE, + Settings.builder().put(CONTAINER_SETTING.getKey(), "container").put(ACCOUNT_SETTING.getKey(), clientName).build() + ); return new AzureBlobContainer(BlobPath.cleanPath(), new AzureBlobStore(repositoryMetadata, service, threadPool), threadPool); } public void testReadNonexistentBlobThrowsNoSuchFileException() { final BlobContainer blobContainer = createBlobContainer(between(1, 5)); - final Exception exception = expectThrows(NoSuchFileException.class, - () -> { - if (randomBoolean()) { - blobContainer.readBlob("read_nonexistent_blob"); - } else { - final long position = randomLongBetween(0, MAX_RANGE_VAL - 1L); - final long length = randomLongBetween(1, MAX_RANGE_VAL - position); - blobContainer.readBlob("read_nonexistent_blob", position, length); - } - }); + final Exception exception = expectThrows(NoSuchFileException.class, () -> { + if (randomBoolean()) { + blobContainer.readBlob("read_nonexistent_blob"); + } else { + final long position = randomLongBetween(0, MAX_RANGE_VAL - 1L); + final long length = randomLongBetween(1, MAX_RANGE_VAL - position); + blobContainer.readBlob("read_nonexistent_blob", position, length); + } + }); assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("404")); } public void testReadBlobWithRetries() throws Exception { - // The request retry policy counts the first attempt as retry, so we need to + // The request retry policy counts the first attempt as retry, so we need to // account for that and increase the max retry count by one. final int maxRetries = randomIntBetween(2, 6); final CountDown countDownHead = new CountDown(maxRetries - 1); @@ -246,7 +254,7 @@ public void testReadBlobWithRetries() throws Exception { } public void testReadRangeBlobWithRetries() throws Exception { - // The request retry policy counts the first attempt as retry, so we need to + // The request retry policy counts the first attempt as retry, so we need to // account for that and increase the max retry count by one. final int maxRetries = randomIntBetween(2, 6); final CountDown countDownGet = new CountDown(maxRetries - 1); @@ -296,7 +304,7 @@ public void testReadRangeBlobWithRetries() throws Exception { } public void testWriteBlobWithRetries() throws Exception { - // The request retry policy counts the first attempt as retry, so we need to + // The request retry policy counts the first attempt as retry, so we need to // account for that and increase the max retry count by one. 
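
The retry comment repeated in these tests captures a subtlety of the Azure SDK: RequestRetryOptions counts the initial HTTP attempt toward its maximum, so a client configured with azure.client.<name>.max_retries = N issues at most N requests in total. That is why each test counts down maxRetries - 1 simulated failures before serving a success. A minimal, self-contained sketch of that accounting (the class and variable names are illustrative assumptions, not part of this change set):

public class RetryAccountingSketch {
    public static void main(String[] args) {
        // Assumed SDK semantics, per the test comments above: the value handed to
        // RequestRetryOptions includes the initial attempt in its count.
        int maxRetries = 3;                     // e.g. azure.client.<name>.max_retries
        int totalHttpAttempts = maxRetries;     // initial attempt + (maxRetries - 1) retries
        int failuresTolerated = maxRetries - 1; // what CountDown(maxRetries - 1) models
        System.out.println("attempts=" + totalHttpAttempts + ", tolerated failures=" + failuresTolerated);
    }
}
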
final int maxRetries = randomIntBetween(2, 6); final CountDown countDown = new CountDown(maxRetries - 1); @@ -305,7 +313,7 @@ public void testWriteBlobWithRetries() throws Exception { httpServer.createContext("/container/write_blob_max_retries", exchange -> { if ("PUT".equals(exchange.getRequestMethod())) { exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false"); - + if (countDown.countDown()) { final BytesReference body = Streams.readFully(exchange.getRequestBody()); if (Objects.deepEquals(bytes, BytesReference.toBytes(body))) { @@ -337,7 +345,7 @@ public void testWriteBlobWithRetries() throws Exception { } public void testWriteLargeBlob() throws Exception { - // The request retry policy counts the first attempt as retry, so we need to + // The request retry policy counts the first attempt as retry, so we need to // account for that and increase the max retry count by one. final int maxRetries = randomIntBetween(3, 6); @@ -435,8 +443,10 @@ public void reset() { throw new AssertionError("should not be called"); } }) { - final IOException ioe = expectThrows(IOException.class, () -> - blobContainer.writeBlob("write_blob_max_retries", stream, randomIntBetween(1, 128), randomBoolean())); + final IOException ioe = expectThrows( + IOException.class, + () -> blobContainer.writeBlob("write_blob_max_retries", stream, randomIntBetween(1, 128), randomBoolean()) + ); assertThat(ioe.getMessage(), is("foo")); } } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java index 634c321deb6e8..01235d6193d9c 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java @@ -62,9 +62,13 @@ private AzureRepository azureRepository(Settings settings) { .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) .put(settings) .build(); - final AzureRepository azureRepository = new AzureRepository(new RepositoryMetadata("foo", "azure", internalSettings), - NamedXContentRegistry.EMPTY, mock(AzureStorageService.class), BlobStoreTestUtil.mockClusterService(), - new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); + final AzureRepository azureRepository = new AzureRepository( + new RepositoryMetadata("foo", "azure", internalSettings), + NamedXContentRegistry.EMPTY, + mock(AzureStorageService.class), + BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ); assertThat(azureRepository.getBlobStore(), is(nullValue())); return azureRepository; } @@ -74,50 +78,76 @@ public void testReadonlyDefault() { } public void testReadonlyDefaultAndReadonlyOn() { - assertThat(azureRepository(Settings.builder() - .put("readonly", true) - .build()).isReadOnly(), is(true)); + assertThat(azureRepository(Settings.builder().put("readonly", true).build()).isReadOnly(), is(true)); } public void testReadonlyWithPrimaryOnly() { - assertThat(azureRepository(Settings.builder() - .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_ONLY.name()) - .build()).isReadOnly(), is(false)); + assertThat( + azureRepository( + Settings.builder().put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), 
LocationMode.PRIMARY_ONLY.name()).build() + ).isReadOnly(), + is(false) + ); } public void testReadonlyWithPrimaryOnlyAndReadonlyOn() { - assertThat(azureRepository(Settings.builder() - .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_ONLY.name()) - .put("readonly", true) - .build()).isReadOnly(), is(true)); + assertThat( + azureRepository( + Settings.builder() + .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_ONLY.name()) + .put("readonly", true) + .build() + ).isReadOnly(), + is(true) + ); } public void testReadonlyWithSecondaryOnlyAndReadonlyOn() { - assertThat(azureRepository(Settings.builder() - .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.SECONDARY_ONLY.name()) - .put("readonly", true) - .build()).isReadOnly(), is(true)); + assertThat( + azureRepository( + Settings.builder() + .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.SECONDARY_ONLY.name()) + .put("readonly", true) + .build() + ).isReadOnly(), + is(true) + ); } public void testReadonlyWithSecondaryOnlyAndReadonlyOff() { - assertThat(azureRepository(Settings.builder() - .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.SECONDARY_ONLY.name()) - .put("readonly", false) - .build()).isReadOnly(), is(false)); + assertThat( + azureRepository( + Settings.builder() + .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.SECONDARY_ONLY.name()) + .put("readonly", false) + .build() + ).isReadOnly(), + is(false) + ); } public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOn() { - assertThat(azureRepository(Settings.builder() - .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_THEN_SECONDARY.name()) - .put("readonly", true) - .build()).isReadOnly(), is(true)); + assertThat( + azureRepository( + Settings.builder() + .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_THEN_SECONDARY.name()) + .put("readonly", true) + .build() + ).isReadOnly(), + is(true) + ); } public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOff() { - assertThat(azureRepository(Settings.builder() - .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_THEN_SECONDARY.name()) - .put("readonly", false) - .build()).isReadOnly(), is(false)); + assertThat( + azureRepository( + Settings.builder() + .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_THEN_SECONDARY.name()) + .put("readonly", false) + .build() + ).isReadOnly(), + is(false) + ); } public void testChunkSize() { @@ -131,20 +161,22 @@ public void testChunkSize() { assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), azureRepository.chunkSize()); // zero bytes is not allowed - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - azureRepository(Settings.builder().put("chunk_size", "0").build())); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> azureRepository(Settings.builder().put("chunk_size", "0").build()) + ); assertEquals("failed to parse value [0] for setting [chunk_size], must be >= [1b]", e.getMessage()); // negative bytes not allowed - e = expectThrows(IllegalArgumentException.class, () -> - azureRepository(Settings.builder().put("chunk_size", "-1").build())); + e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "-1").build())); assertEquals("failed to parse 
value [-1] for setting [chunk_size], must be >= [1b]", e.getMessage()); // greater than max chunk size not allowed - e = expectThrows(IllegalArgumentException.class, () -> - azureRepository(Settings.builder().put("chunk_size", "6tb").build())); - assertEquals("failed to parse value [6tb] for setting [chunk_size], must be <= [" - + AzureStorageService.MAX_CHUNK_SIZE.getStringRep() + "]", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "6tb").build())); + assertEquals( + "failed to parse value [6tb] for setting [chunk_size], must be <= [" + AzureStorageService.MAX_CHUNK_SIZE.getStringRep() + "]", + e.getMessage() + ); } } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java index db25eb7d3bfd2..bb39f8815ad77 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java @@ -73,11 +73,13 @@ public static void shutdownSchedulers() { } public void testReadSecuredSettings() { - final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()) - .put("azure.client.azure3.endpoint_suffix", "my_endpoint_suffix").build(); + final Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure3.endpoint_suffix", "my_endpoint_suffix") + .build(); final Map loadedSettings = AzureStorageSettings.load(settings); - assertThat(loadedSettings.keySet(), containsInAnyOrder("azure1","azure2","azure3","default")); + assertThat(loadedSettings.keySet(), containsInAnyOrder("azure1", "azure2", "azure3", "default")); assertThat(loadedSettings.get("azure1").getEndpointSuffix(), is(emptyString())); assertThat(loadedSettings.get("azure2").getEndpointSuffix(), is(emptyString())); @@ -99,8 +101,10 @@ private AzureStorageService storageServiceWithSettingsValidation(Settings settin } public void testCreateClientWithEndpointSuffix() throws IOException { - final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()) - .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build(); + final Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix") + .build(); try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final BlobServiceClient client1 = azureStorageService.client("azure1").v1(); @@ -236,9 +240,7 @@ public void testGetSelectedClientBackoffPolicyNbRetries() { } public void testNoProxy() { - final Settings settings = Settings.builder() - .setSecureSettings(buildSecureSettings()) - .build(); + final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()).build(); final AzureStorageService mock = storageServiceWithSettingsValidation(settings); assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); @@ -373,18 +375,18 @@ private static Settings buildSettings() { private static String encodeKey(final String value) { return Base64.getEncoder().encodeToString(value.getBytes(StandardCharsets.UTF_8)); } - + private static 
RequestRetryPolicy requestRetryOptions(BlobServiceClient client) { for (int i = 0; i < client.getHttpPipeline().getPolicyCount(); ++i) { final HttpPipelinePolicy policy = client.getHttpPipeline().getPolicy(i); if (policy instanceof RequestRetryPolicy) { - return (RequestRetryPolicy)policy; + return (RequestRetryPolicy) policy; } } - + return null; } - + /** * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile * It should remove the container part (first part of the path) and gives path/to/myfile diff --git a/plugins/repository-azure/src/yamlRestTest/java/org/opensearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java b/plugins/repository-azure/src/yamlRestTest/java/org/opensearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java index 6f07bc6adfc85..7ce28855417c3 100644 --- a/plugins/repository-azure/src/yamlRestTest/java/org/opensearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java +++ b/plugins/repository-azure/src/yamlRestTest/java/org/opensearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java @@ -48,4 +48,3 @@ public static Iterable parameters() throws Exception { return OpenSearchClientYamlSuiteTestCase.createParameters(); } } - diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 63f1f802fdf23..4bc59b6ae6553 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -90,10 +90,13 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends OpenSearchMockAPIBasedRepositoryIntegTestCase { public static void assumeNotJava8() { - assumeFalse("This test is flaky on jdk8 - we suspect a JDK bug to trigger some assertion in the HttpServer implementation used " + - "to emulate the server side logic of Google Cloud Storage. See https://bugs.openjdk.java.net/browse/JDK-8180754, " + - "https://github.com/elastic/elasticsearch/pull/51933 and https://github.com/elastic/elasticsearch/issues/52906 " + - "for more background on this issue.", JavaVersion.current().equals(JavaVersion.parse("8"))); + assumeFalse( + "This test is flaky on jdk8 - we suspect a JDK bug to trigger some assertion in the HttpServer implementation used " + + "to emulate the server side logic of Google Cloud Storage. 
See https://bugs.openjdk.java.net/browse/JDK-8180754, " + + "https://github.com/elastic/elasticsearch/pull/51933 and https://github.com/elastic/elasticsearch/issues/52906 " + + "for more background on this issue.", + JavaVersion.current().equals(JavaVersion.parse("8")) + ); } @BeforeClass @@ -108,11 +111,7 @@ protected String repositoryType() { @Override protected Settings repositorySettings() { - return Settings.builder() - .put(super.repositorySettings()) - .put(BUCKET.getKey(), "bucket") - .put(CLIENT_NAME.getKey(), "test") - .build(); + return Settings.builder().put(super.repositorySettings()).put(BUCKET.getKey(), "bucket").put(CLIENT_NAME.getKey(), "test").build(); } @Override @@ -151,9 +150,18 @@ public void testDeleteSingleItem() { final String repoName = createRepository(randomName()); final RepositoriesService repositoriesService = internalCluster().getMasterNodeInstance(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName); - PlainActionFuture.get(f -> repository.threadPool().generic().execute(ActionRunnable.run(f, () -> - repository.blobStore().blobContainer(repository.basePath()).deleteBlobsIgnoringIfNotExists(Collections.singletonList("foo")) - ))); + PlainActionFuture.get( + f -> repository.threadPool() + .generic() + .execute( + ActionRunnable.run( + f, + () -> repository.blobStore() + .blobContainer(repository.basePath()) + .deleteBlobsIgnoringIfNotExists(Collections.singletonList("foo")) + ) + ) + ); } public void testChunkSize() { @@ -164,31 +172,43 @@ public void testChunkSize() { // chunk size in settings final int size = randomIntBetween(1, 100); - repositoryMetadata = new RepositoryMetadata("repo", GoogleCloudStorageRepository.TYPE, - Settings.builder().put("chunk_size", size + "mb").build()); + repositoryMetadata = new RepositoryMetadata( + "repo", + GoogleCloudStorageRepository.TYPE, + Settings.builder().put("chunk_size", size + "mb").build() + ); chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetadata); assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), chunkSize); // zero bytes is not allowed IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - final RepositoryMetadata repoMetadata = new RepositoryMetadata("repo", GoogleCloudStorageRepository.TYPE, - Settings.builder().put("chunk_size", "0").build()); + final RepositoryMetadata repoMetadata = new RepositoryMetadata( + "repo", + GoogleCloudStorageRepository.TYPE, + Settings.builder().put("chunk_size", "0").build() + ); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetadata); }); assertEquals("failed to parse value [0] for setting [chunk_size], must be >= [1b]", e.getMessage()); // negative bytes not allowed e = expectThrows(IllegalArgumentException.class, () -> { - final RepositoryMetadata repoMetadata = new RepositoryMetadata("repo", GoogleCloudStorageRepository.TYPE, - Settings.builder().put("chunk_size", "-1").build()); + final RepositoryMetadata repoMetadata = new RepositoryMetadata( + "repo", + GoogleCloudStorageRepository.TYPE, + Settings.builder().put("chunk_size", "-1").build() + ); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetadata); }); assertEquals("failed to parse value [-1] for setting [chunk_size], must be >= [1b]", e.getMessage()); // greater than max chunk size not allowed e = expectThrows(IllegalArgumentException.class, () -> { - final RepositoryMetadata 
repoMetadata = new RepositoryMetadata("repo", GoogleCloudStorageRepository.TYPE, - Settings.builder().put("chunk_size", "6tb").build()); + final RepositoryMetadata repoMetadata = new RepositoryMetadata( + "repo", + GoogleCloudStorageRepository.TYPE, + Settings.builder().put("chunk_size", "6tb").build() + ); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetadata); }); assertEquals("failed to parse value [6tb] for setting [chunk_size], must be <= [5tb]", e.getMessage()); @@ -229,45 +249,59 @@ public TestGoogleCloudStoragePlugin(Settings settings) { protected GoogleCloudStorageService createStorageService() { return new GoogleCloudStorageService() { @Override - StorageOptions createStorageOptions(final GoogleCloudStorageClientSettings clientSettings, - final HttpTransportOptions httpTransportOptions) { + StorageOptions createStorageOptions( + final GoogleCloudStorageClientSettings clientSettings, + final HttpTransportOptions httpTransportOptions + ) { StorageOptions options = super.createStorageOptions(clientSettings, httpTransportOptions); return options.toBuilder() .setHost(options.getHost()) .setCredentials(options.getCredentials()) - .setRetrySettings(RetrySettings.newBuilder() - .setTotalTimeout(options.getRetrySettings().getTotalTimeout()) - .setInitialRetryDelay(Duration.ofMillis(10L)) - .setRetryDelayMultiplier(options.getRetrySettings().getRetryDelayMultiplier()) - .setMaxRetryDelay(Duration.ofSeconds(1L)) - .setMaxAttempts(0) - .setJittered(false) - .setInitialRpcTimeout(options.getRetrySettings().getInitialRpcTimeout()) - .setRpcTimeoutMultiplier(options.getRetrySettings().getRpcTimeoutMultiplier()) - .setMaxRpcTimeout(options.getRetrySettings().getMaxRpcTimeout()) - .build()) + .setRetrySettings( + RetrySettings.newBuilder() + .setTotalTimeout(options.getRetrySettings().getTotalTimeout()) + .setInitialRetryDelay(Duration.ofMillis(10L)) + .setRetryDelayMultiplier(options.getRetrySettings().getRetryDelayMultiplier()) + .setMaxRetryDelay(Duration.ofSeconds(1L)) + .setMaxAttempts(0) + .setJittered(false) + .setInitialRpcTimeout(options.getRetrySettings().getInitialRpcTimeout()) + .setRpcTimeoutMultiplier(options.getRetrySettings().getRpcTimeoutMultiplier()) + .setMaxRpcTimeout(options.getRetrySettings().getMaxRpcTimeout()) + .build() + ) .build(); } }; } @Override - public Map getRepositories(Environment env, NamedXContentRegistry registry, - ClusterService clusterService, RecoverySettings recoverySettings) { - return Collections.singletonMap(GoogleCloudStorageRepository.TYPE, + public Map getRepositories( + Environment env, + NamedXContentRegistry registry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + GoogleCloudStorageRepository.TYPE, metadata -> new GoogleCloudStorageRepository(metadata, registry, this.storageService, clusterService, recoverySettings) { @Override protected GoogleCloudStorageBlobStore createBlobStore() { return new GoogleCloudStorageBlobStore( - metadata.settings().get("bucket"), "test", metadata.name(), storageService, - randomIntBetween(1, 8) * 1024) { + metadata.settings().get("bucket"), + "test", + metadata.name(), + storageService, + randomIntBetween(1, 8) * 1024 + ) { @Override long getLargeBlobThresholdInBytes() { return ByteSizeUnit.MB.toBytes(1); } }; } - }); + } + ); } } @@ -305,9 +339,11 @@ protected String requestUniqueId(HttpExchange exchange) { final String range = exchange.getRequestHeaders().getFirst("Content-Range"); return 
exchange.getRemoteAddress().getHostString() - + " " + exchange.getRequestMethod() - + " " + exchange.getRequestURI() - + (range != null ? " " + range : ""); + + " " + + exchange.getRequestMethod() + + " " + + exchange.getRequestURI() + + (range != null ? " " + range : ""); } @Override @@ -349,16 +385,14 @@ public void maybeTrack(final String request, Headers requestHeaders) { } boolean isLastPart(Headers requestHeaders) { - if (requestHeaders.containsKey("Content-range") == false) - return false; + if (requestHeaders.containsKey("Content-range") == false) return false; // https://cloud.google.com/storage/docs/json_api/v1/parameters#contentrange final String contentRange = requestHeaders.getFirst("Content-range"); final Matcher matcher = contentRangeMatcher.matcher(contentRange); - if (matcher.matches() == false) - return false; + if (matcher.matches() == false) return false; String upperBound = matcher.group(1); String totalLength = matcher.group(2); diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java index e96fa33c38fee..f1b2f78a37380 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -75,19 +75,25 @@ protected SecureSettings credentials() { assertThat(System.getProperty("test.google.bucket"), not(blankOrNullString())); MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setFile("gcs.client.default.credentials_file", - Base64.getDecoder().decode(System.getProperty("test.google.account"))); + secureSettings.setFile( + "gcs.client.default.credentials_file", + Base64.getDecoder().decode(System.getProperty("test.google.account")) + ); return secureSettings; } @Override protected void createRepository(final String repoName) { - AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + AcknowledgedResponse putRepositoryResponse = client().admin() + .cluster() + .preparePutRepository("test-repo") .setType("gcs") - .setSettings(Settings.builder() - .put("bucket", System.getProperty("test.google.bucket")) - .put("base_path", System.getProperty("test.google.base", "/")) - ).get(); + .setSettings( + Settings.builder() + .put("bucket", System.getProperty("test.google.bucket")) + .put("base_path", System.getProperty("test.google.base", "/")) + ) + .get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); } } diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 021adc68d33be..cd6eb5357ea97 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -96,7 +96,7 @@ class GoogleCloudStorageBlobStore implements BlobStore { } else { final int largeBlobThresholdByteSize; try { - largeBlobThresholdByteSize = Integer.parseInt(largeBlobThresholdByteSizeProperty); + largeBlobThresholdByteSize = Integer.parseInt(largeBlobThresholdByteSizeProperty); } catch (final 
NumberFormatException e) { throw new IllegalArgumentException("failed to parse " + key + " having value [" + largeBlobThresholdByteSizeProperty + "]"); } @@ -114,11 +114,13 @@ class GoogleCloudStorageBlobStore implements BlobStore { private final GoogleCloudStorageOperationsStats stats; private final int bufferSize; - GoogleCloudStorageBlobStore(String bucketName, - String clientName, - String repositoryName, - GoogleCloudStorageService storageService, - int bufferSize) { + GoogleCloudStorageBlobStore( + String bucketName, + String clientName, + String repositoryName, + GoogleCloudStorageService storageService, + int bufferSize + ) { this.bucketName = bucketName; this.clientName = clientName; this.repositoryName = repositoryName; @@ -164,23 +166,26 @@ Map listBlobsByPrefix(String path, String prefix) throws I final String pathPrefix = buildKey(path, prefix); final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); SocketAccess.doPrivilegedVoidIOException( - () -> client().list(bucketName, BlobListOption.currentDirectory(), BlobListOption.prefix(pathPrefix)).iterateAll().forEach( - blob -> { + () -> client().list(bucketName, BlobListOption.currentDirectory(), BlobListOption.prefix(pathPrefix)) + .iterateAll() + .forEach(blob -> { assert blob.getName().startsWith(path); if (blob.isDirectory() == false) { final String suffixName = blob.getName().substring(path.length()); mapBuilder.put(suffixName, new PlainBlobMetadata(suffixName, blob.getSize())); } - })); + }) + ); return mapBuilder.immutableMap(); } Map listChildren(BlobPath path) throws IOException { final String pathStr = path.buildAsString(); final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); - SocketAccess.doPrivilegedVoidIOException - (() -> client().list(bucketName, BlobListOption.currentDirectory(), BlobListOption.prefix(pathStr)).iterateAll().forEach( - blob -> { + SocketAccess.doPrivilegedVoidIOException( + () -> client().list(bucketName, BlobListOption.currentDirectory(), BlobListOption.prefix(pathStr)) + .iterateAll() + .forEach(blob -> { if (blob.isDirectory()) { assert blob.getName().startsWith(pathStr); assert blob.getName().endsWith("/"); @@ -190,7 +195,8 @@ Map listChildren(BlobPath path) throws IOException { mapBuilder.put(suffixName, new GoogleCloudStorageBlobContainer(path.add(suffixName), this)); } } - })); + }) + ); return mapBuilder.immutableMap(); } @@ -234,8 +240,12 @@ InputStream readBlob(String blobName, long position, long length) throws IOExcep if (length == 0) { return new ByteArrayInputStream(new byte[0]); } else { - return new GoogleCloudStorageRetryingInputStream(client(), BlobId.of(bucketName, blobName), position, - Math.addExact(position, length - 1)); + return new GoogleCloudStorageRetryingInputStream( + client(), + BlobId.of(bucketName, blobName), + position, + Math.addExact(position, length - 1) + ); } } @@ -268,16 +278,16 @@ long getLargeBlobThresholdInBytes() { * @param size expected size of the blob to be written * @param failIfAlreadyExists whether to throw a FileAlreadyExistsException if the given blob already exists */ - private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream, long size, boolean failIfAlreadyExists) - throws IOException { + private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream, long size, boolean failIfAlreadyExists) throws IOException { // We retry 410 GONE errors to cover the unlikely but possible scenario where a resumable upload session becomes broken and // needs to be restarted from scratch. 
Given how unlikely a 410 error should be according to SLAs we retry only twice. assert inputStream.markSupported(); inputStream.mark(Integer.MAX_VALUE); final byte[] buffer = new byte[size < bufferSize ? Math.toIntExact(size) : bufferSize]; StorageException storageException = null; - final Storage.BlobWriteOption[] writeOptions = failIfAlreadyExists ? - new Storage.BlobWriteOption[]{Storage.BlobWriteOption.doesNotExist()} : new Storage.BlobWriteOption[0]; + final Storage.BlobWriteOption[] writeOptions = failIfAlreadyExists + ? new Storage.BlobWriteOption[] { Storage.BlobWriteOption.doesNotExist() } + : new Storage.BlobWriteOption[0]; for (int retry = 0; retry < 3; ++retry) { try { final WriteChannel writeChannel = SocketAccess.doPrivilegedIOException(() -> client().writer(blobInfo, writeOptions)); @@ -345,11 +355,10 @@ private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long final byte[] buffer = new byte[Math.toIntExact(blobSize)]; Streams.readFully(inputStream, buffer); try { - final Storage.BlobTargetOption[] targetOptions = failIfAlreadyExists ? - new Storage.BlobTargetOption[] { Storage.BlobTargetOption.doesNotExist() } : - new Storage.BlobTargetOption[0]; - SocketAccess.doPrivilegedVoidIOException( - () -> client().create(blobInfo, buffer, targetOptions)); + final Storage.BlobTargetOption[] targetOptions = failIfAlreadyExists + ? new Storage.BlobTargetOption[] { Storage.BlobTargetOption.doesNotExist() } + : new Storage.BlobTargetOption[0]; + SocketAccess.doPrivilegedVoidIOException(() -> client().create(blobInfo, buffer, targetOptions)); // We don't track this operation on the http layer as // we do with the GET/LIST operations since this operations // can trigger multiple underlying http requests but only one @@ -405,22 +414,20 @@ void deleteBlobsIgnoringIfNotExists(Collection blobNames) throws IOExcep final AtomicReference ioe = new AtomicReference<>(); final StorageBatch batch = client().batch(); for (BlobId blob : blobIdsToDelete) { - batch.delete(blob).notify( - new BatchResult.Callback() { - @Override - public void success(Boolean result) { - } - - @Override - public void error(StorageException exception) { - if (exception.getCode() != HTTP_NOT_FOUND) { - failedBlobs.add(blob); - if (ioe.compareAndSet(null, exception) == false) { - ioe.get().addSuppressed(exception); - } + batch.delete(blob).notify(new BatchResult.Callback() { + @Override + public void success(Boolean result) {} + + @Override + public void error(StorageException exception) { + if (exception.getCode() != HTTP_NOT_FOUND) { + failedBlobs.add(blob); + if (ioe.compareAndSet(null, exception) == false) { + ioe.get().addSuppressed(exception); } } - }); + } + }); } batch.submit(); diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java index 810e9076bf078..d15b00712dea4 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java @@ -60,38 +60,59 @@ public class GoogleCloudStorageClientSettings { private static final String PREFIX = "gcs.client."; /** A json Service Account file loaded from secure settings. 
*/ - static final Setting.AffixSetting CREDENTIALS_FILE_SETTING = Setting.affixKeySetting(PREFIX, "credentials_file", - key -> SecureSetting.secureFile(key, null)); + static final Setting.AffixSetting CREDENTIALS_FILE_SETTING = Setting.affixKeySetting( + PREFIX, + "credentials_file", + key -> SecureSetting.secureFile(key, null) + ); /** An override for the Storage endpoint to connect to. */ - static final Setting.AffixSetting ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint", - key -> Setting.simpleString(key, Setting.Property.NodeScope)); + static final Setting.AffixSetting ENDPOINT_SETTING = Setting.affixKeySetting( + PREFIX, + "endpoint", + key -> Setting.simpleString(key, Setting.Property.NodeScope) + ); /** An override for the Google Project ID. */ - static final Setting.AffixSetting PROJECT_ID_SETTING = Setting.affixKeySetting(PREFIX, "project_id", - key -> Setting.simpleString(key, Setting.Property.NodeScope)); + static final Setting.AffixSetting PROJECT_ID_SETTING = Setting.affixKeySetting( + PREFIX, + "project_id", + key -> Setting.simpleString(key, Setting.Property.NodeScope) + ); /** An override for the Token Server URI in the oauth flow. */ - static final Setting.AffixSetting TOKEN_URI_SETTING = Setting.affixKeySetting(PREFIX, "token_uri", - key -> new Setting<>(key, "", URI::create, Setting.Property.NodeScope)); + static final Setting.AffixSetting TOKEN_URI_SETTING = Setting.affixKeySetting( + PREFIX, + "token_uri", + key -> new Setting<>(key, "", URI::create, Setting.Property.NodeScope) + ); /** * The timeout to establish a connection. A value of {@code -1} corresponds to an infinite timeout. A value of {@code 0} * corresponds to the default timeout of the Google Cloud Storage Java Library. */ - static final Setting.AffixSetting CONNECT_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "connect_timeout", - key -> timeSetting(key, TimeValue.ZERO, TimeValue.MINUS_ONE, Setting.Property.NodeScope)); + static final Setting.AffixSetting CONNECT_TIMEOUT_SETTING = Setting.affixKeySetting( + PREFIX, + "connect_timeout", + key -> timeSetting(key, TimeValue.ZERO, TimeValue.MINUS_ONE, Setting.Property.NodeScope) + ); /** * The timeout to read data from an established connection. A value of {@code -1} corresponds to an infinite timeout. A value of * {@code 0} corresponds to the default timeout of the Google Cloud Storage Java Library. */ - static final Setting.AffixSetting READ_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "read_timeout", - key -> timeSetting(key, TimeValue.ZERO, TimeValue.MINUS_ONE, Setting.Property.NodeScope)); + static final Setting.AffixSetting READ_TIMEOUT_SETTING = Setting.affixKeySetting( + PREFIX, + "read_timeout", + key -> timeSetting(key, TimeValue.ZERO, TimeValue.MINUS_ONE, Setting.Property.NodeScope) + ); /** Name used by the client when it uses the Google Cloud JSON API. */ - static final Setting.AffixSetting APPLICATION_NAME_SETTING = Setting.affixKeySetting(PREFIX, "application_name", - key -> new Setting<>(key, "repository-gcs", Function.identity(), Setting.Property.NodeScope, Setting.Property.Deprecated)); + static final Setting.AffixSetting APPLICATION_NAME_SETTING = Setting.affixKeySetting( + PREFIX, + "application_name", + key -> new Setting<>(key, "repository-gcs", Function.identity(), Setting.Property.NodeScope, Setting.Property.Deprecated) + ); /** The credentials used by the client to connect to the Storage endpoint. 
*/ private final ServiceAccountCredentials credential; @@ -114,13 +135,15 @@ public class GoogleCloudStorageClientSettings { /** The token server URI. This leases access tokens in the oauth flow. */ private final URI tokenUri; - GoogleCloudStorageClientSettings(final ServiceAccountCredentials credential, - final String endpoint, - final String projectId, - final TimeValue connectTimeout, - final TimeValue readTimeout, - final String applicationName, - final URI tokenUri) { + GoogleCloudStorageClientSettings( + final ServiceAccountCredentials credential, + final String endpoint, + final String projectId, + final TimeValue connectTimeout, + final TimeValue readTimeout, + final String applicationName, + final URI tokenUri + ) { this.credential = credential; this.endpoint = endpoint; this.projectId = projectId; @@ -160,7 +183,7 @@ public URI getTokenUri() { public static Map load(final Settings settings) { final Map clients = new HashMap<>(); - for (final String clientName: settings.getGroups(PREFIX).keySet()) { + for (final String clientName : settings.getGroups(PREFIX).keySet()) { clients.put(clientName, getClientSettings(settings, clientName)); } if (clients.containsKey("default") == false) { diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageHttpStatsCollector.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageHttpStatsCollector.java index db92ab15487c1..7375d4edb9030 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageHttpStatsCollector.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageHttpStatsCollector.java @@ -49,20 +49,22 @@ final class GoogleCloudStorageHttpStatsCollector implements HttpResponseInterceptor { // The specification for the current API (v1) endpoints can be found at: // https://cloud.google.com/storage/docs/json_api/v1 - private static final java.util.List> trackerFactories = - List.of( - (bucket) -> - HttpRequestTracker.get(format(Locale.ROOT, "/download/storage/v1/b/%s/o/.+", bucket), - GoogleCloudStorageOperationsStats::trackGetOperation), - - (bucket) -> - HttpRequestTracker.get(format(Locale.ROOT, "/storage/v1/b/%s/o/.+", bucket), - GoogleCloudStorageOperationsStats::trackGetOperation), - - (bucket) -> - HttpRequestTracker.get(format(Locale.ROOT, "/storage/v1/b/%s/o", bucket), - GoogleCloudStorageOperationsStats::trackListOperation) - ); + private static final java.util.List> trackerFactories = List.of( + (bucket) -> HttpRequestTracker.get( + format(Locale.ROOT, "/download/storage/v1/b/%s/o/.+", bucket), + GoogleCloudStorageOperationsStats::trackGetOperation + ), + + (bucket) -> HttpRequestTracker.get( + format(Locale.ROOT, "/storage/v1/b/%s/o/.+", bucket), + GoogleCloudStorageOperationsStats::trackGetOperation + ), + + (bucket) -> HttpRequestTracker.get( + format(Locale.ROOT, "/storage/v1/b/%s/o", bucket), + GoogleCloudStorageOperationsStats::trackListOperation + ) + ); private final GoogleCloudStorageOperationsStats gcsOperationStats; private final java.util.List trackers; @@ -77,8 +79,7 @@ final class GoogleCloudStorageHttpStatsCollector implements HttpResponseIntercep @Override public void interceptResponse(final HttpResponse response) { // TODO keep track of unsuccessful requests in different entries - if (!response.isSuccessStatusCode()) - return; + if (!response.isSuccessStatusCode()) return; final HttpRequest request = response.getRequest(); for (HttpRequestTracker tracker 
: trackers) { @@ -102,16 +103,17 @@ private static final class HttpRequestTracker { private final Pattern pathPattern; private final Consumer statsTracker; - private HttpRequestTracker(final String method, - final String pathPattern, - final Consumer statsTracker) { + private HttpRequestTracker( + final String method, + final String pathPattern, + final Consumer statsTracker + ) { this.method = method; this.pathPattern = Pattern.compile(pathPattern); this.statsTracker = statsTracker; } - private static HttpRequestTracker get(final String pathPattern, - final Consumer statsConsumer) { + private static HttpRequestTracker get(final String pathPattern, final Consumer statsConsumer) { return new HttpRequestTracker("GET", pathPattern, statsConsumer); } @@ -124,16 +126,14 @@ private static HttpRequestTracker get(final String pathPattern, * @return {@code true} if the http request was tracked, {@code false} otherwise. */ private boolean track(final HttpRequest httpRequest, final GoogleCloudStorageOperationsStats stats) { - if (matchesCriteria(httpRequest) == false) - return false; + if (matchesCriteria(httpRequest) == false) return false; statsTracker.accept(stats); return true; } private boolean matchesCriteria(final HttpRequest httpRequest) { - return method.equalsIgnoreCase(httpRequest.getRequestMethod()) && - pathMatches(httpRequest.getUrl()); + return method.equalsIgnoreCase(httpRequest.getRequestMethod()) && pathMatches(httpRequest.getUrl()); } private boolean pathMatches(final GenericUrl url) { diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStoragePlugin.java index 22bc73fc77b49..7d51a6196e4c8 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -65,11 +65,22 @@ protected GoogleCloudStorageService createStorageService() { } @Override - public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry, - ClusterService clusterService, RecoverySettings recoverySettings) { - return Collections.singletonMap(GoogleCloudStorageRepository.TYPE, - metadata -> new GoogleCloudStorageRepository(metadata, namedXContentRegistry, this.storageService, clusterService, - recoverySettings)); + public Map getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + GoogleCloudStorageRepository.TYPE, + metadata -> new GoogleCloudStorageRepository( + metadata, + namedXContentRegistry, + this.storageService, + clusterService, + recoverySettings + ) + ); } @Override @@ -81,7 +92,8 @@ public List> getSettings() { GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING, GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING, GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING, - GoogleCloudStorageClientSettings.TOKEN_URI_SETTING); + GoogleCloudStorageClientSettings.TOKEN_URI_SETTING + ); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java index d8655f161c4f0..909dc066a493b 100644 --- 
a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -68,14 +68,17 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { static final String TYPE = "gcs"; - static final Setting BUCKET = - simpleString("bucket", Property.NodeScope, Property.Dynamic); - static final Setting BASE_PATH = - simpleString("base_path", Property.NodeScope, Property.Dynamic); - static final Setting COMPRESS = - boolSetting("compress", false, Property.NodeScope, Property.Dynamic); - static final Setting CHUNK_SIZE = - byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope, Property.Dynamic); + static final Setting BUCKET = simpleString("bucket", Property.NodeScope, Property.Dynamic); + static final Setting BASE_PATH = simpleString("base_path", Property.NodeScope, Property.Dynamic); + static final Setting COMPRESS = boolSetting("compress", false, Property.NodeScope, Property.Dynamic); + static final Setting CHUNK_SIZE = byteSizeSetting( + "chunk_size", + MAX_CHUNK_SIZE, + MIN_CHUNK_SIZE, + MAX_CHUNK_SIZE, + Property.NodeScope, + Property.Dynamic + ); static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); private final GoogleCloudStorageService storageService; @@ -89,7 +92,8 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { final NamedXContentRegistry namedXContentRegistry, final GoogleCloudStorageService storageService, final ClusterService clusterService, - final RecoverySettings recoverySettings) { + final RecoverySettings recoverySettings + ) { super(metadata, getSetting(COMPRESS, metadata), namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata)); this.storageService = storageService; @@ -111,8 +115,12 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { } private static Map buildLocation(RepositoryMetadata metadata) { - return org.opensearch.common.collect.Map.of("base_path", BASE_PATH.get(metadata.settings()), - "bucket", getSetting(BUCKET, metadata)); + return org.opensearch.common.collect.Map.of( + "base_path", + BASE_PATH.get(metadata.settings()), + "bucket", + getSetting(BUCKET, metadata) + ); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java index a7f6ffe53e02e..72d3e37466d09 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java @@ -126,33 +126,34 @@ private InputStream openStream() throws IOException { try { try { return RetryHelper.runWithRetries(() -> { - try { - return SocketAccess.doPrivilegedIOException(() -> { - final Get get = storage.objects().get(blobId.getBucket(), blobId.getName()); - get.setReturnRawInputStream(true); - - if (currentOffset > 0 || start > 0 || end < Long.MAX_VALUE - 1) { - get.getRequestHeaders().setRange("bytes=" + Math.addExact(start, currentOffset) + "-" + end); - } - final HttpResponse resp = get.executeMedia(); - final Long contentLength = resp.getHeaders().getContentLength(); - InputStream content = resp.getContent(); - if (contentLength != null) { - content = 
new ContentLengthValidatingInputStream(content, contentLength); - } - return content; - }); - } catch (IOException e) { - throw StorageException.translate(e); - } - }, client.getOptions().getRetrySettings(), BaseService.EXCEPTION_HANDLER, client.getOptions().getClock()); + try { + return SocketAccess.doPrivilegedIOException(() -> { + final Get get = storage.objects().get(blobId.getBucket(), blobId.getName()); + get.setReturnRawInputStream(true); + + if (currentOffset > 0 || start > 0 || end < Long.MAX_VALUE - 1) { + get.getRequestHeaders().setRange("bytes=" + Math.addExact(start, currentOffset) + "-" + end); + } + final HttpResponse resp = get.executeMedia(); + final Long contentLength = resp.getHeaders().getContentLength(); + InputStream content = resp.getContent(); + if (contentLength != null) { + content = new ContentLengthValidatingInputStream(content, contentLength); + } + return content; + }); + } catch (IOException e) { + throw StorageException.translate(e); + } + }, client.getOptions().getRetrySettings(), BaseService.EXCEPTION_HANDLER, client.getOptions().getClock()); } catch (RetryHelper.RetryHelperException e) { throw StorageException.translateAndThrow(e); } } catch (StorageException e) { if (e.getCode() == 404) { throw addSuppressedExceptions( - new NoSuchFileException("Blob object [" + blobId.getName() + "] not found: " + e.getMessage())); + new NoSuchFileException("Blob object [" + blobId.getName() + "] not found: " + e.getMessage()) + ); } throw addSuppressedExceptions(e); } @@ -249,8 +250,16 @@ private void reopenStreamOrFail(StorageException e) throws IOException { if (attempt >= maxAttempts) { throw addSuppressedExceptions(e); } - logger.debug(new ParameterizedMessage("failed reading [{}] at offset [{}], attempt [{}] of [{}], retrying", - blobId, currentOffset, attempt, maxAttempts), e); + logger.debug( + new ParameterizedMessage( + "failed reading [{}] at offset [{}], attempt [{}] of [{}], retrying", + blobId, + currentOffset, + attempt, + maxAttempts + ), + e + ); attempt += 1; if (failures.size() < MAX_SUPPRESSED_EXCEPTIONS) { failures.add(e); diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java index 9d79117ddaa08..8208dcfe597ff 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java @@ -93,9 +93,8 @@ public synchronized void refreshAndClearCache(Map<String, GoogleCloudStorageClientSettings> clientsSettings) { - logger.debug(() -> new ParameterizedMessage("creating GCS client with client_name [{}], endpoint [{}]", clientName, - settings.getHost())); + logger.debug( + () -> new ParameterizedMessage("creating GCS client with client_name [{}], endpoint [{}]", clientName, settings.getHost()) + ); final Storage storage = createClient(settings, stats); clientCache = MapBuilder.newMapBuilder(clientCache).put(repositoryName, storage).immutableMap(); return storage; @@ -136,8 +140,8 @@ synchronized void closeRepositoryClient(String repositoryName) { * @return a new client storage instance that can be used to manage objects * (blobs) */ - private Storage createClient(GoogleCloudStorageClientSettings clientSettings, - GoogleCloudStorageOperationsStats stats) throws IOException { + private Storage createClient(GoogleCloudStorageClientSettings clientSettings, GoogleCloudStorageOperationsStats stats) + throws IOException { final HttpTransport
httpTransport = SocketAccess.doPrivilegedIOException(() -> { final NetHttpTransport.Builder builder = new NetHttpTransport.Builder(); // requires java.lang.RuntimePermission "setFactory" @@ -148,18 +152,19 @@ private Storage createClient(GoogleCloudStorageClientSettings clientSettings, final GoogleCloudStorageHttpStatsCollector httpStatsCollector = new GoogleCloudStorageHttpStatsCollector(stats); - final HttpTransportOptions httpTransportOptions = new HttpTransportOptions(HttpTransportOptions.newBuilder() - .setConnectTimeout(toTimeout(clientSettings.getConnectTimeout())) - .setReadTimeout(toTimeout(clientSettings.getReadTimeout())) - .setHttpTransportFactory(() -> httpTransport)) { + final HttpTransportOptions httpTransportOptions = new HttpTransportOptions( + HttpTransportOptions.newBuilder() + .setConnectTimeout(toTimeout(clientSettings.getConnectTimeout())) + .setReadTimeout(toTimeout(clientSettings.getReadTimeout())) + .setHttpTransportFactory(() -> httpTransport) + ) { @Override public HttpRequestInitializer getHttpRequestInitializer(ServiceOptions<?, ?> serviceOptions) { HttpRequestInitializer requestInitializer = super.getHttpRequestInitializer(serviceOptions); return (httpRequest) -> { - if (requestInitializer != null) - requestInitializer.initialize(httpRequest); + if (requestInitializer != null) requestInitializer.initialize(httpRequest); httpRequest.setResponseInterceptor(httpStatsCollector); }; @@ -170,17 +175,19 @@ public HttpRequestInitializer getHttpRequestInitializer(ServiceOptions<?, ?> ser return storageOptions.getService(); } - StorageOptions createStorageOptions(final GoogleCloudStorageClientSettings clientSettings, - final HttpTransportOptions httpTransportOptions) { + StorageOptions createStorageOptions( + final GoogleCloudStorageClientSettings clientSettings, + final HttpTransportOptions httpTransportOptions + ) { final StorageOptions.Builder storageOptionsBuilder = StorageOptions.newBuilder() - .setTransportOptions(httpTransportOptions) - .setHeaderProvider(() -> { - final MapBuilder<String, String> mapBuilder = MapBuilder.newMapBuilder(); - if (Strings.hasLength(clientSettings.getApplicationName())) { - mapBuilder.put("user-agent", clientSettings.getApplicationName()); - } - return mapBuilder.immutableMap(); - }); + .setTransportOptions(httpTransportOptions) + .setHeaderProvider(() -> { + final MapBuilder<String, String> mapBuilder = MapBuilder.newMapBuilder(); + if (Strings.hasLength(clientSettings.getApplicationName())) { + mapBuilder.put("user-agent", clientSettings.getApplicationName()); + } + return mapBuilder.immutableMap(); + }); if (Strings.hasLength(clientSettings.getHost())) { storageOptionsBuilder.setHost(clientSettings.getHost()); } @@ -188,8 +195,10 @@ StorageOptions createStorageOptions(final GoogleCloudStorageClientSettings clien storageOptionsBuilder.setProjectId(clientSettings.getProjectId()); } if (clientSettings.getCredential() == null) { - logger.warn("\"Application Default Credentials\" are not supported out of the box." - + " Additional file system permissions have to be granted to the plugin."); + logger.warn( + "\"Application Default Credentials\" are not supported out of the box." + + " Additional file system permissions have to be granted to the plugin."
+ ); } else { ServiceAccountCredentials serviceAccountCredentials = clientSettings.getCredential(); // override token server URI diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index 1eecb2e4b70f9..6a589126a9466 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -108,10 +108,13 @@ private String httpServerUrl() { } public static void assumeNotJava8() { - assumeFalse("This test is flaky on jdk8 - we suspect a JDK bug to trigger some assertion in the HttpServer implementation used " + - "to emulate the server side logic of Google Cloud Storage. See https://bugs.openjdk.java.net/browse/JDK-8180754, " + - "https://github.com/elastic/elasticsearch/pull/51933 and https://github.com/elastic/elasticsearch/issues/52906 " + - "for more background on this issue.", JavaVersion.current().equals(JavaVersion.parse("8"))); + assumeFalse( + "This test is flaky on jdk8 - we suspect a JDK bug to trigger some assertion in the HttpServer implementation used " + + "to emulate the server side logic of Google Cloud Storage. See https://bugs.openjdk.java.net/browse/JDK-8180754, " + + "https://github.com/elastic/elasticsearch/pull/51933 and https://github.com/elastic/elasticsearch/issues/52906 " + + "for more background on this issue.", + JavaVersion.current().equals(JavaVersion.parse("8")) + ); } @BeforeClass @@ -135,10 +138,12 @@ protected Class<? extends Exception> unresponsiveExceptionType() { } @Override - protected BlobContainer createBlobContainer(final @Nullable Integer maxRetries, + protected BlobContainer createBlobContainer( + final @Nullable Integer maxRetries, final @Nullable TimeValue readTimeout, final @Nullable Boolean disableChunkedEncoding, - final @Nullable ByteSizeValue bufferSize) { + final @Nullable ByteSizeValue bufferSize + ) { final Settings.Builder clientSettings = Settings.builder(); final String client = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); clientSettings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace(client).getKey(), httpServerUrl()); @@ -153,8 +158,10 @@ protected BlobContainer createBlobContainer(final @Nullable Integer maxRetries, final GoogleCloudStorageService service = new GoogleCloudStorageService() { @Override - StorageOptions createStorageOptions(final GoogleCloudStorageClientSettings clientSettings, - final HttpTransportOptions httpTransportOptions) { + StorageOptions createStorageOptions( + final GoogleCloudStorageClientSettings clientSettings, + final HttpTransportOptions httpTransportOptions + ) { StorageOptions options = super.createStorageOptions(clientSettings, httpTransportOptions); RetrySettings.Builder retrySettingsBuilder = RetrySettings.newBuilder() .setTotalTimeout(options.getRetrySettings().getTotalTimeout()) @@ -178,8 +185,13 @@ StorageOptions createStorageOptions(final GoogleCloudStorageClientSettings clien service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(clientSettings.build())); httpServer.createContext("/token", new FakeOAuth2HttpHandler()); - final GoogleCloudStorageBlobStore blobStore = new GoogleCloudStorageBlobStore("bucket", client, "repo", service, - randomIntBetween(1, 8) * 1024); + final GoogleCloudStorageBlobStore blobStore = new
GoogleCloudStorageBlobStore( + "bucket", + client, + "repo", + service, + randomIntBetween(1, 8) * 1024 + ); return new GoogleCloudStorageBlobContainer(BlobPath.cleanPath(), blobStore); } @@ -286,8 +298,13 @@ public void testWriteLargeBlob() throws IOException { final byte[] data = randomBytes(defaultChunkSize * nbChunks + lastChunkSize); assertThat(data.length, greaterThan(GoogleCloudStorageBlobStore.LARGE_BLOB_THRESHOLD_BYTE_SIZE)); - logger.debug("resumable upload is composed of [{}] total chunks ([{}] chunks of length [{}] and last chunk of length [{}]", - totalChunks, nbChunks, defaultChunkSize, lastChunkSize); + logger.debug( + "resumable upload is composed of [{}] total chunks ([{}] chunks of length [{}] and last chunk of length [{}]", + totalChunks, + nbChunks, + defaultChunkSize, + lastChunkSize + ); final int nbErrors = 2; // we want all requests to fail at least once final AtomicInteger countInits = new AtomicInteger(nbErrors); @@ -311,8 +328,11 @@ public void testWriteLargeBlob() throws IOException { if (countInits.decrementAndGet() <= 0) { byte[] response = requestBody.utf8ToString().getBytes(UTF_8); exchange.getResponseHeaders().add("Content-Type", "application/json"); - exchange.getResponseHeaders().add("Location", httpServerUrl() + - "/upload/storage/v1/b/bucket/o?uploadType=resumable&upload_id=" + sessionUploadId.get()); + exchange.getResponseHeaders() + .add( + "Location", + httpServerUrl() + "/upload/storage/v1/b/bucket/o?uploadType=resumable&upload_id=" + sessionUploadId.get() + ); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); exchange.getResponseBody().write(response); return; diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index 8e6fef1e83fd0..054782c0a3cda 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -100,8 +100,7 @@ public void testDeleteBlobsIgnoringIfNotExistsThrowsIOException() throws Excepti final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class); when(storageService.client(any(String.class), any(String.class), any(GoogleCloudStorageOperationsStats.class))).thenReturn(storage); - try (BlobStore store = new GoogleCloudStorageBlobStore("bucket", "test", "repo", storageService, - randomIntBetween(1, 8) * 1024)) { + try (BlobStore store = new GoogleCloudStorageBlobStore("bucket", "test", "repo", storageService, randomIntBetween(1, 8) * 1024)) { final BlobContainer container = store.blobContainer(new BlobPath()); IOException e = expectThrows(IOException.class, () -> container.deleteBlobsIgnoringIfNotExists(blobs)); diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java index 137ea65a9bc6f..8dbf6b0ff2873 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java @@ -76,8 +76,9 @@ public void testLoad() throws Exception { final Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings>
randomClients = randomClients(nbClients, deprecationWarnings); final Map<String, GoogleCloudStorageClientSettings> expectedClientsSettings = randomClients.v1(); - final Map<String, GoogleCloudStorageClientSettings> actualClientsSettings = GoogleCloudStorageClientSettings - .load(randomClients.v2()); + final Map<String, GoogleCloudStorageClientSettings> actualClientsSettings = GoogleCloudStorageClientSettings.load( + randomClients.v2() + ); assertEquals(expectedClientsSettings.size(), actualClientsSettings.size()); for (final String clientName : expectedClientsSettings.keySet()) { @@ -110,17 +111,23 @@ public void testProjectIdDefaultsToCredentials() throws Exception { final String clientName = randomAlphaOfLength(5); final Tuple<ServiceAccountCredentials, byte[]> credentials = randomCredential(clientName); final ServiceAccountCredentials credential = credentials.v1(); - final GoogleCloudStorageClientSettings googleCloudStorageClientSettings = new GoogleCloudStorageClientSettings(credential, - ENDPOINT_SETTING.getDefault(Settings.EMPTY), PROJECT_ID_SETTING.getDefault(Settings.EMPTY), - CONNECT_TIMEOUT_SETTING.getDefault(Settings.EMPTY), READ_TIMEOUT_SETTING.getDefault(Settings.EMPTY), - APPLICATION_NAME_SETTING.getDefault(Settings.EMPTY), new URI("")); + final GoogleCloudStorageClientSettings googleCloudStorageClientSettings = new GoogleCloudStorageClientSettings( + credential, + ENDPOINT_SETTING.getDefault(Settings.EMPTY), + PROJECT_ID_SETTING.getDefault(Settings.EMPTY), + CONNECT_TIMEOUT_SETTING.getDefault(Settings.EMPTY), + READ_TIMEOUT_SETTING.getDefault(Settings.EMPTY), + APPLICATION_NAME_SETTING.getDefault(Settings.EMPTY), + new URI("") + ); assertEquals(credential.getProjectId(), googleCloudStorageClientSettings.getProjectId()); } /** Generates a given number of GoogleCloudStorageClientSettings along with the Settings to build them from **/ - private Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings> randomClients(final int nbClients, - final List<Setting<?>> deprecationWarnings) - throws Exception { + private Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings> randomClients( + final int nbClients, + final List<Setting<?>> deprecationWarnings + ) throws Exception { final Map<String, GoogleCloudStorageClientSettings> expectedClients = new HashMap<>(); final Settings.Builder settings = Settings.builder(); @@ -143,10 +150,12 @@ private Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings> randomCli } /** Generates a random GoogleCloudStorageClientSettings along with the Settings to build it **/ - private static GoogleCloudStorageClientSettings randomClient(final String clientName, - final Settings.Builder settings, - final MockSecureSettings secureSettings, - final List<Setting<?>> deprecationWarnings) throws Exception { + private static GoogleCloudStorageClientSettings randomClient( + final String clientName, + final Settings.Builder settings, + final MockSecureSettings secureSettings, + final List<Setting<?>> deprecationWarnings + ) throws Exception { final Tuple<ServiceAccountCredentials, byte[]> credentials = randomCredential(clientName); final ServiceAccountCredentials credential = credentials.v1(); @@ -154,8 +163,14 @@ private static GoogleCloudStorageClientSettings randomClient(final String client String endpoint; if (randomBoolean()) { - endpoint = randomFrom("http://www.opensearch.org", "http://metadata.google.com:88/oauth", "https://www.googleapis.com", - "https://www.opensearch.org:443", "http://localhost:8443", "https://www.googleapis.com/oauth/token"); + endpoint = randomFrom( + "http://www.opensearch.org", + "http://metadata.google.com:88/oauth", + "https://www.googleapis.com", + "https://www.opensearch.org:443", + "http://localhost:8443", + "https://www.googleapis.com/oauth/token" + ); settings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint); } else { endpoint = ENDPOINT_SETTING.getDefault(Settings.EMPTY); @@ -194,8
+209,15 @@ private static GoogleCloudStorageClientSettings randomClient(final String client applicationName = APPLICATION_NAME_SETTING.getDefault(Settings.EMPTY); } - return new GoogleCloudStorageClientSettings(credential, endpoint, projectId, connectTimeout, readTimeout, applicationName, - new URI("")); + return new GoogleCloudStorageClientSettings( + credential, + endpoint, + projectId, + connectTimeout, + readTimeout, + applicationName, + new URI("") + ); } /** Generates a random GoogleCredential along with its corresponding Service Account file provided as a byte array **/ @@ -209,15 +231,23 @@ private static Tuple<ServiceAccountCredentials, byte[]> randomCredential(final S credentialBuilder.setPrivateKeyId("private_key_id_" + clientName); credentialBuilder.setScopes(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL)); final String encodedPrivateKey = Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded()); - final String serviceAccount = "{\"type\":\"service_account\"," + - "\"project_id\":\"project_id_" + clientName + "\"," + - "\"private_key_id\":\"private_key_id_" + clientName + "\"," + - "\"private_key\":\"-----BEGIN PRIVATE KEY-----\\n" + - encodedPrivateKey + - "\\n-----END PRIVATE KEY-----\\n\"," + - "\"client_email\":\"" + clientName + "\"," + - "\"client_id\":\"id_" + clientName + "\"" + - "}"; + final String serviceAccount = "{\"type\":\"service_account\"," + + "\"project_id\":\"project_id_" + + clientName + + "\"," + + "\"private_key_id\":\"private_key_id_" + + clientName + + "\"," + + "\"private_key\":\"-----BEGIN PRIVATE KEY-----\\n" + + encodedPrivateKey + + "\\n-----END PRIVATE KEY-----\\n\"," + + "\"client_email\":\"" + + clientName + + "\"," + + "\"client_id\":\"id_" + + clientName + + "\"" + + "}"; return Tuple.tuple(credentialBuilder.build(), serviceAccount.getBytes(StandardCharsets.UTF_8)); } diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java index d7405aab86cb3..7792a5f51c459 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -63,36 +63,50 @@ public void testClientInitializer() throws Exception { final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); final String applicationName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final String endpoint = randomFrom("http://", "https://") - + randomFrom("www.opensearch.org", "www.googleapis.com", "localhost/api", "google.com/oauth") - + ":" + randomIntBetween(1, 65535); + + randomFrom("www.opensearch.org", "www.googleapis.com", "localhost/api", "google.com/oauth") + + ":" + + randomIntBetween(1, 65535); final String projectIdName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final Settings settings = Settings.builder() - .put(GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), - connectTimeValue.getStringRep()) - .put(GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), - readTimeValue.getStringRep()) - .put(GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName).getKey(), - applicationName) -
.put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint) - .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) - .build(); + .put( + GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), + connectTimeValue.getStringRep() + ) + .put( + GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), + readTimeValue.getStringRep() + ) + .put( + GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName).getKey(), + applicationName + ) + .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint) + .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) + .build(); final GoogleCloudStorageService service = new GoogleCloudStorageService(); service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(settings)); GoogleCloudStorageOperationsStats statsCollector = new GoogleCloudStorageOperationsStats("bucket"); - final IllegalArgumentException e = - expectThrows(IllegalArgumentException.class, () -> service.client("another_client", "repo", statsCollector)); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> service.client("another_client", "repo", statsCollector) + ); assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); assertSettingDeprecationsAndWarnings( - new Setting<?>[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) }); + new Setting<?>[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) } + ); final Storage storage = service.client(clientName, "repo", statsCollector); assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); assertThat(storage.getOptions().getHost(), Matchers.is(endpoint)); assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); - assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(), - Matchers.is((int) connectTimeValue.millis())); - assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(), - Matchers.is((int) readTimeValue.millis())); + assertThat( + ((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(), + Matchers.is((int) connectTimeValue.millis()) + ); + assertThat( + ((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(), + Matchers.is((int) readTimeValue.millis()) + ); assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class)); } @@ -113,8 +127,10 @@ public void testReinitClientSettings() throws Exception { final Storage client12 = storageService.client("gcs2", "repo2", statsCollector); assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); // client 3 is missing - final IllegalArgumentException e1 = - expectThrows(IllegalArgumentException.class, () -> storageService.client("gcs3", "repo3", statsCollector)); + final IllegalArgumentException e1 = expectThrows( + IllegalArgumentException.class, + () -> storageService.client("gcs3", "repo3",
statsCollector) + ); assertThat(e1.getMessage(), containsString("Unknown client name [gcs3].")); // update client settings plugin.reload(settings2); @@ -126,8 +142,10 @@ public void testReinitClientSettings() throws Exception { // old client 2 not changed assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); // new client2 is gone - final IllegalArgumentException e2 = - expectThrows(IllegalArgumentException.class, () -> storageService.client("gcs2", "repo2", statsCollector)); + final IllegalArgumentException e2 = expectThrows( + IllegalArgumentException.class, + () -> storageService.client("gcs2", "repo2", statsCollector) + ); assertThat(e2.getMessage(), containsString("Unknown client name [gcs2].")); // client 3 emerged final Storage client23 = storageService.client("gcs3", "repo3", statsCollector); @@ -142,12 +160,13 @@ public void testClientsAreNotSharedAcrossRepositories() throws Exception { try (GoogleCloudStoragePlugin plugin = new GoogleCloudStoragePlugin(settings)) { final GoogleCloudStorageService storageService = plugin.storageService; - final Storage repo1Client = - storageService.client("gcs1", "repo1", new GoogleCloudStorageOperationsStats("bucket")); - final Storage repo2Client = - storageService.client("gcs1", "repo2", new GoogleCloudStorageOperationsStats("bucket")); - final Storage repo1ClientSecondInstance = - storageService.client("gcs1", "repo1", new GoogleCloudStorageOperationsStats("bucket")); + final Storage repo1Client = storageService.client("gcs1", "repo1", new GoogleCloudStorageOperationsStats("bucket")); + final Storage repo2Client = storageService.client("gcs1", "repo2", new GoogleCloudStorageOperationsStats("bucket")); + final Storage repo1ClientSecondInstance = storageService.client( + "gcs1", + "repo1", + new GoogleCloudStorageOperationsStats("bucket") + ); assertNotSame(repo1Client, repo2Client); assertSame(repo1Client, repo1ClientSecondInstance); @@ -160,13 +179,13 @@ private byte[] serviceAccountFileContent(String projectId) throws Exception { final KeyPair keyPair = keyPairGenerator.generateKeyPair(); final String encodedKey = Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded()); final XContentBuilder serviceAccountBuilder = jsonBuilder().startObject() - .field("type", "service_account") - .field("project_id", projectId) - .field("private_key_id", UUID.randomUUID().toString()) - .field("private_key", "-----BEGIN PRIVATE KEY-----\n" + encodedKey + "\n-----END PRIVATE KEY-----\n") - .field("client_email", "integration_test@appspot.gserviceaccount.com") - .field("client_id", "client_id") - .endObject(); + .field("type", "service_account") + .field("project_id", projectId) + .field("private_key_id", UUID.randomUUID().toString()) + .field("private_key", "-----BEGIN PRIVATE KEY-----\n" + encodedKey + "\n-----END PRIVATE KEY-----\n") + .field("client_email", "integration_test@appspot.gserviceaccount.com") + .field("client_id", "client_id") + .endObject(); return BytesReference.toBytes(BytesReference.bytes(serviceAccountBuilder)); } diff --git a/plugins/repository-gcs/src/yamlRestTest/java/org/opensearch/repositories/gcs/RepositoryGcsClientYamlTestSuiteIT.java b/plugins/repository-gcs/src/yamlRestTest/java/org/opensearch/repositories/gcs/RepositoryGcsClientYamlTestSuiteIT.java index 5835727af6c4d..6be7005b0e94d 100644 --- a/plugins/repository-gcs/src/yamlRestTest/java/org/opensearch/repositories/gcs/RepositoryGcsClientYamlTestSuiteIT.java +++ 
b/plugins/repository-gcs/src/yamlRestTest/java/org/opensearch/repositories/gcs/RepositoryGcsClientYamlTestSuiteIT.java @@ -49,4 +49,3 @@ public static Iterable<Object[]> parameters() throws Exception { return createParameters(); } } - diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java index 293940903ec89..dcbd52d311230 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java @@ -116,8 +116,8 @@ public InputStream readBlob(String blobName) throws IOException { // HDFSPrivilegedInputSteam which will ensure that underlying methods will // be called with the proper privileges. try { - return store.execute(fileContext -> - new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext) + return store.execute( + fileContext -> new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext) ); } catch (FileNotFoundException fnfe) { throw new NoSuchFileException("[" + blobName + "] blob not found"); @@ -133,7 +133,8 @@ public InputStream readBlob(String blobName, long position, long length) { public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { Path blob = new Path(path, blobName); // we pass CREATE, which means it fails if a blob already exists. - final EnumSet<CreateFlag> flags = failIfAlreadyExists ? EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK) + final EnumSet<CreateFlag> flags = failIfAlreadyExists + ? EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK) : EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK); store.execute((Operation<Void>) fileContext -> { try { @@ -161,8 +162,13 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS }); } - private void writeToPath(InputStream inputStream, long blobSize, FileContext fileContext, Path blobPath, - EnumSet<CreateFlag> createFlags) throws IOException { + private void writeToPath( + InputStream inputStream, + long blobSize, + FileContext fileContext, + Path blobPath, + EnumSet<CreateFlag> createFlags + ) throws IOException { final byte[] buffer = new byte[blobSize < bufferSize ?
Math.toIntExact(blobSize) : bufferSize]; try (FSDataOutputStream stream = fileContext.create(blobPath, createFlags, CreateOpts.bufferSize(buffer.length))) { int bytesRead; @@ -174,8 +180,9 @@ private void writeToPath(InputStream inputStream, long blobSize, FileContext fil @Override public Map<String, BlobMetadata> listBlobsByPrefix(@Nullable final String prefix) throws IOException { - FileStatus[] files = store.execute(fileContext -> fileContext.util().listStatus(path, - path -> prefix == null || path.getName().startsWith(prefix))); + FileStatus[] files = store.execute( + fileContext -> fileContext.util().listStatus(path, path -> prefix == null || path.getName().startsWith(prefix)) + ); Map<String, BlobMetadata> map = new LinkedHashMap<>(); for (FileStatus file : files) { if (file.isFile()) { diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java index d7813f0430cb9..4b715a509a195 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java @@ -115,8 +115,9 @@ private static Void eagerInit() { KerberosInfo info = SecurityUtil.getKerberosInfo(ClientNamenodeProtocolPB.class, null); // Make sure that the correct class loader was installed. if (info == null) { - throw new RuntimeException("Could not initialize SecurityUtil: " + - "Unable to find services for [org.apache.hadoop.security.SecurityInfo]"); + throw new RuntimeException( + "Could not initialize SecurityUtil: " + "Unable to find services for [org.apache.hadoop.security.SecurityInfo]" + ); } } finally { Thread.currentThread().setContextClassLoader(oldCCL); @@ -125,9 +126,15 @@ private static Void eagerInit() { } @Override - public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry, - ClusterService clusterService, RecoverySettings recoverySettings) { - return Collections.singletonMap("hdfs", (metadata) -> new HdfsRepository(metadata, env, namedXContentRegistry, clusterService, - recoverySettings)); + public Map<String, Repository.Factory> getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + "hdfs", + (metadata) -> new HdfsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) + ); } } diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java index 127d7a75ff52d..3be6fba5322db 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java @@ -81,9 +81,9 @@ public HdfsRepository( final Environment environment, final NamedXContentRegistry namedXContentRegistry, final ClusterService clusterService, - final RecoverySettings recoverySettings) { - super(metadata, metadata.settings().getAsBoolean("compress", false), namedXContentRegistry, clusterService, - recoverySettings); + final RecoverySettings recoverySettings + ) { + super(metadata, metadata.settings().getAsBoolean("compress", false), namedXContentRegistry, clusterService, recoverySettings); this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); @@ -94,14 +94,24 @@ public
HdfsRepository( } uri = URI.create(uriSetting); if ("hdfs".equalsIgnoreCase(uri.getScheme()) == false) { - throw new IllegalArgumentException(String.format(Locale.ROOT, - "Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore", - uri.getScheme(), - uriSetting)); + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore", + uri.getScheme(), + uriSetting + ) + ); } if (Strings.hasLength(uri.getPath()) && uri.getPath().equals("/") == false) { - throw new IllegalArgumentException(String.format(Locale.ROOT, - "Use 'path' option to specify a path [%s], not the uri [%s] for hdfs snapshot/restore", uri.getPath(), uriSetting)); + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Use 'path' option to specify a path [%s], not the uri [%s] for hdfs snapshot/restore", + uri.getPath(), + uriSetting + ) + ); } pathSetting = getMetadata().settings().get("path"); @@ -111,7 +121,7 @@ public HdfsRepository( } } - private HdfsBlobStore createBlobstore(URI uri, String path, Settings repositorySettings) { + private HdfsBlobStore createBlobstore(URI uri, String path, Settings repositorySettings) { Configuration hadoopConfiguration = new Configuration(repositorySettings.getAsBoolean("load_defaults", true)); hadoopConfiguration.setClassLoader(HdfsRepository.class.getClassLoader()); hadoopConfiguration.reloadConfiguration(); @@ -147,10 +157,12 @@ private HdfsBlobStore createBlobstore(URI uri, String path, Settings repositoryS } }); - logger.debug("Using file-system [{}] for URI [{}], path [{}]", + logger.debug( + "Using file-system [{}] for URI [{}], path [{}]", fileContext.getDefaultFileSystem(), fileContext.getDefaultFileSystem().getUri(), - path); + path + ); try { return new HdfsBlobStore(fileContext, path, bufferSize, isReadOnly(), haEnabled); @@ -162,9 +174,8 @@ private HdfsBlobStore createBlobstore(URI uri, String path, Settings repositoryS private UserGroupInformation login(Configuration hadoopConfiguration, Settings repositorySettings) { // Validate the authentication method: AuthenticationMethod authMethod = SecurityUtil.getAuthenticationMethod(hadoopConfiguration); - if (authMethod.equals(AuthenticationMethod.SIMPLE) == false - && authMethod.equals(AuthenticationMethod.KERBEROS) == false) { - throw new RuntimeException("Unsupported authorization mode ["+authMethod+"]"); + if (authMethod.equals(AuthenticationMethod.SIMPLE) == false && authMethod.equals(AuthenticationMethod.KERBEROS) == false) { + throw new RuntimeException("Unsupported authorization mode [" + authMethod + "]"); } // Check if the user added a principal to use, and that there is a keytab file provided @@ -172,13 +183,18 @@ private UserGroupInformation login(Configuration hadoopConfiguration, Settings r // Check to see if the authentication method is compatible if (kerberosPrincipal != null && authMethod.equals(AuthenticationMethod.SIMPLE)) { - logger.warn("Hadoop authentication method is set to [SIMPLE], but a Kerberos principal is " + - "specified. Continuing with [KERBEROS] authentication."); + logger.warn( + "Hadoop authentication method is set to [SIMPLE], but a Kerberos principal is " + + "specified. Continuing with [KERBEROS] authentication." 
+ ); SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, hadoopConfiguration); } else if (kerberosPrincipal == null && authMethod.equals(AuthenticationMethod.KERBEROS)) { - throw new RuntimeException("HDFS Repository does not support [KERBEROS] authentication without " + - "a valid Kerberos principal and keytab. Please specify a principal in the repository settings with [" + - CONF_SECURITY_PRINCIPAL + "]."); + throw new RuntimeException( + "HDFS Repository does not support [KERBEROS] authentication without " + + "a valid Kerberos principal and keytab. Please specify a principal in the repository settings with [" + + CONF_SECURITY_PRINCIPAL + + "]." + ); } // Now we can initialize the UGI with the configuration. @@ -214,8 +230,11 @@ private static String preparePrincipal(String originalPrincipal) { } if (originalPrincipal.equals(finalPrincipal) == false) { - logger.debug("Found service principal. Converted original principal name [{}] to server principal [{}]", - originalPrincipal, finalPrincipal); + logger.debug( + "Found service principal. Converted original principal name [{}] to server principal [{}]", + originalPrincipal, + finalPrincipal + ); } } return finalPrincipal; @@ -239,9 +258,9 @@ private static String getHostName() { protected HdfsBlobStore createBlobStore() { // initialize our blobstore using elevated privileges. SpecialPermission.check(); - final HdfsBlobStore blobStore = - AccessController.doPrivileged((PrivilegedAction<HdfsBlobStore>) - () -> createBlobstore(uri, pathSetting, getMetadata().settings())); + final HdfsBlobStore blobStore = AccessController.doPrivileged( + (PrivilegedAction<HdfsBlobStore>) () -> createBlobstore(uri, pathSetting, getMetadata().settings()) + ); return blobStore; } diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java index 45f46a1d76136..9078e2b76cc6d 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java @@ -62,15 +62,14 @@ class HdfsSecurityContext { private static final Permission[] KERBEROS_AUTH_PERMISSIONS; static { // We can do FS ops with only a few elevated permissions: - SIMPLE_AUTH_PERMISSIONS = new Permission[]{ + SIMPLE_AUTH_PERMISSIONS = new Permission[] { new SocketPermission("*", "connect"), // 1) hadoop dynamic proxy is messy with access rules new ReflectPermission("suppressAccessChecks"), // 2) allow hadoop to add credentials to our Subject new AuthPermission("modifyPrivateCredentials"), // 3) RPC Engine requires this for re-establishing pooled connections over the lifetime of the client - new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read") - }; + new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read") }; // If Security is enabled, we need all the following elevated permissions: KERBEROS_AUTH_PERMISSIONS = new Permission[] { diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java index 91556f68f6eb3..e12b24f27dcdc 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java +++
b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java @@ -74,12 +74,12 @@ public void testHAFailoverWithRepository() throws Exception { String nn1Port = "10001"; String nn2Port = "10002"; if (ports.length() > 0) { - final Path path = PathUtils.get(ports); - final List<String> lines = AccessController.doPrivileged((PrivilegedExceptionAction<List<String>>) () -> { - return Files.readAllLines(path); - }); - nn1Port = lines.get(0); - nn2Port = lines.get(1); + final Path path = PathUtils.get(ports); + final List<String> lines = AccessController.doPrivileged( + (PrivilegedExceptionAction<List<String>>) () -> { return Files.readAllLines(path); } + ); + nn1Port = lines.get(0); + nn2Port = lines.get(1); } boolean securityEnabled = hdfsKerberosPrincipal != null; @@ -125,21 +125,26 @@ public void testHAFailoverWithRepository() throws Exception { { Request request = new Request("PUT", "/_snapshot/hdfs_ha_repo_read"); request.setJsonEntity( - "{" + - "\"type\":\"hdfs\"," + - "\"settings\":{" + - "\"uri\": \"hdfs://ha-hdfs/\",\n" + - "\"path\": \"/user/opensearch/existing/readonly-repository\"," + - "\"readonly\": \"true\"," + - securityCredentials(securityEnabled, esKerberosPrincipal) + - "\"conf.dfs.nameservices\": \"ha-hdfs\"," + - "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\"," + - "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:"+nn1Port+"\"," + - "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:"+nn2Port+"\"," + - "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": " + - "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" + - "}" + - "}"); + "{" + + "\"type\":\"hdfs\"," + + "\"settings\":{" + + "\"uri\": \"hdfs://ha-hdfs/\",\n" + + "\"path\": \"/user/opensearch/existing/readonly-repository\"," + + "\"readonly\": \"true\"," + + securityCredentials(securityEnabled, esKerberosPrincipal) + + "\"conf.dfs.nameservices\": \"ha-hdfs\"," + + "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\"," + + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:" + + nn1Port + + "\"," + + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:" + + nn2Port + + "\"," + + "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": " + + "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" + + "}" + + "}" + ); Response response = client.performRequest(request); Assert.assertEquals(200, response.getStatusLine().getStatusCode()); @@ -163,8 +168,7 @@ public void testHAFailoverWithRepository() throws Exception { private String securityCredentials(boolean securityEnabled, String kerberosPrincipal) { if (securityEnabled) { - return "\"security.principal\": \""+kerberosPrincipal+"\"," + - "\"conf.dfs.data.transfer.protection\": \"authentication\","; + return "\"security.principal\": \"" + kerberosPrincipal + "\"," + "\"conf.dfs.data.transfer.protection\": \"authentication\","; } else { return ""; } @@ -259,11 +263,11 @@ public int run(String[] argv) throws Exception { } public int transitionToStandby(String namenodeID) throws Exception { - return run(new String[]{"-transitionToStandby", namenodeID}); + return run(new String[] { "-transitionToStandby", namenodeID }); } public int transitionToActive(String namenodeID) throws Exception { - return run(new String[]{"-transitionToActive", namenodeID}); + return run(new String[] { "-transitionToActive", namenodeID }); } public void close() { diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java
b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index 7c45bd1bedea9..66677b0327191 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -60,14 +60,13 @@ import static org.opensearch.repositories.blobstore.OpenSearchBlobStoreRepositoryIntegTestCase.readBlobFully; import static org.opensearch.repositories.blobstore.OpenSearchBlobStoreRepositoryIntegTestCase.writeBlob; -@ThreadLeakFilters(filters = {HdfsClientThreadLeakFilter.class}) +@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class }) public class HdfsBlobStoreContainerTests extends OpenSearchTestCase { private FileContext createTestContext() { FileContext fileContext; try { - fileContext = AccessController.doPrivileged((PrivilegedExceptionAction<FileContext>) - () -> createContext(new URI("hdfs:///"))); + fileContext = AccessController.doPrivileged((PrivilegedExceptionAction<FileContext>) () -> createContext(new URI("hdfs:///"))); } catch (PrivilegedActionException e) { throw new RuntimeException(e.getCause()); } @@ -94,8 +93,7 @@ private FileContext createContext(URI uri) { try { Principal principal = (Principal) ctor.newInstance(System.getProperty("user.name")); - subject = new Subject(false, Collections.singleton(principal), - Collections.emptySet(), Collections.emptySet()); + subject = new Subject(false, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet()); } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) { throw new RuntimeException(e); } diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java index 0ef35c3a570c8..9196a8f2b0558 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java @@ -57,7 +57,8 @@ protected Settings repositorySettings() { .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) .put("path", "foo") .put("chunk_size", randomIntBetween(100, 1000) + "k") - .put("compress", randomBoolean()).build(); + .put("compress", randomBoolean()) + .build(); } @Override diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java index 4f11f2ea0cc8b..b9b0e9e87dd0c 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java @@ -49,8 +49,7 @@ */ public final class HdfsClientThreadLeakFilter implements ThreadFilter { - private static final String OFFENDING_THREAD_NAME = - "org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner"; + private static final String OFFENDING_THREAD_NAME = "org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner"; @Override public boolean reject(Thread t) { diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java
b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java index da10419f1123d..4e12de7cce212 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java @@ -61,15 +61,19 @@ protected SecureSettings credentials() { @Override protected void createRepository(String repoName) { - AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository(repoName) + AcknowledgedResponse putRepositoryResponse = client().admin() + .cluster() + .preparePutRepository(repoName) .setType("hdfs") - .setSettings(Settings.builder() - .put("uri", "hdfs:///") - .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) - .put("path", "foo") - .put("chunk_size", randomIntBetween(100, 1000) + "k") - .put("compress", randomBoolean()) - ).get(); + .setSettings( + Settings.builder() + .put("uri", "hdfs:///") + .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) + .put("path", "foo") + .put("chunk_size", randomIntBetween(100, 1000) + "k") + .put("compress", randomBoolean()) + ) + .get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); } diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java index 6eeba0fff7e72..02350499b1466 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java @@ -54,7 +54,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -@ThreadLeakFilters(filters = {HdfsClientThreadLeakFilter.class}) +@ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class }) public class HdfsTests extends OpenSearchSingleNodeTestCase { @Override @@ -66,15 +66,19 @@ public void testSimpleWorkflow() { assumeFalse("https://github.com/elastic/elasticsearch/issues/31498", JavaVersion.current().equals(JavaVersion.parse("11"))); Client client = client(); - AcknowledgedResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") - .setType("hdfs") - .setSettings(Settings.builder() - .put("uri", "hdfs:///") - .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) - .put("path", "foo") - .put("chunk_size", randomIntBetween(100, 1000) + "k") - .put("compress", randomBoolean()) - ).get(); + AcknowledgedResponse putRepositoryResponse = client.admin() + .cluster() + .preparePutRepository("test-repo") + .setType("hdfs") + .setSettings( + Settings.builder() + .put("uri", "hdfs:///") + .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) + .put("path", "foo") + .put("chunk_size", randomIntBetween(100, 1000) + "k") + .put("compress", randomBoolean()) + ) + .get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); createIndex("test-idx-1"); @@ -101,18 +105,15 @@ public void testSimpleWorkflow() { .setIndices("test-idx-*", "-test-idx-3") .get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + 
equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); - assertThat(client.admin() - .cluster() - .prepareGetSnapshots("test-repo") - .setSnapshots("test-snap") - .get() - .getSnapshots() - .get(0) - .state(), - equalTo(SnapshotState.SUCCESS)); + assertThat( + client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), + equalTo(SnapshotState.SUCCESS) + ); logger.info("--> delete some data"); for (int i = 0; i < 50; i++) { @@ -163,16 +164,13 @@ public void testSimpleWorkflow() { ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); assertThat(clusterState.getMetadata().hasIndex("test-idx-1"), equalTo(true)); assertThat(clusterState.getMetadata().hasIndex("test-idx-2"), equalTo(false)); - final BlobStoreRepository repo = - (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo"); + final BlobStoreRepository repo = (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo"); BlobStoreTestUtil.assertConsistency(repo, repo.threadPool().executor(ThreadPool.Names.GENERIC)); } public void testMissingUri() { try { - client().admin().cluster().preparePutRepository("test-repo") - .setType("hdfs") - .setSettings(Settings.EMPTY).get(); + client().admin().cluster().preparePutRepository("test-repo").setType("hdfs").setSettings(Settings.EMPTY).get(); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -182,10 +180,12 @@ public void testMissingUri() { public void testEmptyUri() { try { - client().admin().cluster().preparePutRepository("test-repo") + client().admin() + .cluster() + .preparePutRepository("test-repo") .setType("hdfs") - .setSettings(Settings.builder() - .put("uri", "/path").build()).get(); + .setSettings(Settings.builder().put("uri", "/path").build()) + .get(); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -195,10 +195,12 @@ public void testEmptyUri() { public void testNonHdfsUri() { try { - client().admin().cluster().preparePutRepository("test-repo") + client().admin() + .cluster() + .preparePutRepository("test-repo") .setType("hdfs") - .setSettings(Settings.builder() - .put("uri", "file:///").build()).get(); + .setSettings(Settings.builder().put("uri", "file:///").build()) + .get(); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -208,10 +210,12 @@ public void testNonHdfsUri() { public void testPathSpecifiedInHdfs() { try { - client().admin().cluster().preparePutRepository("test-repo") + client().admin() + .cluster() + .preparePutRepository("test-repo") .setType("hdfs") - .setSettings(Settings.builder() - .put("uri", "hdfs:///some/path").build()).get(); + .setSettings(Settings.builder().put("uri", "hdfs:///some/path").build()) + .get(); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -221,10 +225,12 @@ public void testPathSpecifiedInHdfs() { public void testMissingPath() { try { - client().admin().cluster().preparePutRepository("test-repo") + client().admin() + .cluster() + .preparePutRepository("test-repo") .setType("hdfs") - .setSettings(Settings.builder() - .put("uri", "hdfs:///").build()).get(); + .setSettings(Settings.builder().put("uri", "hdfs:///").build()) + .get(); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); diff --git 
a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/TestingFs.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/TestingFs.java index 31723db025608..a1bb644a3d745 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/TestingFs.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/TestingFs.java @@ -75,7 +75,7 @@ protected org.apache.hadoop.fs.Path getInitialWorkingDirectory() { @Override public void setPermission(org.apache.hadoop.fs.Path path, FsPermission permission) { - // no execution, thank you very much! + // no execution, thank you very much! } // pretend we don't support symlinks (which causes hadoop to want to do crazy things), @@ -125,6 +125,6 @@ public TestingFs(URI uri, Configuration configuration) throws URISyntaxException @Override public void checkPath(org.apache.hadoop.fs.Path path) { - // we do evil stuff, we admit it. + // we do evil stuff, we admit it. } } diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 4a28fbca5aea8..e31a9f8cf3856 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -159,22 +159,38 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testEnforcedCooldownPeriod() throws IOException { - final String repoName = createRepository(randomName(), Settings.builder().put(repositorySettings()) - .put(S3Repository.COOLDOWN_PERIOD.getKey(), TEST_COOLDOWN_PERIOD).build()); - - final SnapshotId fakeOldSnapshot = client().admin().cluster().prepareCreateSnapshot(repoName, "snapshot-old") - .setWaitForCompletion(true).setIndices().get().getSnapshotInfo().snapshotId(); + final String repoName = createRepository( + randomName(), + Settings.builder().put(repositorySettings()).put(S3Repository.COOLDOWN_PERIOD.getKey(), TEST_COOLDOWN_PERIOD).build() + ); + + final SnapshotId fakeOldSnapshot = client().admin() + .cluster() + .prepareCreateSnapshot(repoName, "snapshot-old") + .setWaitForCompletion(true) + .setIndices() + .get() + .getSnapshotInfo() + .snapshotId(); final RepositoriesService repositoriesService = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName); final RepositoryData repositoryData = getRepositoryData(repository); - final RepositoryData modifiedRepositoryData = repositoryData.withVersions(Collections.singletonMap(fakeOldSnapshot, - SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.minimumCompatibilityVersion())); - final BytesReference serialized = BytesReference.bytes(modifiedRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), - SnapshotsService.OLD_SNAPSHOT_FORMAT)); + final RepositoryData modifiedRepositoryData = repositoryData.withVersions( + Collections.singletonMap(fakeOldSnapshot, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.minimumCompatibilityVersion()) + ); + final BytesReference serialized = BytesReference.bytes( + modifiedRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), SnapshotsService.OLD_SNAPSHOT_FORMAT) + ); PlainActionFuture.get(f -> 
repository.threadPool().generic().execute(ActionRunnable.run(f, () -> { try (InputStream stream = serialized.streamInput()) { - repository.blobStore().blobContainer(repository.basePath()).writeBlobAtomic( - BlobStoreRepository.INDEX_FILE_PREFIX + modifiedRepositoryData.getGenId(), stream, serialized.length(), true); + repository.blobStore() + .blobContainer(repository.basePath()) + .writeBlobAtomic( + BlobStoreRepository.INDEX_FILE_PREFIX + modifiedRepositoryData.getGenId(), + stream, + serialized.length(), + true + ); } }))); @@ -209,8 +225,12 @@ public List> getSettings() { } @Override - protected S3Repository createRepository(RepositoryMetadata metadata, NamedXContentRegistry registry, - ClusterService clusterService, RecoverySettings recoverySettings) { + protected S3Repository createRepository( + RepositoryMetadata metadata, + NamedXContentRegistry registry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { return new S3Repository(metadata, registry, service, clusterService, recoverySettings) { @Override @@ -225,8 +245,7 @@ long getLargeBlobThresholdInBytes() { } @Override - void ensureMultiPartUploadSize(long blobSize) { - } + void ensureMultiPartUploadSize(long blobSize) {} }; } }; @@ -307,10 +326,9 @@ public void maybeTrack(final String request, Headers requestHeaders) { } private boolean isMultiPartUpload(String request) { - return Regex.simpleMatch("POST /*/*?uploads", request) || - Regex.simpleMatch("POST /*/*?*uploadId=*", request) || - Regex.simpleMatch("PUT /*/*?*uploadId=*", request); + return Regex.simpleMatch("POST /*/*?uploads", request) + || Regex.simpleMatch("POST /*/*?*uploadId=*", request) + || Regex.simpleMatch("PUT /*/*?*uploadId=*", request); } } } - diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java index c27a96a42fa5c..bc2839d066092 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -80,15 +80,23 @@ protected void createRepository(String repoName) { } else { // only test different storage classes when running against the default endpoint, i.e. 
a genuine S3 service if (randomBoolean()) { - final String storageClass - = randomFrom("standard", "reduced_redundancy", "standard_ia", "onezone_ia", "intelligent_tiering"); + final String storageClass = randomFrom( + "standard", + "reduced_redundancy", + "standard_ia", + "onezone_ia", + "intelligent_tiering" + ); logger.info("--> using storage_class [{}]", storageClass); settings.put("storage_class", storageClass); } } - AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + AcknowledgedResponse putRepositoryResponse = client().admin() + .cluster() + .preparePutRepository("test-repo") .setType("s3") - .setSettings(settings).get(); + .setSettings(settings) + .get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BasicSessionCredentials.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BasicSessionCredentials.java index 2472d9c43de00..2264dd6cde297 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BasicSessionCredentials.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BasicSessionCredentials.java @@ -58,9 +58,9 @@ public boolean equals(final Object o) { return false; } final S3BasicSessionCredentials that = (S3BasicSessionCredentials) o; - return sessionToken.equals(that.sessionToken) && - getAWSAccessKeyId().equals(that.getAWSAccessKeyId()) && - getAWSSecretKey().equals(that.getAWSSecretKey()); + return sessionToken.equals(that.sessionToken) + && getAWSAccessKeyId().equals(that.getAWSAccessKeyId()) + && getAWSSecretKey().equals(that.getAWSSecretKey()); } @Override diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index d9ad8caf2f814..5a9c03c0b2a37 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -103,7 +103,7 @@ public boolean blobExists(String blobName) { try (AmazonS3Reference clientReference = blobStore.clientReference()) { return SocketAccess.doPrivileged(() -> clientReference.client().doesObjectExist(blobStore.bucket(), buildKey(blobName))); } catch (final Exception e) { - throw new BlobStoreException("Failed to check if blob [" + blobName +"] exists", e); + throw new BlobStoreException("Failed to check if blob [" + blobName + "] exists", e); } } @@ -178,11 +178,11 @@ public DeleteResult delete() throws IOException { list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); } final List blobsToDelete = new ArrayList<>(); - list.getObjectSummaries().forEach(s3ObjectSummary -> { - deletedBlobs.incrementAndGet(); - deletedBytes.addAndGet(s3ObjectSummary.getSize()); - blobsToDelete.add(s3ObjectSummary.getKey()); - }); + list.getObjectSummaries().forEach(s3ObjectSummary -> { + deletedBlobs.incrementAndGet(); + deletedBytes.addAndGet(s3ObjectSummary.getSize()); + blobsToDelete.add(s3ObjectSummary.getKey()); + }); if (list.isTruncated()) { doDeleteBlobs(blobsToDelete, false); prevListing = list; @@ -220,7 +220,7 @@ private void doDeleteBlobs(List blobNames, boolean relative) throws IOEx final List partition = new ArrayList<>(); for (String key : outstanding) { partition.add(key); - if (partition.size() == MAX_BULK_DELETES ) { 
+ if (partition.size() == MAX_BULK_DELETES) { deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); partition.clear(); } @@ -231,8 +231,10 @@ private void doDeleteBlobs(List blobNames, boolean relative) throws IOEx SocketAccess.doPrivilegedVoid(() -> { AmazonClientException aex = null; for (DeleteObjectsRequest deleteRequest : deleteRequests) { - List keysInRequest = - deleteRequest.getKeys().stream().map(DeleteObjectsRequest.KeyVersion::getKey).collect(Collectors.toList()); + List keysInRequest = deleteRequest.getKeys() + .stream() + .map(DeleteObjectsRequest.KeyVersion::getKey) + .collect(Collectors.toList()); try { clientReference.client().deleteObjects(deleteRequest); outstanding.removeAll(keysInRequest); @@ -241,11 +243,18 @@ private void doDeleteBlobs(List blobNames, boolean relative) throws IOEx // first remove all keys that were sent in the request and then add back those that ran into an exception. outstanding.removeAll(keysInRequest); outstanding.addAll( - e.getErrors().stream().map(MultiObjectDeleteException.DeleteError::getKey).collect(Collectors.toSet())); + e.getErrors().stream().map(MultiObjectDeleteException.DeleteError::getKey).collect(Collectors.toSet()) + ); logger.warn( - () -> new ParameterizedMessage("Failed to delete some blobs {}", e.getErrors() - .stream().map(err -> "[" + err.getKey() + "][" + err.getCode() + "][" + err.getMessage() + "]") - .collect(Collectors.toList())), e); + () -> new ParameterizedMessage( + "Failed to delete some blobs {}", + e.getErrors() + .stream() + .map(err -> "[" + err.getKey() + "][" + err.getCode() + "][" + err.getMessage() + "]") + .collect(Collectors.toList()) + ), + e + ); aex = ExceptionsHelper.useOrSuppress(aex, e); } catch (AmazonClientException e) { // The AWS client threw any unexpected exception and did not execute the request at all so we do not @@ -270,8 +279,7 @@ private static DeleteObjectsRequest bulkDelete(String bucket, List blobs @Override public Map listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException { try (AmazonS3Reference clientReference = blobStore.clientReference()) { - return executeListing(clientReference, listObjectsRequest(blobNamePrefix == null ? keyPath : buildKey(blobNamePrefix))) - .stream() + return executeListing(clientReference, listObjectsRequest(blobNamePrefix == null ? 
keyPath : buildKey(blobNamePrefix))).stream() .flatMap(listing -> listing.getObjectSummaries().stream()) .map(summary -> new PlainBlobMetadata(summary.getKey().substring(keyPath.length()), summary.getSize())) .collect(Collectors.toMap(PlainBlobMetadata::name, Function.identity())); @@ -289,16 +297,16 @@ public Map listBlobs() throws IOException { public Map children() throws IOException { try (AmazonS3Reference clientReference = blobStore.clientReference()) { return executeListing(clientReference, listObjectsRequest(keyPath)).stream().flatMap(listing -> { - assert listing.getObjectSummaries().stream().noneMatch(s -> { - for (String commonPrefix : listing.getCommonPrefixes()) { - if (s.getKey().substring(keyPath.length()).startsWith(commonPrefix)) { - return true; - } + assert listing.getObjectSummaries().stream().noneMatch(s -> { + for (String commonPrefix : listing.getCommonPrefixes()) { + if (s.getKey().substring(keyPath.length()).startsWith(commonPrefix)) { + return true; } - return false; - }) : "Response contained children for listed common prefixes."; - return listing.getCommonPrefixes().stream(); - }) + } + return false; + }) : "Response contained children for listed common prefixes."; + return listing.getCommonPrefixes().stream(); + }) .map(prefix -> prefix.substring(keyPath.length())) .filter(name -> name.isEmpty() == false) // Stripping the trailing slash off of the common prefix @@ -331,7 +339,9 @@ private static List executeListing(AmazonS3Reference clientRefere } private ListObjectsRequest listObjectsRequest(String keyPath) { - return new ListObjectsRequest().withBucketName(blobStore.bucket()).withPrefix(keyPath).withDelimiter("/") + return new ListObjectsRequest().withBucketName(blobStore.bucket()) + .withPrefix(keyPath) + .withDelimiter("/") .withRequestMetricCollector(blobStore.listMetricCollector); } @@ -342,10 +352,8 @@ private String buildKey(String blobName) { /** * Uploads a blob using a single upload request */ - void executeSingleUpload(final S3BlobStore blobStore, - final String blobName, - final InputStream input, - final long blobSize) throws IOException { + void executeSingleUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) + throws IOException { // Extra safety checks if (blobSize > MAX_FILE_SIZE.getBytes()) { @@ -366,9 +374,7 @@ void executeSingleUpload(final S3BlobStore blobStore, putRequest.setRequestMetricCollector(blobStore.putMetricCollector); try (AmazonS3Reference clientReference = blobStore.clientReference()) { - SocketAccess.doPrivilegedVoid(() -> { - clientReference.client().putObject(putRequest); - }); + SocketAccess.doPrivilegedVoid(() -> { clientReference.client().putObject(putRequest); }); } catch (final AmazonClientException e) { throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e); } @@ -377,10 +383,8 @@ void executeSingleUpload(final S3BlobStore blobStore, /** * Uploads a blob using multipart upload requests. 
*/ - void executeMultipartUpload(final S3BlobStore blobStore, - final String blobName, - final InputStream input, - final long blobSize) throws IOException { + void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) + throws IOException { ensureMultiPartUploadSize(blobSize); final long partSize = blobStore.bufferSizeInBytes(); @@ -440,12 +444,17 @@ void executeMultipartUpload(final S3BlobStore blobStore, } if (bytesCount != blobSize) { - throw new IOException("Failed to execute multipart upload for [" + blobName + "], expected " + blobSize - + "bytes sent but got " + bytesCount); + throw new IOException( + "Failed to execute multipart upload for [" + blobName + "], expected " + blobSize + " bytes sent but got " + bytesCount + ); } - final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), - parts); + final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest( + bucketName, + blobName, + uploadId.get(), + parts + ); complRequest.setRequestMetricCollector(blobStore.multiPartUploadMetricCollector); SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest)); success = true; @@ -465,12 +474,14 @@ void executeMultipartUpload(final S3BlobStore blobStore, // non-static, package private for testing void ensureMultiPartUploadSize(final long blobSize) { if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) { - throw new IllegalArgumentException("Multipart upload request size [" + blobSize - + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART); + throw new IllegalArgumentException( + "Multipart upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART + ); } if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) { - throw new IllegalArgumentException("Multipart upload request size [" + blobSize - + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART); + throw new IllegalArgumentException( + "Multipart upload request size [" + blobSize + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART + ); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index 73a2af93a7f34..90f8bbb6612d4 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -78,9 +78,15 @@ class S3BlobStore implements BlobStore { final RequestMetricCollector putMetricCollector; final RequestMetricCollector multiPartUploadMetricCollector; - S3BlobStore(S3Service service, String bucket, boolean serverSideEncryption, - ByteSizeValue bufferSize, String cannedACL, String storageClass, - RepositoryMetadata repositoryMetadata) { + S3BlobStore( + S3Service service, + String bucket, + boolean serverSideEncryption, + ByteSizeValue bufferSize, + String cannedACL, + String storageClass, + RepositoryMetadata repositoryMetadata + ) { this.service = service; this.bucket = bucket; this.serverSideEncryption = serverSideEncryption; @@ -112,16 +118,14 @@ public void collectMetrics(Request request, Response response) { this.multiPartUploadMetricCollector = new RequestMetricCollector() { @Override public void collectMetrics(Request request, Response response) { - assert request.getHttpMethod().name().equals("PUT") - ||
request.getHttpMethod().name().equals("POST"); + assert request.getHttpMethod().name().equals("PUT") || request.getHttpMethod().name().equals("POST"); stats.postCount.addAndGet(getRequestCount(request)); } }; } private long getRequestCount(Request request) { - Number requestCount = request.getAWSRequestMetrics().getTimingInfo() - .getCounter(AWSRequestMetrics.Field.RequestCount.name()); + Number requestCount = request.getAWSRequestMetrics().getTimingInfo().getCounter(AWSRequestMetrics.Field.RequestCount.name()); if (requestCount == null) { logger.warn("Expected request count to be tracked for request [{}] but found no count.", request); return 0L; diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index b53fabaa90a98..805f48aae9b2d 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -61,68 +61,116 @@ final class S3ClientSettings { private static final String PLACEHOLDER_CLIENT = "placeholder"; /** The access key (ie login id) for connecting to s3. */ - static final Setting.AffixSetting ACCESS_KEY_SETTING = Setting.affixKeySetting(PREFIX, "access_key", - key -> SecureSetting.secureString(key, null)); + static final Setting.AffixSetting ACCESS_KEY_SETTING = Setting.affixKeySetting( + PREFIX, + "access_key", + key -> SecureSetting.secureString(key, null) + ); /** The secret key (ie password) for connecting to s3. */ - static final Setting.AffixSetting SECRET_KEY_SETTING = Setting.affixKeySetting(PREFIX, "secret_key", - key -> SecureSetting.secureString(key, null)); + static final Setting.AffixSetting SECRET_KEY_SETTING = Setting.affixKeySetting( + PREFIX, + "secret_key", + key -> SecureSetting.secureString(key, null) + ); /** The session token for connecting to s3. */ - static final Setting.AffixSetting SESSION_TOKEN_SETTING = Setting.affixKeySetting(PREFIX, "session_token", - key -> SecureSetting.secureString(key, null)); + static final Setting.AffixSetting SESSION_TOKEN_SETTING = Setting.affixKeySetting( + PREFIX, + "session_token", + key -> SecureSetting.secureString(key, null) + ); /** An override for the s3 endpoint to connect to. */ - static final Setting.AffixSetting ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint", - key -> new Setting<>(key, "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope)); + static final Setting.AffixSetting ENDPOINT_SETTING = Setting.affixKeySetting( + PREFIX, + "endpoint", + key -> new Setting<>(key, "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope) + ); /** The protocol to use to connect to s3. */ - static final Setting.AffixSetting PROTOCOL_SETTING = Setting.affixKeySetting(PREFIX, "protocol", - key -> new Setting<>(key, "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope)); + static final Setting.AffixSetting PROTOCOL_SETTING = Setting.affixKeySetting( + PREFIX, + "protocol", + key -> new Setting<>(key, "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope) + ); /** The host name of a proxy to connect to s3 through.
*/ - static final Setting.AffixSetting PROXY_HOST_SETTING = Setting.affixKeySetting(PREFIX, "proxy.host", - key -> Setting.simpleString(key, Property.NodeScope)); + static final Setting.AffixSetting PROXY_HOST_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.host", + key -> Setting.simpleString(key, Property.NodeScope) + ); /** The port of a proxy to connect to s3 through. */ - static final Setting.AffixSetting PROXY_PORT_SETTING = Setting.affixKeySetting(PREFIX, "proxy.port", - key -> Setting.intSetting(key, 80, 0, 1<<16, Property.NodeScope)); + static final Setting.AffixSetting PROXY_PORT_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.port", + key -> Setting.intSetting(key, 80, 0, 1 << 16, Property.NodeScope) + ); /** The username of a proxy to connect to s3 through. */ - static final Setting.AffixSetting PROXY_USERNAME_SETTING = Setting.affixKeySetting(PREFIX, "proxy.username", - key -> SecureSetting.secureString(key, null)); + static final Setting.AffixSetting PROXY_USERNAME_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.username", + key -> SecureSetting.secureString(key, null) + ); /** The password of a proxy to connect to s3 through. */ - static final Setting.AffixSetting PROXY_PASSWORD_SETTING = Setting.affixKeySetting(PREFIX, "proxy.password", - key -> SecureSetting.secureString(key, null)); + static final Setting.AffixSetting PROXY_PASSWORD_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.password", + key -> SecureSetting.secureString(key, null) + ); /** The socket timeout for connecting to s3. */ - static final Setting.AffixSetting READ_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "read_timeout", - key -> Setting.timeSetting(key, TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope)); + static final Setting.AffixSetting READ_TIMEOUT_SETTING = Setting.affixKeySetting( + PREFIX, + "read_timeout", + key -> Setting.timeSetting(key, TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope) + ); /** The number of retries to use when an s3 request fails. */ - static final Setting.AffixSetting MAX_RETRIES_SETTING = Setting.affixKeySetting(PREFIX, "max_retries", - key -> Setting.intSetting(key, ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry(), 0, Property.NodeScope)); + static final Setting.AffixSetting MAX_RETRIES_SETTING = Setting.affixKeySetting( + PREFIX, + "max_retries", + key -> Setting.intSetting(key, ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry(), 0, Property.NodeScope) + ); /** Whether retries should be throttled (ie use backoff). */ - static final Setting.AffixSetting USE_THROTTLE_RETRIES_SETTING = Setting.affixKeySetting(PREFIX, "use_throttle_retries", - key -> Setting.boolSetting(key, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope)); + static final Setting.AffixSetting USE_THROTTLE_RETRIES_SETTING = Setting.affixKeySetting( + PREFIX, + "use_throttle_retries", + key -> Setting.boolSetting(key, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope) + ); /** Whether the s3 client should use path style access. 
*/ - static final Setting.AffixSetting USE_PATH_STYLE_ACCESS = Setting.affixKeySetting(PREFIX, "path_style_access", - key -> Setting.boolSetting(key, false, Property.NodeScope)); + static final Setting.AffixSetting USE_PATH_STYLE_ACCESS = Setting.affixKeySetting( + PREFIX, + "path_style_access", + key -> Setting.boolSetting(key, false, Property.NodeScope) + ); /** Whether chunked encoding should be disabled or not (Default is false). */ - static final Setting.AffixSetting DISABLE_CHUNKED_ENCODING = Setting.affixKeySetting(PREFIX, "disable_chunked_encoding", - key -> Setting.boolSetting(key, false, Property.NodeScope)); + static final Setting.AffixSetting DISABLE_CHUNKED_ENCODING = Setting.affixKeySetting( + PREFIX, + "disable_chunked_encoding", + key -> Setting.boolSetting(key, false, Property.NodeScope) + ); /** An override for the s3 region to use for signing requests. */ - static final Setting.AffixSetting REGION = Setting.affixKeySetting(PREFIX, "region", - key -> new Setting<>(key, "", Function.identity(), Property.NodeScope)); + static final Setting.AffixSetting REGION = Setting.affixKeySetting( + PREFIX, + "region", + key -> new Setting<>(key, "", Function.identity(), Property.NodeScope) + ); /** An override for the signer to use. */ - static final Setting.AffixSetting SIGNER_OVERRIDE = Setting.affixKeySetting(PREFIX, "signer_override", - key -> new Setting<>(key, "", Function.identity(), Property.NodeScope)); + static final Setting.AffixSetting SIGNER_OVERRIDE = Setting.affixKeySetting( + PREFIX, + "signer_override", + key -> new Setting<>(key, "", Function.identity(), Property.NodeScope) + ); /** Credentials to authenticate with s3. */ final S3BasicCredentials credentials; @@ -168,10 +216,22 @@ final class S3ClientSettings { /** Signer override to use or empty string to use default. 
*/ final String signerOverride; - private S3ClientSettings(S3BasicCredentials credentials, String endpoint, Protocol protocol, - String proxyHost, int proxyPort, String proxyUsername, String proxyPassword, - int readTimeoutMillis, int maxRetries, boolean throttleRetries, - boolean pathStyleAccess, boolean disableChunkedEncoding, String region, String signerOverride) { + private S3ClientSettings( + S3BasicCredentials credentials, + String endpoint, + Protocol protocol, + String proxyHost, + int proxyPort, + String proxyUsername, + String proxyPassword, + int readTimeoutMillis, + int maxRetries, + boolean throttleRetries, + boolean pathStyleAccess, + boolean disableChunkedEncoding, + String region, + String signerOverride + ) { this.credentials = credentials; this.endpoint = endpoint; this.protocol = protocol; @@ -196,20 +256,26 @@ private S3ClientSettings(S3BasicCredentials credentials, String endpoint, Protoc */ S3ClientSettings refine(Settings repositorySettings) { // Normalize settings to placeholder client settings prefix so that we can use the affix settings directly - final Settings normalizedSettings = - Settings.builder().put(repositorySettings).normalizePrefix(PREFIX + PLACEHOLDER_CLIENT + '.').build(); + final Settings normalizedSettings = Settings.builder() + .put(repositorySettings) + .normalizePrefix(PREFIX + PLACEHOLDER_CLIENT + '.') + .build(); final String newEndpoint = getRepoSettingOrDefault(ENDPOINT_SETTING, normalizedSettings, endpoint); final Protocol newProtocol = getRepoSettingOrDefault(PROTOCOL_SETTING, normalizedSettings, protocol); final String newProxyHost = getRepoSettingOrDefault(PROXY_HOST_SETTING, normalizedSettings, proxyHost); final int newProxyPort = getRepoSettingOrDefault(PROXY_PORT_SETTING, normalizedSettings, proxyPort); final int newReadTimeoutMillis = Math.toIntExact( - getRepoSettingOrDefault(READ_TIMEOUT_SETTING, normalizedSettings, TimeValue.timeValueMillis(readTimeoutMillis)).millis()); + getRepoSettingOrDefault(READ_TIMEOUT_SETTING, normalizedSettings, TimeValue.timeValueMillis(readTimeoutMillis)).millis() + ); final int newMaxRetries = getRepoSettingOrDefault(MAX_RETRIES_SETTING, normalizedSettings, maxRetries); final boolean newThrottleRetries = getRepoSettingOrDefault(USE_THROTTLE_RETRIES_SETTING, normalizedSettings, throttleRetries); final boolean newPathStyleAccess = getRepoSettingOrDefault(USE_PATH_STYLE_ACCESS, normalizedSettings, pathStyleAccess); final boolean newDisableChunkedEncoding = getRepoSettingOrDefault( - DISABLE_CHUNKED_ENCODING, normalizedSettings, disableChunkedEncoding); + DISABLE_CHUNKED_ENCODING, + normalizedSettings, + disableChunkedEncoding + ); final S3BasicCredentials newCredentials; if (checkDeprecatedCredentials(repositorySettings)) { newCredentials = loadDeprecatedCredentials(repositorySettings); @@ -218,12 +284,18 @@ S3ClientSettings refine(Settings repositorySettings) { } final String newRegion = getRepoSettingOrDefault(REGION, normalizedSettings, region); final String newSignerOverride = getRepoSettingOrDefault(SIGNER_OVERRIDE, normalizedSettings, signerOverride); - if (Objects.equals(endpoint, newEndpoint) && protocol == newProtocol && Objects.equals(proxyHost, newProxyHost) - && proxyPort == newProxyPort && newReadTimeoutMillis == readTimeoutMillis && maxRetries == newMaxRetries - && newThrottleRetries == throttleRetries && Objects.equals(credentials, newCredentials) + if (Objects.equals(endpoint, newEndpoint) + && protocol == newProtocol + && Objects.equals(proxyHost, newProxyHost) + && proxyPort == 
newProxyPort + && newReadTimeoutMillis == readTimeoutMillis + && maxRetries == newMaxRetries + && newThrottleRetries == throttleRetries + && Objects.equals(credentials, newCredentials) && newPathStyleAccess == pathStyleAccess && newDisableChunkedEncoding == disableChunkedEncoding - && Objects.equals(region, newRegion) && Objects.equals(signerOverride, newSignerOverride)) { + && Objects.equals(region, newRegion) + && Objects.equals(signerOverride, newSignerOverride)) { return this; } return new S3ClientSettings( @@ -266,13 +338,23 @@ static Map load(Settings settings) { static boolean checkDeprecatedCredentials(Settings repositorySettings) { if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) { if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) { - throw new IllegalArgumentException("Repository setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() - + " must be accompanied by setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + "]"); + throw new IllegalArgumentException( + "Repository setting [" + + S3Repository.ACCESS_KEY_SETTING.getKey() + + "] must be accompanied by setting [" + + S3Repository.SECRET_KEY_SETTING.getKey() + + "]" + ); } return true; } else if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings)) { - throw new IllegalArgumentException("Repository setting [" + S3Repository.SECRET_KEY_SETTING.getKey() - + " must be accompanied by setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + "]"); + throw new IllegalArgumentException( + "Repository setting [" + + S3Repository.SECRET_KEY_SETTING.getKey() + + "] must be accompanied by setting [" + + S3Repository.ACCESS_KEY_SETTING.getKey() + + "]" + ); } return false; } @@ -280,16 +362,20 @@ static boolean checkDeprecatedCredentials(Settings repositorySettings) { // backcompat for reading keys out of repository settings (clusterState) private static S3BasicCredentials loadDeprecatedCredentials(Settings repositorySettings) { assert checkDeprecatedCredentials(repositorySettings); - try (SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings); - SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings)) { + try ( + SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings); + SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings) + ) { return new S3BasicCredentials(key.toString(), secret.toString()); } } private static S3BasicCredentials loadCredentials(Settings settings, String clientName) { - try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING); - SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING); - SecureString sessionToken = getConfigValue(settings, clientName, SESSION_TOKEN_SETTING)) { + try ( + SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING); + SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING); + SecureString sessionToken = getConfigValue(settings, clientName, SESSION_TOKEN_SETTING) + ) { if (accessKey.length() != 0) { if (secretKey.length() != 0) { if (sessionToken.length() != 0) { @@ -315,8 +401,10 @@ private static S3BasicCredentials loadCredentials(Settings settings, String clie // pkg private for tests /** Parse settings for a single client.
*/ static S3ClientSettings getClientSettings(final Settings settings, final String clientName) { - try (SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); - SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) { + try ( + SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); + SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING) + ) { return new S3ClientSettings( S3ClientSettings.loadCredentials(settings, clientName), getConfigValue(settings, clientName, ENDPOINT_SETTING), @@ -345,29 +433,41 @@ public boolean equals(final Object o) { return false; } final S3ClientSettings that = (S3ClientSettings) o; - return proxyPort == that.proxyPort && - readTimeoutMillis == that.readTimeoutMillis && - maxRetries == that.maxRetries && - throttleRetries == that.throttleRetries && - Objects.equals(credentials, that.credentials) && - Objects.equals(endpoint, that.endpoint) && - protocol == that.protocol && - Objects.equals(proxyHost, that.proxyHost) && - Objects.equals(proxyUsername, that.proxyUsername) && - Objects.equals(proxyPassword, that.proxyPassword) && - Objects.equals(disableChunkedEncoding, that.disableChunkedEncoding) && - Objects.equals(region, that.region) && - Objects.equals(signerOverride, that.signerOverride); + return proxyPort == that.proxyPort + && readTimeoutMillis == that.readTimeoutMillis + && maxRetries == that.maxRetries + && throttleRetries == that.throttleRetries + && Objects.equals(credentials, that.credentials) + && Objects.equals(endpoint, that.endpoint) + && protocol == that.protocol + && Objects.equals(proxyHost, that.proxyHost) + && Objects.equals(proxyUsername, that.proxyUsername) + && Objects.equals(proxyPassword, that.proxyPassword) + && Objects.equals(disableChunkedEncoding, that.disableChunkedEncoding) + && Objects.equals(region, that.region) + && Objects.equals(signerOverride, that.signerOverride); } @Override public int hashCode() { - return Objects.hash(credentials, endpoint, protocol, proxyHost, proxyPort, proxyUsername, proxyPassword, - readTimeoutMillis, maxRetries, throttleRetries, disableChunkedEncoding, region, signerOverride); + return Objects.hash( + credentials, + endpoint, + protocol, + proxyHost, + proxyPort, + proxyUsername, + proxyPassword, + readTimeoutMillis, + maxRetries, + throttleRetries, + disableChunkedEncoding, + region, + signerOverride + ); } - private static T getConfigValue(Settings settings, String clientName, - Setting.AffixSetting clientSetting) { + private static T getConfigValue(Settings settings, String clientName, Setting.AffixSetting clientSetting) { final Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); return concreteSetting.get(settings); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index 1de31ad879e82..c8377949a6842 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -104,11 +104,10 @@ class S3Repository extends MeteredBlobStoreRepository { private static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue( Math.max( ByteSizeUnit.MB.toBytes(5), // minimum value - Math.min( - ByteSizeUnit.MB.toBytes(100), - JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 
20)), - ByteSizeUnit.BYTES); - + Math.min(ByteSizeUnit.MB.toBytes(100), JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20) + ), + ByteSizeUnit.BYTES + ); static final Setting BUCKET_SETTING = Setting.simpleString("bucket"); @@ -146,14 +145,22 @@ class S3Repository extends MeteredBlobStoreRepository { * to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevent the * use of the Multipart API and may result in upload errors. Defaults to the minimum between 100MB and 5% of the heap size. */ - static final Setting BUFFER_SIZE_SETTING = - Setting.byteSizeSetting("buffer_size", DEFAULT_BUFFER_SIZE, MIN_PART_SIZE_USING_MULTIPART, MAX_PART_SIZE_USING_MULTIPART); + static final Setting BUFFER_SIZE_SETTING = Setting.byteSizeSetting( + "buffer_size", + DEFAULT_BUFFER_SIZE, + MIN_PART_SIZE_USING_MULTIPART, + MAX_PART_SIZE_USING_MULTIPART + ); /** * Big files can be broken down into chunks during snapshotting if needed. Defaults to 1g. */ - static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB), - new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB)); + static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting( + "chunk_size", + new ByteSizeValue(1, ByteSizeUnit.GB), + new ByteSizeValue(5, ByteSizeUnit.MB), + new ByteSizeValue(5, ByteSizeUnit.TB) + ); /** * When set to true metadata files are stored in compressed format. This setting doesn’t affect index @@ -190,7 +197,8 @@ class S3Repository extends MeteredBlobStoreRepository { "cooldown_period", new TimeValue(3, TimeUnit.MINUTES), new TimeValue(0, TimeUnit.MILLISECONDS), - Setting.Property.Dynamic); + Setting.Property.Dynamic + ); /** * Specifies the path within bucket to repository data. Defaults to root directory. @@ -225,17 +233,20 @@ class S3Repository extends MeteredBlobStoreRepository { * Constructs an s3 backed repository */ S3Repository( - final RepositoryMetadata metadata, - final NamedXContentRegistry namedXContentRegistry, - final S3Service service, - final ClusterService clusterService, - final RecoverySettings recoverySettings) { - super(metadata, + final RepositoryMetadata metadata, + final NamedXContentRegistry namedXContentRegistry, + final S3Service service, + final ClusterService clusterService, + final RecoverySettings recoverySettings + ) { + super( + metadata, COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry, clusterService, recoverySettings, - buildLocation(metadata)); + buildLocation(metadata) + ); this.service = service; this.repositoryMetadata = metadata; @@ -251,8 +262,17 @@ class S3Repository extends MeteredBlobStoreRepository { // We make sure that chunkSize is greater than or equal to bufferSize if (this.chunkSize.getBytes() < bufferSize.getBytes()) { - throw new RepositoryException(metadata.name(), CHUNK_SIZE_SETTING.getKey() + " (" + this.chunkSize + - ") can't be lower than " + BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ")."); + throw new RepositoryException( + metadata.name(), + CHUNK_SIZE_SETTING.getKey() + + " (" + + this.chunkSize + + ") can't be lower than " + + BUFFER_SIZE_SETTING.getKey() + + " (" + + bufferSize + + ")."
+ ); } final String basePath = BASE_PATH_SETTING.get(metadata.settings()); @@ -269,26 +289,33 @@ class S3Repository extends MeteredBlobStoreRepository { if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { // provided repository settings - deprecationLogger.deprecate("s3_repository_secret_settings", - "Using s3 access/secret key from repository settings. Instead " - + "store these in named clients and the opensearch keystore for secure settings."); + deprecationLogger.deprecate( + "s3_repository_secret_settings", + "Using s3 access/secret key from repository settings. Instead " + + "store these in named clients and the opensearch keystore for secure settings." + ); } coolDown = COOLDOWN_PERIOD.get(metadata.settings()); logger.debug( - "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", - bucket, - chunkSize, - serverSideEncryption, - bufferSize, - cannedACL, - storageClass); + "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", + bucket, + chunkSize, + serverSideEncryption, + bufferSize, + cannedACL, + storageClass + ); } private static Map buildLocation(RepositoryMetadata metadata) { - return org.opensearch.common.collect.Map.of("base_path", BASE_PATH_SETTING.get(metadata.settings()), - "bucket", BUCKET_SETTING.get(metadata.settings())); + return org.opensearch.common.collect.Map.of( + "base_path", + BASE_PATH_SETTING.get(metadata.settings()), + "bucket", + BUCKET_SETTING.get(metadata.settings()) + ); } /** @@ -298,20 +325,36 @@ private static Map buildLocation(RepositoryMetadata metadata) { private final AtomicReference finalizationFuture = new AtomicReference<>(); @Override - public void finalizeSnapshot(ShardGenerations shardGenerations, long repositoryStateId, Metadata clusterMetadata, - SnapshotInfo snapshotInfo, Version repositoryMetaVersion, - Function stateTransformer, - ActionListener listener) { + public void finalizeSnapshot( + ShardGenerations shardGenerations, + long repositoryStateId, + Metadata clusterMetadata, + SnapshotInfo snapshotInfo, + Version repositoryMetaVersion, + Function stateTransformer, + ActionListener listener + ) { if (SnapshotsService.useShardGenerations(repositoryMetaVersion) == false) { listener = delayedListener(listener); } - super.finalizeSnapshot(shardGenerations, repositoryStateId, clusterMetadata, snapshotInfo, repositoryMetaVersion, - stateTransformer, listener); + super.finalizeSnapshot( + shardGenerations, + repositoryStateId, + clusterMetadata, + snapshotInfo, + repositoryMetaVersion, + stateTransformer, + listener + ); } @Override - public void deleteSnapshots(Collection snapshotIds, long repositoryStateId, Version repositoryMetaVersion, - ActionListener listener) { + public void deleteSnapshots( + Collection snapshotIds, + long repositoryStateId, + Version repositoryMetaVersion, + ActionListener listener + ) { if (SnapshotsService.useShardGenerations(repositoryMetaVersion) == false) { listener = delayedListener(listener); } @@ -332,8 +375,12 @@ private ActionListener delayedListener(ActionListener listener) { public void onResponse(T response) { logCooldownInfo(); final Scheduler.Cancellable existing = finalizationFuture.getAndSet( - threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), - coolDown, ThreadPool.Names.SNAPSHOT)); + threadPool.schedule( + ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), + coolDown, + ThreadPool.Names.SNAPSHOT + 
) + ); assert existing == null : "Already have an ongoing finalization " + finalizationFuture; } @@ -341,19 +388,24 @@ public void onResponse(T response) { public void onFailure(Exception e) { logCooldownInfo(); final Scheduler.Cancellable existing = finalizationFuture.getAndSet( - threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onFailure(e)), coolDown, ThreadPool.Names.SNAPSHOT)); + threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onFailure(e)), coolDown, ThreadPool.Names.SNAPSHOT) + ); assert existing == null : "Already have an ongoing finalization " + finalizationFuture; } }; } private void logCooldownInfo() { - logger.info("Sleeping for [{}] after modifying repository [{}] because it contains snapshots older than version [{}]" + - " and therefore is using a backwards compatible metadata format that requires this cooldown period to avoid " + - "repository corruption. To get rid of this message and move to the new repository metadata format, either remove " + - "all snapshots older than version [{}] from the repository or create a new repository at an empty location.", - coolDown, metadata.name(), SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION, - SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION); + logger.info( + "Sleeping for [{}] after modifying repository [{}] because it contains snapshots older than version [{}]" + + " and therefore is using a backwards compatible metadata format that requires this cooldown period to avoid " + + "repository corruption. To get rid of this message and move to the new repository metadata format, either remove " + + "all snapshots older than version [{}] from the repository or create a new repository at an empty location.", + coolDown, + metadata.name(), + SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION, + SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION + ); } @Override diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index de527b6044faa..679243b28cfc7 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -94,15 +94,22 @@ protected S3Repository createRepository( final RepositoryMetadata metadata, final NamedXContentRegistry registry, final ClusterService clusterService, - final RecoverySettings recoverySettings) { + final RecoverySettings recoverySettings + ) { return new S3Repository(metadata, registry, service, clusterService, recoverySettings); } @Override - public Map getRepositories(final Environment env, final NamedXContentRegistry registry, - final ClusterService clusterService, final RecoverySettings recoverySettings) { - return Collections.singletonMap(S3Repository.TYPE, metadata -> createRepository(metadata, registry, clusterService, - recoverySettings)); + public Map getRepositories( + final Environment env, + final NamedXContentRegistry registry, + final ClusterService clusterService, + final RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + S3Repository.TYPE, + metadata -> createRepository(metadata, registry, clusterService, recoverySettings) + ); } @Override @@ -125,7 +132,8 @@ public List> getSettings() { S3Repository.ACCESS_KEY_SETTING, S3Repository.SECRET_KEY_SETTING, S3ClientSettings.SIGNER_OVERRIDE, - S3ClientSettings.REGION); + S3ClientSettings.REGION + ); } @Override diff --git 
a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java index e6c697d271c7a..82c3367679c53 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java @@ -102,8 +102,12 @@ private void openStream() throws IOException { final GetObjectRequest getObjectRequest = new GetObjectRequest(blobStore.bucket(), blobKey); getObjectRequest.setRequestMetricCollector(blobStore.getMetricCollector); if (currentOffset > 0 || start > 0 || end < Long.MAX_VALUE - 1) { - assert start + currentOffset <= end : - "requesting beyond end, start = " + start + " offset=" + currentOffset + " end=" + end; + assert start + currentOffset <= end : "requesting beyond end, start = " + + start + + " offset=" + + currentOffset + + " end=" + + end; getObjectRequest.setRange(Math.addExact(start, currentOffset), end); } final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest)); @@ -126,8 +130,13 @@ private long getStreamLength(final S3Object object) { final Long[] range = metadata.getContentRange(); if (range != null) { assert range[1] >= range[0] : range[1] + " vs " + range[0]; - assert range[0] == start + currentOffset : - "Content-Range start value [" + range[0] + "] exceeds start [" + start + "] + current offset [" + currentOffset + ']'; + assert range[0] == start + currentOffset : "Content-Range start value [" + + range[0] + + "] exceeds start [" + + start + + "] + current offset [" + + currentOffset + + ']'; assert range[1] == end : "Content-Range end value [" + range[1] + "] exceeds end [" + end + ']'; return range[1] - range[0] + 1L; } @@ -183,12 +192,30 @@ private void ensureOpen() { private void reopenStreamOrFail(IOException e) throws IOException { if (attempt >= maxAttempts) { - logger.debug(new ParameterizedMessage("failed reading [{}/{}] at offset [{}], attempt [{}] of [{}], giving up", - blobStore.bucket(), blobKey, start + currentOffset, attempt, maxAttempts), e); + logger.debug( + new ParameterizedMessage( + "failed reading [{}/{}] at offset [{}], attempt [{}] of [{}], giving up", + blobStore.bucket(), + blobKey, + start + currentOffset, + attempt, + maxAttempts + ), + e + ); throw addSuppressedExceptions(e); } - logger.debug(new ParameterizedMessage("failed reading [{}/{}] at offset [{}], attempt [{}] of [{}], retrying", - blobStore.bucket(), blobKey, start + currentOffset, attempt, maxAttempts), e); + logger.debug( + new ParameterizedMessage( + "failed reading [{}/{}] at offset [{}], attempt [{}] of [{}], retrying", + blobStore.bucket(), + blobKey, + start + currentOffset, + attempt, + maxAttempts + ), + e + ); attempt += 1; if (failures.size() < MAX_SUPPRESSED_EXCEPTIONS) { failures.add(e); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index 1bdde7e3790f2..1f5cb2a752eef 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -55,7 +55,6 @@ import static java.util.Collections.emptyMap; - class S3Service implements Closeable { private static final Logger logger = LogManager.getLogger(S3Service.class); @@ -65,7 +64,8 @@ 
class S3Service implements Closeable { * Client settings calculated from static configuration and settings in the keystore. */ private volatile Map staticClientSettings = MapBuilder.newMapBuilder() - .put("default", S3ClientSettings.getClientSettings(Settings.EMPTY, "default")).immutableMap(); + .put("default", S3ClientSettings.getClientSettings(Settings.EMPTY, "default")) + .immutableMap(); /** * Client settings derived from those in {@link #staticClientSettings} by combining them with settings @@ -140,8 +140,12 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { return newSettings; } } - throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. Existing client configs: " - + Strings.collectionToDelimitedString(staticClientSettings.keySet(), ",")); + throw new IllegalArgumentException( + "Unknown s3 client name [" + + clientName + + "]. Existing client configs: " + + Strings.collectionToDelimitedString(staticClientSettings.keySet(), ",") + ); } // proxy for testing diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AmazonS3Wrapper.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AmazonS3Wrapper.java index 326932b2f7064..47f2e4fa14297 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AmazonS3Wrapper.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AmazonS3Wrapper.java @@ -156,14 +156,14 @@ public void setS3ClientOptions(S3ClientOptions clientOptions) { } @Override - public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass) - throws AmazonClientException, AmazonServiceException { + public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass) throws AmazonClientException, + AmazonServiceException { delegate.changeObjectStorageClass(bucketName, key, newStorageClass); } @Override - public void setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation) - throws AmazonClientException, AmazonServiceException { + public void setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation) throws AmazonClientException, + AmazonServiceException { delegate.setObjectRedirectLocation(bucketName, key, newRedirectLocation); } @@ -193,14 +193,20 @@ public VersionListing listVersions(String bucketName, String prefix) throws Amaz } @Override - public VersionListing listNextBatchOfVersions(VersionListing previousVersionListing) - throws AmazonClientException, AmazonServiceException { + public VersionListing listNextBatchOfVersions(VersionListing previousVersionListing) throws AmazonClientException, + AmazonServiceException { return delegate.listNextBatchOfVersions(previousVersionListing); } @Override - public VersionListing listVersions(String bucketName, String prefix, String keyMarker, String versionIdMarker, - String delimiter, Integer maxResults) throws AmazonClientException, AmazonServiceException { + public VersionListing listVersions( + String bucketName, + String prefix, + String keyMarker, + String versionIdMarker, + String delimiter, + Integer maxResults + ) throws AmazonClientException, AmazonServiceException { return delegate.listVersions(bucketName, prefix, keyMarker, versionIdMarker, delimiter, maxResults); } @@ -235,8 +241,8 @@ public String getBucketLocation(String bucketName) throws AmazonClientException, } @Override - public String getBucketLocation(GetBucketLocationRequest getBucketLocationRequest) - throws 
AmazonClientException, AmazonServiceException { + public String getBucketLocation(GetBucketLocationRequest getBucketLocationRequest) throws AmazonClientException, + AmazonServiceException { return delegate.getBucketLocation(getBucketLocationRequest); } @@ -251,8 +257,8 @@ public Bucket createBucket(String bucketName) throws AmazonClientException, Amaz } @Override - public Bucket createBucket(String bucketName, com.amazonaws.services.s3.model.Region region) - throws AmazonClientException, AmazonServiceException { + public Bucket createBucket(String bucketName, com.amazonaws.services.s3.model.Region region) throws AmazonClientException, + AmazonServiceException { return delegate.createBucket(bucketName, region); } @@ -267,8 +273,8 @@ public AccessControlList getObjectAcl(String bucketName, String key) throws Amaz } @Override - public AccessControlList getObjectAcl(String bucketName, String key, String versionId) - throws AmazonClientException, AmazonServiceException { + public AccessControlList getObjectAcl(String bucketName, String key, String versionId) throws AmazonClientException, + AmazonServiceException { return delegate.getObjectAcl(bucketName, key, versionId); } @@ -283,20 +289,20 @@ public void setObjectAcl(String bucketName, String key, AccessControlList acl) t } @Override - public void setObjectAcl(String bucketName, String key, CannedAccessControlList acl) - throws AmazonClientException, AmazonServiceException { + public void setObjectAcl(String bucketName, String key, CannedAccessControlList acl) throws AmazonClientException, + AmazonServiceException { delegate.setObjectAcl(bucketName, key, acl); } @Override - public void setObjectAcl(String bucketName, String key, String versionId, AccessControlList acl) - throws AmazonClientException, AmazonServiceException { + public void setObjectAcl(String bucketName, String key, String versionId, AccessControlList acl) throws AmazonClientException, + AmazonServiceException { delegate.setObjectAcl(bucketName, key, versionId, acl); } @Override - public void setObjectAcl(String bucketName, String key, String versionId, CannedAccessControlList acl) - throws AmazonClientException, AmazonServiceException { + public void setObjectAcl(String bucketName, String key, String versionId, CannedAccessControlList acl) throws AmazonClientException, + AmazonServiceException { delegate.setObjectAcl(bucketName, key, versionId, acl); } @@ -336,8 +342,8 @@ public ObjectMetadata getObjectMetadata(String bucketName, String key) throws Am } @Override - public ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest) - throws AmazonClientException, AmazonServiceException { + public ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest) throws AmazonClientException, + AmazonServiceException { return delegate.getObjectMetadata(getObjectMetadataRequest); } @@ -352,8 +358,8 @@ public S3Object getObject(GetObjectRequest getObjectRequest) throws AmazonClient } @Override - public ObjectMetadata getObject(GetObjectRequest getObjectRequest, File destinationFile) - throws AmazonClientException, AmazonServiceException { + public ObjectMetadata getObject(GetObjectRequest getObjectRequest, File destinationFile) throws AmazonClientException, + AmazonServiceException { return delegate.getObject(getObjectRequest, destinationFile); } @@ -369,19 +375,19 @@ public void deleteBucket(String bucketName) throws AmazonClientException, Amazon @Override public void setBucketReplicationConfiguration(String bucketName, 
BucketReplicationConfiguration configuration) - throws AmazonServiceException, AmazonClientException { + throws AmazonServiceException, AmazonClientException { delegate.setBucketReplicationConfiguration(bucketName, configuration); } @Override public void setBucketReplicationConfiguration(SetBucketReplicationConfigurationRequest setBucketReplicationConfigurationRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonServiceException, AmazonClientException { delegate.setBucketReplicationConfiguration(setBucketReplicationConfigurationRequest); } @Override - public BucketReplicationConfiguration getBucketReplicationConfiguration(String bucketName) - throws AmazonServiceException, AmazonClientException { + public BucketReplicationConfiguration getBucketReplicationConfiguration(String bucketName) throws AmazonServiceException, + AmazonClientException { return delegate.getBucketReplicationConfiguration(bucketName); } @@ -391,8 +397,8 @@ public void deleteBucketReplicationConfiguration(String bucketName) throws Amazo } @Override - public void deleteBucketReplicationConfiguration(DeleteBucketReplicationConfigurationRequest request) - throws AmazonServiceException, AmazonClientException { + public void deleteBucketReplicationConfiguration(DeleteBucketReplicationConfigurationRequest request) throws AmazonServiceException, + AmazonClientException { delegate.deleteBucketReplicationConfiguration(request); } @@ -413,13 +419,13 @@ public PutObjectResult putObject(String bucketName, String key, File file) throw @Override public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata) - throws AmazonClientException, AmazonServiceException { + throws AmazonClientException, AmazonServiceException { return delegate.putObject(bucketName, key, input, metadata); } @Override public CopyObjectResult copyObject(String sourceBucketName, String sourceKey, String destinationBucketName, String destinationKey) - throws AmazonClientException, AmazonServiceException { + throws AmazonClientException, AmazonServiceException { return delegate.copyObject(sourceBucketName, sourceKey, destinationBucketName, destinationKey); } @@ -444,8 +450,8 @@ public void deleteObject(DeleteObjectRequest deleteObjectRequest) throws AmazonC } @Override - public DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteObjectsRequest) - throws AmazonClientException, AmazonServiceException { + public DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteObjectsRequest) throws AmazonClientException, + AmazonServiceException { return delegate.deleteObjects(deleteObjectsRequest); } @@ -460,26 +466,26 @@ public void deleteVersion(DeleteVersionRequest deleteVersionRequest) throws Amaz } @Override - public BucketLoggingConfiguration getBucketLoggingConfiguration(String bucketName) - throws AmazonClientException, AmazonServiceException { + public BucketLoggingConfiguration getBucketLoggingConfiguration(String bucketName) throws AmazonClientException, + AmazonServiceException { return delegate.getBucketLoggingConfiguration(bucketName); } @Override public void setBucketLoggingConfiguration(SetBucketLoggingConfigurationRequest setBucketLoggingConfigurationRequest) - throws AmazonClientException, AmazonServiceException { + throws AmazonClientException, AmazonServiceException { delegate.setBucketLoggingConfiguration(setBucketLoggingConfigurationRequest); } @Override - public BucketVersioningConfiguration getBucketVersioningConfiguration(String bucketName) - throws 
AmazonClientException, AmazonServiceException { + public BucketVersioningConfiguration getBucketVersioningConfiguration(String bucketName) throws AmazonClientException, + AmazonServiceException { return delegate.getBucketVersioningConfiguration(bucketName); } @Override public void setBucketVersioningConfiguration(SetBucketVersioningConfigurationRequest setBucketVersioningConfigurationRequest) - throws AmazonClientException, AmazonServiceException { + throws AmazonClientException, AmazonServiceException { delegate.setBucketVersioningConfiguration(setBucketVersioningConfigurationRequest); } @@ -530,7 +536,8 @@ public void deleteBucketCrossOriginConfiguration(String bucketName) { @Override public void deleteBucketCrossOriginConfiguration( - DeleteBucketCrossOriginConfigurationRequest deleteBucketCrossOriginConfigurationRequest) { + DeleteBucketCrossOriginConfigurationRequest deleteBucketCrossOriginConfigurationRequest + ) { delegate.deleteBucketCrossOriginConfiguration(deleteBucketCrossOriginConfigurationRequest); } @@ -560,45 +567,45 @@ public void deleteBucketTaggingConfiguration(DeleteBucketTaggingConfigurationReq } @Override - public BucketNotificationConfiguration getBucketNotificationConfiguration(String bucketName) - throws AmazonClientException, AmazonServiceException { + public BucketNotificationConfiguration getBucketNotificationConfiguration(String bucketName) throws AmazonClientException, + AmazonServiceException { return delegate.getBucketNotificationConfiguration(bucketName); } @Override public void setBucketNotificationConfiguration(SetBucketNotificationConfigurationRequest setBucketNotificationConfigurationRequest) - throws AmazonClientException, AmazonServiceException { + throws AmazonClientException, AmazonServiceException { delegate.setBucketNotificationConfiguration(setBucketNotificationConfigurationRequest); } @Override public void setBucketNotificationConfiguration(String bucketName, BucketNotificationConfiguration bucketNotificationConfiguration) - throws AmazonClientException, AmazonServiceException { + throws AmazonClientException, AmazonServiceException { delegate.setBucketNotificationConfiguration(bucketName, bucketNotificationConfiguration); } @Override - public BucketWebsiteConfiguration getBucketWebsiteConfiguration(String bucketName) - throws AmazonClientException, AmazonServiceException { + public BucketWebsiteConfiguration getBucketWebsiteConfiguration(String bucketName) throws AmazonClientException, + AmazonServiceException { return delegate.getBucketWebsiteConfiguration(bucketName); } @Override public BucketWebsiteConfiguration getBucketWebsiteConfiguration( - GetBucketWebsiteConfigurationRequest getBucketWebsiteConfigurationRequest) - throws AmazonClientException, AmazonServiceException { + GetBucketWebsiteConfigurationRequest getBucketWebsiteConfigurationRequest + ) throws AmazonClientException, AmazonServiceException { return delegate.getBucketWebsiteConfiguration(getBucketWebsiteConfigurationRequest); } @Override - public void setBucketWebsiteConfiguration(String bucketName, BucketWebsiteConfiguration configuration) - throws AmazonClientException, AmazonServiceException { + public void setBucketWebsiteConfiguration(String bucketName, BucketWebsiteConfiguration configuration) throws AmazonClientException, + AmazonServiceException { delegate.setBucketWebsiteConfiguration(bucketName, configuration); } @Override public void setBucketWebsiteConfiguration(SetBucketWebsiteConfigurationRequest setBucketWebsiteConfigurationRequest) - throws AmazonClientException, 
AmazonServiceException { + throws AmazonClientException, AmazonServiceException { delegate.setBucketWebsiteConfiguration(setBucketWebsiteConfigurationRequest); } @@ -609,7 +616,7 @@ public void deleteBucketWebsiteConfiguration(String bucketName) throws AmazonCli @Override public void deleteBucketWebsiteConfiguration(DeleteBucketWebsiteConfigurationRequest deleteBucketWebsiteConfigurationRequest) - throws AmazonClientException, AmazonServiceException { + throws AmazonClientException, AmazonServiceException { delegate.deleteBucketWebsiteConfiguration(deleteBucketWebsiteConfigurationRequest); } @@ -619,8 +626,8 @@ public BucketPolicy getBucketPolicy(String bucketName) throws AmazonClientExcept } @Override - public BucketPolicy getBucketPolicy(GetBucketPolicyRequest getBucketPolicyRequest) - throws AmazonClientException, AmazonServiceException { + public BucketPolicy getBucketPolicy(GetBucketPolicyRequest getBucketPolicyRequest) throws AmazonClientException, + AmazonServiceException { return delegate.getBucketPolicy(getBucketPolicyRequest); } @@ -640,8 +647,8 @@ public void deleteBucketPolicy(String bucketName) throws AmazonClientException, } @Override - public void deleteBucketPolicy(DeleteBucketPolicyRequest deleteBucketPolicyRequest) - throws AmazonClientException, AmazonServiceException { + public void deleteBucketPolicy(DeleteBucketPolicyRequest deleteBucketPolicyRequest) throws AmazonClientException, + AmazonServiceException { delegate.deleteBucketPolicy(deleteBucketPolicyRequest); } @@ -661,8 +668,8 @@ public URL generatePresignedUrl(GeneratePresignedUrlRequest generatePresignedUrl } @Override - public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request) - throws AmazonClientException, AmazonServiceException { + public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request) throws AmazonClientException, + AmazonServiceException { return delegate.initiateMultipartUpload(request); } @@ -682,14 +689,14 @@ public void abortMultipartUpload(AbortMultipartUploadRequest request) throws Ama } @Override - public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) - throws AmazonClientException, AmazonServiceException { + public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) throws AmazonClientException, + AmazonServiceException { return delegate.completeMultipartUpload(request); } @Override - public MultipartUploadListing listMultipartUploads(ListMultipartUploadsRequest request) - throws AmazonClientException, AmazonServiceException { + public MultipartUploadListing listMultipartUploads(ListMultipartUploadsRequest request) throws AmazonClientException, + AmazonServiceException { return delegate.listMultipartUploads(request); } @@ -724,14 +731,14 @@ public boolean isRequesterPaysEnabled(String bucketName) throws AmazonServiceExc } @Override - public ObjectListing listNextBatchOfObjects(ListNextBatchOfObjectsRequest listNextBatchOfObjectsRequest) - throws AmazonClientException, AmazonServiceException { + public ObjectListing listNextBatchOfObjects(ListNextBatchOfObjectsRequest listNextBatchOfObjectsRequest) throws AmazonClientException, + AmazonServiceException { return delegate.listNextBatchOfObjects(listNextBatchOfObjectsRequest); } @Override public VersionListing listNextBatchOfVersions(ListNextBatchOfVersionsRequest listNextBatchOfVersionsRequest) - throws AmazonClientException, AmazonServiceException { + throws 
AmazonClientException, AmazonServiceException { return delegate.listNextBatchOfVersions(listNextBatchOfVersionsRequest); } @@ -742,47 +749,50 @@ public Owner getS3AccountOwner(GetS3AccountOwnerRequest getS3AccountOwnerRequest @Override public BucketLoggingConfiguration getBucketLoggingConfiguration( - GetBucketLoggingConfigurationRequest getBucketLoggingConfigurationRequest) - throws AmazonClientException, AmazonServiceException { + GetBucketLoggingConfigurationRequest getBucketLoggingConfigurationRequest + ) throws AmazonClientException, AmazonServiceException { return delegate.getBucketLoggingConfiguration(getBucketLoggingConfigurationRequest); } @Override public BucketVersioningConfiguration getBucketVersioningConfiguration( - GetBucketVersioningConfigurationRequest getBucketVersioningConfigurationRequest) - throws AmazonClientException, AmazonServiceException { + GetBucketVersioningConfigurationRequest getBucketVersioningConfigurationRequest + ) throws AmazonClientException, AmazonServiceException { return delegate.getBucketVersioningConfiguration(getBucketVersioningConfigurationRequest); } @Override public BucketLifecycleConfiguration getBucketLifecycleConfiguration( - GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest) { + GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest + ) { return delegate.getBucketLifecycleConfiguration(getBucketLifecycleConfigurationRequest); } @Override public BucketCrossOriginConfiguration getBucketCrossOriginConfiguration( - GetBucketCrossOriginConfigurationRequest getBucketCrossOriginConfigurationRequest) { + GetBucketCrossOriginConfigurationRequest getBucketCrossOriginConfigurationRequest + ) { return delegate.getBucketCrossOriginConfiguration(getBucketCrossOriginConfigurationRequest); } @Override public BucketTaggingConfiguration getBucketTaggingConfiguration( - GetBucketTaggingConfigurationRequest getBucketTaggingConfigurationRequest) { + GetBucketTaggingConfigurationRequest getBucketTaggingConfigurationRequest + ) { return delegate.getBucketTaggingConfiguration(getBucketTaggingConfigurationRequest); } @Override public BucketNotificationConfiguration getBucketNotificationConfiguration( - GetBucketNotificationConfigurationRequest getBucketNotificationConfigurationRequest) - throws AmazonClientException, AmazonServiceException { + GetBucketNotificationConfigurationRequest getBucketNotificationConfigurationRequest + ) throws AmazonClientException, AmazonServiceException { return delegate.getBucketNotificationConfiguration(getBucketNotificationConfigurationRequest); } @Override public BucketReplicationConfiguration getBucketReplicationConfiguration( - GetBucketReplicationConfigurationRequest getBucketReplicationConfigurationRequest) - throws AmazonServiceException, AmazonClientException { + GetBucketReplicationConfigurationRequest getBucketReplicationConfigurationRequest + ) throws AmazonServiceException, AmazonClientException { return delegate.getBucketReplicationConfiguration(getBucketReplicationConfigurationRequest); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java index cbae2fe880a35..0f1bfdf7b7d6b 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java @@ -120,8 +120,17 @@ public void 
testCredentialsIncomplete() { } public void testAWSDefaultConfiguration() { - launchAWSConfigurationTest(Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, 3, - ClientConfiguration.DEFAULT_THROTTLE_RETRIES, ClientConfiguration.DEFAULT_SOCKET_TIMEOUT); + launchAWSConfigurationTest( + Settings.EMPTY, + Protocol.HTTPS, + null, + -1, + null, + null, + 3, + ClientConfiguration.DEFAULT_THROTTLE_RETRIES, + ClientConfiguration.DEFAULT_SOCKET_TIMEOUT + ); } public void testAWSConfigurationWithAwsSettings() { @@ -135,16 +144,22 @@ public void testAWSConfigurationWithAwsSettings() { .put("s3.client.default.proxy.port", 8080) .put("s3.client.default.read_timeout", "10s") .build(); - launchAWSConfigurationTest(settings, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username", - "aws_proxy_password", 3, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, 10000); + launchAWSConfigurationTest( + settings, + Protocol.HTTP, + "aws_proxy_host", + 8080, + "aws_proxy_username", + "aws_proxy_password", + 3, + ClientConfiguration.DEFAULT_THROTTLE_RETRIES, + 10000 + ); } public void testRepositoryMaxRetries() { - final Settings settings = Settings.builder() - .put("s3.client.default.max_retries", 5) - .build(); - launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, - null, 5, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, 50000); + final Settings settings = Settings.builder().put("s3.client.default.max_retries", 5).build(); + launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, null, 5, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, 50000); } public void testRepositoryThrottleRetries() { @@ -154,15 +169,17 @@ public void testRepositoryThrottleRetries() { launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, null, 3, throttling, 50000); } - private void launchAWSConfigurationTest(Settings settings, - Protocol expectedProtocol, - String expectedProxyHost, - int expectedProxyPort, - String expectedProxyUsername, - String expectedProxyPassword, - Integer expectedMaxRetries, - boolean expectedUseThrottleRetries, - int expectedReadTimeout) { + private void launchAWSConfigurationTest( + Settings settings, + Protocol expectedProtocol, + String expectedProxyHost, + int expectedProxyPort, + String expectedProxyUsername, + String expectedProxyPassword, + Integer expectedMaxRetries, + boolean expectedUseThrottleRetries, + int expectedReadTimeout + ) { final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); final ClientConfiguration configuration = S3Service.buildConfiguration(clientSettings); @@ -179,9 +196,7 @@ private void launchAWSConfigurationTest(Settings settings, } public void testEndpointSetting() { - final Settings settings = Settings.builder() - .put("s3.client.default.endpoint", "s3.endpoint") - .build(); + final Settings settings = Settings.builder().put("s3.client.default.endpoint", "s3.endpoint").build(); assertEndpoint(Settings.EMPTY, settings, "s3.endpoint"); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java index 9fe887c0cdd0d..645fe5cf1d134 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java @@ -102,10 +102,7 @@ protected Settings nodeSettings() { 
secureSettings.setString(ACCESS_KEY_SETTING.getConcreteSettingForNamespace("other").getKey(), "secure_other_key"); secureSettings.setString(SECRET_KEY_SETTING.getConcreteSettingForNamespace("other").getKey(), "secure_other_secret"); - return Settings.builder() - .setSecureSettings(secureSettings) - .put(super.nodeSettings()) - .build(); + return Settings.builder().setSecureSettings(secureSettings).put(super.nodeSettings()).build(); } public void testRepositoryCredentialsOverrideSecureCredentials() { @@ -139,7 +136,8 @@ public void testRepositoryCredentialsOverrideSecureCredentials() { "Using s3 access/secret key from repository settings. Instead store these in named clients and" + " the opensearch keystore for secure settings.", "[access_key] setting was deprecated in OpenSearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version."); + + " See the breaking changes documentation for the next major version." + ); } public void testReinitSecureCredentials() { @@ -224,16 +222,20 @@ public void testReinitSecureCredentials() { "Using s3 access/secret key from repository settings. Instead store these in named clients and" + " the opensearch keystore for secure settings.", "[access_key] setting was deprecated in OpenSearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version."); + + " See the breaking changes documentation for the next major version." + ); } } public void testInsecureRepositoryCredentials() throws Exception { final String repositoryName = "repo-insecure-creds"; - createRepository(repositoryName, Settings.builder() - .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key") - .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret") - .build()); + createRepository( + repositoryName, + Settings.builder() + .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key") + .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret") + .build() + ); final RestRequest fakeRestRequest = new FakeRestRequest(); fakeRestRequest.params().put("repository", repositoryName); @@ -262,14 +264,19 @@ public void sendResponse(RestResponse response) { assertWarnings( "Using s3 access/secret key from repository settings. Instead store these in named clients and" - + " the opensearch keystore for secure settings."); + + " the opensearch keystore for secure settings." 
+ ); } private void createRepository(final String name, final Settings repositorySettings) { - assertAcked(client().admin().cluster().preparePutRepository(name) - .setType(S3Repository.TYPE) - .setVerify(false) - .setSettings(repositorySettings)); + assertAcked( + client().admin() + .cluster() + .preparePutRepository(name) + .setType(S3Repository.TYPE) + .setVerify(false) + .setSettings(repositorySettings) + ); } /** @@ -282,9 +289,12 @@ public ProxyS3RepositoryPlugin(Settings settings) { } @Override - protected S3Repository createRepository(RepositoryMetadata metadata, - NamedXContentRegistry registry, ClusterService clusterService, - RecoverySettings recoverySettings) { + protected S3Repository createRepository( + RepositoryMetadata metadata, + NamedXContentRegistry registry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { return new S3Repository(metadata, registry, service, clusterService, recoverySettings) { @Override protected void assertSnapshotOrGenericThread() { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java index 722adb604d698..4f4ec0afcf9f4 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -113,10 +113,12 @@ protected Class<? extends Exception> unresponsiveExceptionType() { } @Override - protected BlobContainer createBlobContainer(final @Nullable Integer maxRetries, - final @Nullable TimeValue readTimeout, - final @Nullable Boolean disableChunkedEncoding, - final @Nullable ByteSizeValue bufferSize) { + protected BlobContainer createBlobContainer( + final @Nullable Integer maxRetries, + final @Nullable TimeValue readTimeout, + final @Nullable Boolean disableChunkedEncoding, + final @Nullable ByteSizeValue bufferSize + ) { final Settings.Builder clientSettings = Settings.builder(); final String clientName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); @@ -140,24 +142,33 @@ protected BlobContainer createBlobContainer(final @Nullable Integer maxRetries, clientSettings.setSecureSettings(secureSettings); service.refreshAndClearCache(S3ClientSettings.load(clientSettings.build())); - final RepositoryMetadata repositoryMetadata = new RepositoryMetadata("repository", S3Repository.TYPE, - Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName).build()); - - return new S3BlobContainer(BlobPath.cleanPath(), new S3BlobStore(service, "bucket", - S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getDefault(Settings.EMPTY), - bufferSize == null ? S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize, - S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), - S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), - repositoryMetadata)) { - @Override - public InputStream readBlob(String blobName) throws IOException { - return new AssertingInputStream(super.readBlob(blobName), blobName); - } + final RepositoryMetadata repositoryMetadata = new RepositoryMetadata( + "repository", + S3Repository.TYPE, + Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName).build() + ); + + return new S3BlobContainer( + BlobPath.cleanPath(), + new S3BlobStore( + service, + "bucket", + S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getDefault(Settings.EMPTY), + bufferSize == null ?
S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize, + S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), + S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), + repositoryMetadata + ) + ) { + @Override + public InputStream readBlob(String blobName) throws IOException { + return new AssertingInputStream(super.readBlob(blobName), blobName); + } - @Override - public InputStream readBlob(String blobName, long position, long length) throws IOException { - return new AssertingInputStream(super.readBlob(blobName, position, length), blobName, position, length); - } + @Override + public InputStream readBlob(String blobName, long position, long length) throws IOException { + return new AssertingInputStream(super.readBlob(blobName, position, length), blobName, position, length); + } }; } @@ -184,8 +195,15 @@ public void testWriteBlobWithRetries() throws Exception { Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.max(1, bytes.length - 1))]); } else { Streams.readFully(exchange.getRequestBody()); - exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY, - HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1); + exchange.sendResponseHeaders( + randomFrom( + HttpStatus.SC_INTERNAL_SERVER_ERROR, + HttpStatus.SC_BAD_GATEWAY, + HttpStatus.SC_SERVICE_UNAVAILABLE, + HttpStatus.SC_GATEWAY_TIMEOUT + ), + -1 + ); } } exchange.close(); @@ -220,8 +238,10 @@ public void testWriteBlobWithReadTimeouts() { blobContainer.writeBlob("write_blob_timeout", stream, bytes.length, false); } }); - assertThat(exception.getMessage().toLowerCase(Locale.ROOT), - containsString("unable to upload object [write_blob_timeout] using a single upload")); + assertThat( + exception.getMessage().toLowerCase(Locale.ROOT), + containsString("unable to upload object [write_blob_timeout] using a single upload") + ); assertThat(exception.getCause(), instanceOf(SdkClientException.class)); assertThat(exception.getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); @@ -248,16 +268,15 @@ public void testWriteLargeBlob() throws Exception { httpServer.createContext("/bucket/write_large_blob", exchange -> { final long contentLength = Long.parseLong(exchange.getRequestHeaders().getFirst("Content-Length")); - if ("POST".equals(exchange.getRequestMethod()) - && exchange.getRequestURI().getQuery().equals("uploads")) { + if ("POST".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery().equals("uploads")) { // initiate multipart upload request if (countDownInitiate.countDown()) { - byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + - "<InitiateMultipartUploadResult>\n" + - "  <Bucket>bucket</Bucket>\n" + - "  <Key>write_large_blob</Key>\n" + - "  <UploadId>TEST</UploadId>\n" + - "</InitiateMultipartUploadResult>").getBytes(StandardCharsets.UTF_8); + byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<InitiateMultipartUploadResult>\n" + + "  <Bucket>bucket</Bucket>\n" + + "  <Key>write_large_blob</Key>\n" + + "  <UploadId>TEST</UploadId>\n" + + "</InitiateMultipartUploadResult>").getBytes(StandardCharsets.UTF_8); exchange.getResponseHeaders().add("Content-Type", "application/xml"); exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length); exchange.getResponseBody().write(response); @@ -267,36 +286,35 @@ public void testWriteLargeBlob() throws Exception { } else if ("PUT".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery().contains("uploadId=TEST") && exchange.getRequestURI().getQuery().contains("partNumber=")) { - // upload part request - MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody()); - BytesReference bytes = Streams.readFully(md5); - assertThat((long)
bytes.length(), anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes()))); - assertThat(contentLength, anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes()))); - - if (countDownUploads.decrementAndGet() % 2 == 0) { - exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest())); - exchange.sendResponseHeaders(HttpStatus.SC_OK, -1); - exchange.close(); - return; - } + // upload part request + MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody()); + BytesReference bytes = Streams.readFully(md5); + assertThat((long) bytes.length(), anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes()))); + assertThat(contentLength, anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes()))); + + if (countDownUploads.decrementAndGet() % 2 == 0) { + exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest())); + exchange.sendResponseHeaders(HttpStatus.SC_OK, -1); + exchange.close(); + return; + } - } else if ("POST".equals(exchange.getRequestMethod()) - && exchange.getRequestURI().getQuery().equals("uploadId=TEST")) { - // complete multipart upload request - if (countDownComplete.countDown()) { - Streams.readFully(exchange.getRequestBody()); - byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + - "<CompleteMultipartUploadResult>\n" + - "  <Bucket>bucket</Bucket>\n" + - "  <Key>write_large_blob</Key>\n" + - "</CompleteMultipartUploadResult>").getBytes(StandardCharsets.UTF_8); - exchange.getResponseHeaders().add("Content-Type", "application/xml"); - exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length); - exchange.getResponseBody().write(response); - exchange.close(); - return; + } else if ("POST".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery().equals("uploadId=TEST")) { + // complete multipart upload request + if (countDownComplete.countDown()) { + Streams.readFully(exchange.getRequestBody()); + byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<CompleteMultipartUploadResult>\n" + + "  <Bucket>bucket</Bucket>\n" + + "  <Key>write_large_blob</Key>\n" + + "</CompleteMultipartUploadResult>").getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "application/xml"); + exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length); + exchange.getResponseBody().write(response); + exchange.close(); + return; + } } - } // sends an error back or let the request time out if (useTimeout == false) { @@ -304,8 +322,15 @@ public void testWriteLargeBlob() throws Exception { Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.toIntExact(contentLength - 1))]); } else { Streams.readFully(exchange.getRequestBody()); - exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY, - HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1); + exchange.sendResponseHeaders( + randomFrom( + HttpStatus.SC_INTERNAL_SERVER_ERROR, + HttpStatus.SC_BAD_GATEWAY, + HttpStatus.SC_SERVICE_UNAVAILABLE, + HttpStatus.SC_GATEWAY_TIMEOUT + ), + -1 + ); } exchange.close(); } @@ -360,8 +385,16 @@ public void close() throws IOException { super.close(); if (in instanceof S3RetryingInputStream) { final S3RetryingInputStream s3Stream = (S3RetryingInputStream) in; - assertTrue("Stream " + toString() + " should have reached EOF or should have been aborted but got [eof=" + s3Stream.isEof() - + ", aborted=" + s3Stream.isAborted() + ']', s3Stream.isEof() || s3Stream.isAborted()); + assertTrue( + "Stream " + + toString() + + " should have reached EOF or should have been aborted but got [eof=" + + s3Stream.isEof() + + ", aborted=" + + s3Stream.isAborted() + + ']', + s3Stream.isEof() || s3Stream.isAborted() + ); } else {
assertThat(in, instanceOf(ByteArrayInputStream.class)); assertThat(((ByteArrayInputStream) in).available(), equalTo(0)); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index ea00ea7751ced..18faa738ad8b0 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -79,8 +79,10 @@ public void testExecuteSingleUploadBlobSizeTooLarge() { final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - blobContainer.executeSingleUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize)); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> blobContainer.executeSingleUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) + ); assertEquals("Upload request size [" + blobSize + "] can't be larger than 5gb", e.getMessage()); } @@ -91,8 +93,10 @@ public void testExecuteSingleUploadBlobSizeLargerThanBufferSize() { final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); final String blobName = randomAlphaOfLengthBetween(1, 10); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - blobContainer.executeSingleUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), ByteSizeUnit.MB.toBytes(2))); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> blobContainer.executeSingleUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), ByteSizeUnit.MB.toBytes(2)) + ); assertEquals("Upload request size [2097152] can't be larger than buffer size", e.getMessage()); } @@ -152,8 +156,9 @@ public void testExecuteMultipartUploadBlobSizeTooLarge() { final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) ); assertEquals("Multipart upload request size [" + blobSize + "] can't be larger than 5tb", e.getMessage()); } @@ -163,8 +168,9 @@ public void testExecuteMultipartUploadBlobSizeTooSmall() { final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) ); assertEquals("Multipart upload request size [" + blobSize + "] can't be smaller than 5mb", e.getMessage()); } @@ -179,7 +185,7 @@ public void testExecuteMultipartUpload() 
throws IOException { } final long blobSize = ByteSizeUnit.GB.toBytes(randomIntBetween(1, 128)); - final long bufferSize = ByteSizeUnit.MB.toBytes(randomIntBetween(5, 1024)); + final long bufferSize = ByteSizeUnit.MB.toBytes(randomIntBetween(5, 1024)); final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); @@ -253,7 +259,7 @@ public void testExecuteMultipartUpload() throws IOException { assertEquals(i + 1, uploadRequest.getPartNumber()); assertEquals(inputStream, uploadRequest.getInputStream()); - if (i == (uploadRequests.size() -1)) { + if (i == (uploadRequests.size() - 1)) { assertTrue(uploadRequest.isLastPart()); assertEquals(numberOfParts.v2().longValue(), uploadRequest.getPartSize()); } else { @@ -277,7 +283,7 @@ public void testExecuteMultipartUploadAborted() { final BlobPath blobPath = new BlobPath(); final long blobSize = ByteSizeUnit.MB.toBytes(765); - final long bufferSize = ByteSizeUnit.MB.toBytes(150); + final long bufferSize = ByteSizeUnit.MB.toBytes(150); final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); @@ -302,8 +308,7 @@ public void testExecuteMultipartUploadAborted() { if (stage == 0) { // Fail the initialization request - when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))) - .thenThrow(exceptions.get(stage)); + when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenThrow(exceptions.get(stage)); } else if (stage == 1) { final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult(); @@ -311,8 +316,7 @@ public void testExecuteMultipartUploadAborted() { when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult); // Fail the upload part request - when(client.uploadPart(any(UploadPartRequest.class))) - .thenThrow(exceptions.get(stage)); + when(client.uploadPart(any(UploadPartRequest.class))).thenThrow(exceptions.get(stage)); } else { final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult(); @@ -328,8 +332,7 @@ public void testExecuteMultipartUploadAborted() { }); // Fail the completion request - when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))) - .thenThrow(exceptions.get(stage)); + when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenThrow(exceptions.get(stage)); } final ArgumentCaptor<AbortMultipartUploadRequest> argumentCaptor = ArgumentCaptor.forClass(AbortMultipartUploadRequest.class); @@ -371,8 +374,10 @@ public void testExecuteMultipartUploadAborted() { } public void testNumberOfMultipartsWithZeroPartSize() { - final IllegalArgumentException e = - expectThrows(IllegalArgumentException.class, () -> S3BlobContainer.numberOfMultiparts(randomNonNegativeLong(), 0L)); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> S3BlobContainer.numberOfMultiparts(randomNonNegativeLong(), 0L) + ); assertEquals("Part size must be greater than zero", e.getMessage()); } @@ -397,11 +402,16 @@ public void testNumberOfMultiparts() { } public void testInitCannedACL() { - String[] aclList = new String[]{ - "private", "public-read", "public-read-write", "authenticated-read", - "log-delivery-write", "bucket-owner-read", "bucket-owner-full-control"}; - - //empty acl + String[] aclList = new String[] { + "private", + "public-read", + "public-read-write", + "authenticated-read", + "log-delivery-write", + "bucket-owner-read", + "bucket-owner-full-control" }; + + // empty acl
assertThat(S3BlobStore.initCannedACL(null), equalTo(CannedAccessControlList.Private)); assertThat(S3BlobStore.initCannedACL(""), equalTo(CannedAccessControlList.Private)); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java index 0a7d8e3e0ff6f..ea0b554df880e 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java @@ -65,8 +65,9 @@ public void testThereIsADefaultClientByDefault() { } public void testDefaultClientSettingsCanBeSet() { - final Map<String, S3ClientSettings> settings = S3ClientSettings.load(Settings.builder() - .put("s3.client.default.max_retries", 10).build()); + final Map<String, S3ClientSettings> settings = S3ClientSettings.load( + Settings.builder().put("s3.client.default.max_retries", 10).build() + ); assertThat(settings.keySet(), contains("default")); final S3ClientSettings defaultSettings = settings.get("default"); @@ -74,8 +75,9 @@ public void testDefaultClientSettingsCanBeSet() { } public void testNondefaultClientCreatedBySettingItsSettings() { - final Map<String, S3ClientSettings> settings = S3ClientSettings.load(Settings.builder() - .put("s3.client.another_client.max_retries", 10).build()); + final Map<String, S3ClientSettings> settings = S3ClientSettings.load( + Settings.builder().put("s3.client.another_client.max_retries", 10).build() + ); assertThat(settings.keySet(), contains("default", "another_client")); final S3ClientSettings defaultSettings = settings.get("default"); @@ -88,24 +90,30 @@ public void testNondefaultClientCreatedBySettingItsSettings() { public void testRejectionOfLoneAccessKey() { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("s3.client.default.access_key", "aws_key"); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build())); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()) + ); assertThat(e.getMessage(), is("Missing secret key for s3 client [default]")); } public void testRejectionOfLoneSecretKey() { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("s3.client.default.secret_key", "aws_key"); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build())); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()) + ); assertThat(e.getMessage(), is("Missing access key for s3 client [default]")); } public void testRejectionOfLoneSessionToken() { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("s3.client.default.session_token", "aws_key"); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build())); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()) + ); assertThat(e.getMessage(), is("Missing access key and secret key
for s3 client [default]")); } @@ -138,8 +146,8 @@ public void testRefineWithRepoSettings() { secureSettings.setString("s3.client.default.access_key", "access_key"); secureSettings.setString("s3.client.default.secret_key", "secret_key"); secureSettings.setString("s3.client.default.session_token", "session_token"); - final S3ClientSettings baseSettings = S3ClientSettings.load( - Settings.builder().setSecureSettings(secureSettings).build()).get("default"); + final S3ClientSettings baseSettings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()) + .get("default"); { final S3ClientSettings refinedSettings = baseSettings.refine(Settings.EMPTY); @@ -168,14 +176,16 @@ public void testRefineWithRepoSettings() { public void testPathStyleAccessCanBeSet() { final Map<String, S3ClientSettings> settings = S3ClientSettings.load( - Settings.builder().put("s3.client.other.path_style_access", true).build()); + Settings.builder().put("s3.client.other.path_style_access", true).build() + ); assertThat(settings.get("default").pathStyleAccess, is(false)); assertThat(settings.get("other").pathStyleAccess, is(true)); } public void testUseChunkedEncodingCanBeSet() { final Map<String, S3ClientSettings> settings = S3ClientSettings.load( - Settings.builder().put("s3.client.other.disable_chunked_encoding", true).build()); + Settings.builder().put("s3.client.other.disable_chunked_encoding", true).build() + ); assertThat(settings.get("default").disableChunkedEncoding, is(false)); assertThat(settings.get("other").disableChunkedEncoding, is(true)); } @@ -183,7 +193,8 @@ public void testUseChunkedEncodingCanBeSet() { public void testRegionCanBeSet() { final String region = randomAlphaOfLength(5); final Map<String, S3ClientSettings> settings = S3ClientSettings.load( - Settings.builder().put("s3.client.other.region", region).build()); + Settings.builder().put("s3.client.other.region", region).build() + ); assertThat(settings.get("default").region, is("")); assertThat(settings.get("other").region, is(region)); try (S3Service s3Service = new S3Service()) { @@ -195,7 +206,8 @@ public void testRegionCanBeSet() { public void testSignerOverrideCanBeSet() { final String signerOverride = randomAlphaOfLength(5); final Map<String, S3ClientSettings> settings = S3ClientSettings.load( - Settings.builder().put("s3.client.other.signer_override", signerOverride).build()); + Settings.builder().put("s3.client.other.signer_override", signerOverride).build() + ); assertThat(settings.get("default").region, is("")); assertThat(settings.get("other").signerOverride, is(signerOverride)); ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default")); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java index 0d5c98973bfc2..da1f5d71b4da5 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java @@ -69,19 +69,16 @@ public AmazonS3Reference client(RepositoryMetadata repositoryMetadata) { } @Override - public void refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings) { - } + public void refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings) {} @Override - public void close() { - } + public void close() {} } public void testInvalidChunkBufferSizeSettings() { // chunk < buffer should fail final Settings s1 = bufferAndChunkSettings(10, 5); - final Exception e1 = expectThrows(RepositoryException.class, - () ->
createS3Repo(getRepositoryMetadata(s1))); + final Exception e1 = expectThrows(RepositoryException.class, () -> createS3Repo(getRepositoryMetadata(s1))); assertThat(e1.getMessage(), containsString("chunk_size (5mb) can't be lower than buffer_size (10mb)")); // chunk > buffer should pass final Settings s2 = bufferAndChunkSettings(5, 10); @@ -91,22 +88,24 @@ public void testInvalidChunkBufferSizeSettings() { createS3Repo(getRepositoryMetadata(s3)).close(); // buffer < 5mb should fail final Settings s4 = bufferAndChunkSettings(4, 10); - final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, - () -> createS3Repo(getRepositoryMetadata(s4)) - .close()); + final IllegalArgumentException e2 = expectThrows( + IllegalArgumentException.class, + () -> createS3Repo(getRepositoryMetadata(s4)).close() + ); assertThat(e2.getMessage(), containsString("failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]")); final Settings s5 = bufferAndChunkSettings(5, 6000000); - final IllegalArgumentException e3 = expectThrows(IllegalArgumentException.class, - () -> createS3Repo(getRepositoryMetadata(s5)) - .close()); + final IllegalArgumentException e3 = expectThrows( + IllegalArgumentException.class, + () -> createS3Repo(getRepositoryMetadata(s5)).close() + ); assertThat(e3.getMessage(), containsString("failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]")); } private Settings bufferAndChunkSettings(long buffer, long chunk) { return Settings.builder() - .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(buffer, ByteSizeUnit.MB).getStringRep()) - .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunk, ByteSizeUnit.MB).getStringRep()) - .build(); + .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(buffer, ByteSizeUnit.MB).getStringRep()) + .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunk, ByteSizeUnit.MB).getStringRep()) + .build(); } private RepositoryMetadata getRepositoryMetadata(Settings settings) { @@ -114,8 +113,11 @@ private RepositoryMetadata getRepositoryMetadata(Settings settings) { } public void testBasePathSetting() { - final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.builder() - .put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build()); + final RepositoryMetadata metadata = new RepositoryMetadata( + "dummy-repo", + "mock", + Settings.builder().put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build() + ); try (S3Repository s3repo = createS3Repo(metadata)) { assertEquals("foo/bar/", s3repo.basePath().buildAsString()); } @@ -126,7 +128,7 @@ public void testDefaultBufferSize() { try (S3Repository s3repo = createS3Repo(metadata)) { assertThat(s3repo.getBlobStore(), is(nullValue())); s3repo.start(); - final long defaultBufferSize = ((S3BlobStore)s3repo.blobStore()).bufferSizeInBytes(); + final long defaultBufferSize = ((S3BlobStore) s3repo.blobStore()).bufferSizeInBytes(); assertThat(s3repo.getBlobStore(), not(nullValue())); assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(100L * 1024 * 1024)); assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(5L * 1024 * 1024)); @@ -134,8 +136,13 @@ public void testDefaultBufferSize() { } private S3Repository createS3Repo(RepositoryMetadata metadata) { - return new S3Repository(metadata, NamedXContentRegistry.EMPTY, new DummyS3Service(), BlobStoreTestUtil.mockClusterService(), - new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, 
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + return new S3Repository( + metadata, + NamedXContentRegistry.EMPTY, + new DummyS3Service(), + BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ) { @Override protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually on test/main threads diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java index b5defd1891fa7..f7877ad3363bb 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java @@ -103,11 +103,8 @@ public void testRangeInputStreamIsAborted() throws IOException { assertThat(stream.isAborted(), is(true)); } - private S3RetryingInputStream createInputStream( - final byte[] data, - @Nullable final Integer position, - @Nullable final Integer length - ) throws IOException { + private S3RetryingInputStream createInputStream(final byte[] data, @Nullable final Integer position, @Nullable final Integer length) + throws IOException { final S3Object s3Object = new S3Object(); final AmazonS3 client = mock(AmazonS3.class); when(client.getObject(any(GetObjectRequest.class))).thenReturn(s3Object); diff --git a/plugins/repository-s3/src/yamlRestTest/java/org/opensearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java b/plugins/repository-s3/src/yamlRestTest/java/org/opensearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java index 727cbc3eb85e9..5e9976b2bf659 100644 --- a/plugins/repository-s3/src/yamlRestTest/java/org/opensearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java +++ b/plugins/repository-s3/src/yamlRestTest/java/org/opensearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java @@ -49,4 +49,3 @@ public static Iterable<Object[]> parameters() throws Exception { return OpenSearchClientYamlSuiteTestCase.createParameters(); } } - diff --git a/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/AbstractAzureFsTestCase.java b/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/AbstractAzureFsTestCase.java index 1dd34a0ac8613..ab3ffe97199ed 100644 --- a/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/AbstractAzureFsTestCase.java +++ b/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/AbstractAzureFsTestCase.java @@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.is; -public abstract class AbstractAzureFsTestCase extends OpenSearchIntegTestCase { +public abstract class AbstractAzureFsTestCase extends OpenSearchIntegTestCase { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(SMBStorePlugin.class); diff --git a/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbMMapFsTests.java b/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbMMapFsTests.java index 2603e786de0b1..70da2f2d1a821 100644 --- a/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbMMapFsTests.java +++ b/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbMMapFsTests.java @@ -34,15 +34,11 @@ import org.opensearch.common.settings.Settings; - public class SmbMMapFsTests extends
AbstractAzureFsTestCase { @Override public Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put("index.store.type", "smb_mmap_fs") - .build(); + return Settings.builder().put(super.indexSettings()).put("index.store.type", "smb_mmap_fs").build(); } } diff --git a/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbNIOFsTests.java b/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbNIOFsTests.java index c2d6087dd9aed..6610d8f704ea3 100644 --- a/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbNIOFsTests.java +++ b/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbNIOFsTests.java @@ -16,9 +16,6 @@ public class SmbNIOFsTests extends AbstractAzureFsTestCase { @Override public Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put("index.store.type", "smb_nio_fs") - .build(); + return Settings.builder().put(super.indexSettings()).put("index.store.type", "smb_nio_fs").build(); } } diff --git a/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbSimpleFsTests.java b/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbSimpleFsTests.java index a31e4d34560a7..266801829d4a9 100644 --- a/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbSimpleFsTests.java +++ b/plugins/store-smb/src/internalClusterTest/java/org/opensearch/index/store/SmbSimpleFsTests.java @@ -34,13 +34,9 @@ import org.opensearch.common.settings.Settings; - public class SmbSimpleFsTests extends AbstractAzureFsTestCase { @Override public Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put("index.store.type", "smb_simple_fs") - .build(); + return Settings.builder().put(super.indexSettings()).put("index.store.type", "smb_simple_fs").build(); } } diff --git a/plugins/store-smb/src/main/java/org/opensearch/index/store/SmbDirectoryWrapper.java b/plugins/store-smb/src/main/java/org/opensearch/index/store/SmbDirectoryWrapper.java index 6c208dd1bc730..b50d45f68f7c6 100644 --- a/plugins/store-smb/src/main/java/org/opensearch/index/store/SmbDirectoryWrapper.java +++ b/plugins/store-smb/src/main/java/org/opensearch/index/store/SmbDirectoryWrapper.java @@ -75,22 +75,33 @@ final class SmbFSIndexOutput extends OutputStreamIndexOutput { static final int CHUNK_SIZE = 8192; SmbFSIndexOutput(String name) throws IOException { - super("SmbFSIndexOutput(path=\"" + fsDirectory.getDirectory().resolve(name) + "\")", name, - new FilterOutputStream(Channels.newOutputStream(Files.newByteChannel(fsDirectory.getDirectory().resolve(name), - StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING, - StandardOpenOption.READ, StandardOpenOption.WRITE))) { - // This implementation ensures, that we never write more than CHUNK_SIZE bytes: - @Override - public void write(byte[] b, int offset, int length) throws IOException { - while (length > 0) { - final int chunk = Math.min(length, CHUNK_SIZE); - out.write(b, offset, chunk); - length -= chunk; - offset += chunk; - } + super( + "SmbFSIndexOutput(path=\"" + fsDirectory.getDirectory().resolve(name) + "\")", + name, + new FilterOutputStream( + Channels.newOutputStream( + Files.newByteChannel( + fsDirectory.getDirectory().resolve(name), + StandardOpenOption.CREATE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.READ, + StandardOpenOption.WRITE + ) + ) + ) { + // This implementation ensures, that we never write 
more than CHUNK_SIZE bytes: + @Override + public void write(byte[] b, int offset, int length) throws IOException { + while (length > 0) { + final int chunk = Math.min(length, CHUNK_SIZE); + out.write(b, offset, chunk); + length -= chunk; + offset += chunk; } - }, - CHUNK_SIZE); + } + }, + CHUNK_SIZE + ); } } diff --git a/plugins/store-smb/src/main/java/org/opensearch/index/store/smbmmapfs/SmbMmapFsDirectoryFactory.java b/plugins/store-smb/src/main/java/org/opensearch/index/store/smbmmapfs/SmbMmapFsDirectoryFactory.java index 9d9302016b7b2..286688f61e658 100644 --- a/plugins/store-smb/src/main/java/org/opensearch/index/store/smbmmapfs/SmbMmapFsDirectoryFactory.java +++ b/plugins/store-smb/src/main/java/org/opensearch/index/store/smbmmapfs/SmbMmapFsDirectoryFactory.java @@ -48,7 +48,12 @@ public final class SmbMmapFsDirectoryFactory extends FsDirectoryFactory { @Override protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException { - return new SmbDirectoryWrapper(setPreload(new MMapDirectory(location, lockFactory), lockFactory, new HashSet<>( - indexSettings.getValue(IndexModule.INDEX_STORE_PRE_LOAD_SETTING)))); + return new SmbDirectoryWrapper( + setPreload( + new MMapDirectory(location, lockFactory), + lockFactory, + new HashSet<>(indexSettings.getValue(IndexModule.INDEX_STORE_PRE_LOAD_SETTING)) + ) + ); } } diff --git a/plugins/store-smb/src/main/java/org/opensearch/index/store/smbsimplefs/SmbSimpleFsDirectoryFactory.java b/plugins/store-smb/src/main/java/org/opensearch/index/store/smbsimplefs/SmbSimpleFsDirectoryFactory.java index 1ae16eb9b6fef..83f176e99dde1 100644 --- a/plugins/store-smb/src/main/java/org/opensearch/index/store/smbsimplefs/SmbSimpleFsDirectoryFactory.java +++ b/plugins/store-smb/src/main/java/org/opensearch/index/store/smbsimplefs/SmbSimpleFsDirectoryFactory.java @@ -56,8 +56,10 @@ public final class SmbSimpleFsDirectoryFactory extends FsDirectoryFactory { @Override protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException { - DEPRECATION_LOGGER.deprecate(IndexModule.Type.SIMPLEFS.getSettingsKey(), IndexModule.Type.SIMPLEFS.getSettingsKey() - + " is deprecated and will be removed in 2.0"); + DEPRECATION_LOGGER.deprecate( + IndexModule.Type.SIMPLEFS.getSettingsKey(), + IndexModule.Type.SIMPLEFS.getSettingsKey() + " is deprecated and will be removed in 2.0" + ); return new SmbDirectoryWrapper(new SimpleFSDirectory(location, lockFactory)); } } diff --git a/plugins/store-smb/src/main/java/org/opensearch/plugin/store/smb/SMBStorePlugin.java b/plugins/store-smb/src/main/java/org/opensearch/plugin/store/smb/SMBStorePlugin.java index 41116fb956b2d..22fb6de82999f 100644 --- a/plugins/store-smb/src/main/java/org/opensearch/plugin/store/smb/SMBStorePlugin.java +++ b/plugins/store-smb/src/main/java/org/opensearch/plugin/store/smb/SMBStorePlugin.java @@ -52,8 +52,10 @@ public class SMBStorePlugin extends Plugin implements IndexStorePlugin { public Map<String, DirectoryFactory> getDirectoryFactories() { final Map<String, DirectoryFactory> indexStoreFactories = new HashMap<>(2); indexStoreFactories.put("smb_mmap_fs", new SmbMmapFsDirectoryFactory()); - DEPRECATION_LOGGER.deprecate(IndexModule.Type.SIMPLEFS.getSettingsKey(), IndexModule.Type.SIMPLEFS.getSettingsKey() - + " is deprecated and will be removed in 2.0"); + DEPRECATION_LOGGER.deprecate( + IndexModule.Type.SIMPLEFS.getSettingsKey(), + IndexModule.Type.SIMPLEFS.getSettingsKey() + " is deprecated and will be removed in 2.0" + );
indexStoreFactories.put("smb_simple_fs", new SmbSimpleFsDirectoryFactory()); indexStoreFactories.put("smb_nio_fs", new SmbNIOFsDirectoryFactory()); return Collections.unmodifiableMap(indexStoreFactories); diff --git a/plugins/store-smb/src/yamlRestTest/java/org/opensearch/index/store/StoreSmbClientYamlTestSuiteIT.java b/plugins/store-smb/src/yamlRestTest/java/org/opensearch/index/store/StoreSmbClientYamlTestSuiteIT.java index fa2a500363100..40727709ea954 100644 --- a/plugins/store-smb/src/yamlRestTest/java/org/opensearch/index/store/StoreSmbClientYamlTestSuiteIT.java +++ b/plugins/store-smb/src/yamlRestTest/java/org/opensearch/index/store/StoreSmbClientYamlTestSuiteIT.java @@ -49,4 +49,3 @@ public static Iterable<Object[]> parameters() throws Exception { return OpenSearchClientYamlSuiteTestCase.createParameters(); } } - diff --git a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/NioIntegTestCase.java b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/NioIntegTestCase.java index c06bca907476d..02e1efabd24a4 100644 --- a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/NioIntegTestCase.java +++ b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/NioIntegTestCase.java @@ -69,7 +69,7 @@ protected Settings nodeSettings(int nodeOrdinal) { protected Settings transportClientSettings() { Settings.Builder builder = Settings.builder().put(super.transportClientSettings()); builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME); - return builder.build(); + return builder.build(); } @Override diff --git a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java index 2fcd1568ec3fe..ac06bf03ed8cd 100644 --- a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java +++ b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java @@ -54,7 +54,7 @@ protected boolean addMockHttpTransport() { } public void testThatNioHttpServerSupportsPipelining() throws Exception { - String[] requests = new String[]{"/", "/_nodes/stats", "/", "/_cluster/state", "/"}; + String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" }; HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); diff --git a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/transport/nio/NioTransportLoggingIT.java b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/transport/nio/NioTransportLoggingIT.java index 66c321080f2fd..4c8e711be3367 100644 --- a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/transport/nio/NioTransportLoggingIT.java +++ b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/transport/nio/NioTransportLoggingIT.java @@ -69,28 +69,32 @@ public void tearDown() throws Exception { @TestLogging(value = "org.opensearch.transport.TransportLogger:trace", reason = "to ensure we log network events on TRACE level") public void testLoggingHandler() { - final String writePattern = - ".*\\[length: \\d+" + - ", request id: \\d+" + - ", type: request" + - ", version: .*" + - ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + - " WRITE: \\d+B"; - final MockLogAppender.LoggingExpectation writeExpectation = - new
MockLogAppender.PatternSeenEventExpectation( - "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern); - - final String readPattern = - ".*\\[length: \\d+" + - ", request id: \\d+" + - ", type: request" + - ", version: .*" + - ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + - " READ: \\d+B"; - - final MockLogAppender.LoggingExpectation readExpectation = - new MockLogAppender.PatternSeenEventExpectation( - "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern); + final String writePattern = ".*\\[length: \\d+" + + ", request id: \\d+" + + ", type: request" + + ", version: .*" + + ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + + " WRITE: \\d+B"; + final MockLogAppender.LoggingExpectation writeExpectation = new MockLogAppender.PatternSeenEventExpectation( + "hot threads request", + TransportLogger.class.getCanonicalName(), + Level.TRACE, + writePattern + ); + + final String readPattern = ".*\\[length: \\d+" + + ", request id: \\d+" + + ", type: request" + + ", version: .*" + + ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + + " READ: \\d+B"; + + final MockLogAppender.LoggingExpectation readExpectation = new MockLogAppender.PatternSeenEventExpectation( + "hot threads request", + TransportLogger.class.getCanonicalName(), + Level.TRACE, + readPattern + ); appender.addExpectation(writeExpectation); appender.addExpectation(readExpectation); @@ -100,12 +104,22 @@ public void testLoggingHandler() { @TestLogging(value = "org.opensearch.transport.TcpTransport:DEBUG", reason = "to ensure we log connection events on DEBUG level") public void testConnectionLogging() throws IOException { - appender.addExpectation(new MockLogAppender.PatternSeenEventExpectation("open connection log", - TcpTransport.class.getCanonicalName(), Level.DEBUG, - ".*opened transport connection \\[[1-9][0-9]*\\] to .*")); - appender.addExpectation(new MockLogAppender.PatternSeenEventExpectation("close connection log", - TcpTransport.class.getCanonicalName(), Level.DEBUG, - ".*closed transport connection \\[[1-9][0-9]*\\] to .* with age \\[[0-9]+ms\\].*")); + appender.addExpectation( + new MockLogAppender.PatternSeenEventExpectation( + "open connection log", + TcpTransport.class.getCanonicalName(), + Level.DEBUG, + ".*opened transport connection \\[[1-9][0-9]*\\] to .*" + ) + ); + appender.addExpectation( + new MockLogAppender.PatternSeenEventExpectation( + "close connection log", + TcpTransport.class.getCanonicalName(), + Level.DEBUG, + ".*closed transport connection \\[[1-9][0-9]*\\] to .* with age \\[[0-9]+ms\\].*" + ) + ); final String nodeName = internalCluster().startNode(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeName)); diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java index af6b30150c60c..1d705bce64852 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java @@ -70,8 +70,13 @@ public class HttpReadWriteHandler implements NioChannelHandler { private boolean requestSinceReadTimeoutTrigger = false; private int inFlightRequests = 0; - public HttpReadWriteHandler(NioHttpChannel nioHttpChannel, NioHttpServerTransport transport, HttpHandlingSettings settings, - TaskScheduler taskScheduler, LongSupplier nanoClock) { + public 
HttpReadWriteHandler( + NioHttpChannel nioHttpChannel, + NioHttpServerTransport transport, + HttpHandlingSettings settings, + TaskScheduler taskScheduler, + LongSupplier nanoClock + ) { this.nioHttpChannel = nioHttpChannel; this.transport = transport; this.taskScheduler = taskScheduler; @@ -79,8 +84,11 @@ public HttpReadWriteHandler(NioHttpChannel nioHttpChannel, NioHttpServerTranspor this.readTimeoutNanos = TimeUnit.MILLISECONDS.toNanos(settings.getReadTimeoutMillis()); List handlers = new ArrayList<>(8); - HttpRequestDecoder decoder = new HttpRequestDecoder(settings.getMaxInitialLineLength(), settings.getMaxHeaderSize(), - settings.getMaxChunkSize()); + HttpRequestDecoder decoder = new HttpRequestDecoder( + settings.getMaxInitialLineLength(), + settings.getMaxHeaderSize(), + settings.getMaxChunkSize() + ); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); handlers.add(decoder); handlers.add(new HttpContentDecompressor()); @@ -187,10 +195,16 @@ private void scheduleReadTimeout() { private static boolean assertMessageTypes(Object message) { assert message instanceof HttpPipelinedResponse : "This channel only supports messages that are of type: " - + HttpPipelinedResponse.class + ". Found type: " + message.getClass() + "."; - assert ((HttpPipelinedResponse) message).getDelegateRequest() instanceof NioHttpResponse : - "This channel only pipelined responses with a delegate of type: " + NioHttpResponse.class + - ". Found type: " + ((HttpPipelinedResponse) message).getDelegateRequest().getClass() + "."; + + HttpPipelinedResponse.class + + ". Found type: " + + message.getClass() + + "."; + assert ((HttpPipelinedResponse) message) + .getDelegateRequest() instanceof NioHttpResponse : "This channel only pipelined responses with a delegate of type: " + + NioHttpResponse.class + + ". 
Found type: " + + ((HttpPipelinedResponse) message).getDelegateRequest().getClass() + + "."; return true; } } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpChannel.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpChannel.java index 9e138d9264ca1..a20bb55458951 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpChannel.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpChannel.java @@ -56,9 +56,6 @@ public void addCloseListener(ActionListener listener) { @Override public String toString() { - return "NioHttpChannel{" + - "localAddress=" + getLocalAddress() + - ", remoteAddress=" + getRemoteAddress() + - '}'; + return "NioHttpChannel{" + "localAddress=" + getLocalAddress() + ", remoteAddress=" + getRemoteAddress() + '}'; } } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java index 717480dcfd022..73c603ffc059e 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java @@ -68,22 +68,44 @@ public class NioHttpRequest implements HttpRequest { private final boolean pooled; NioHttpRequest(FullHttpRequest request) { - this(request, new HttpHeadersMap(request.headers()), new AtomicBoolean(false), true, - ByteBufUtils.toBytesReference(request.content())); + this( + request, + new HttpHeadersMap(request.headers()), + new AtomicBoolean(false), + true, + ByteBufUtils.toBytesReference(request.content()) + ); } NioHttpRequest(FullHttpRequest request, Exception inboundException) { - this(request, new HttpHeadersMap(request.headers()), new AtomicBoolean(false), true, - ByteBufUtils.toBytesReference(request.content()), inboundException); + this( + request, + new HttpHeadersMap(request.headers()), + new AtomicBoolean(false), + true, + ByteBufUtils.toBytesReference(request.content()), + inboundException + ); } - private NioHttpRequest(FullHttpRequest request, HttpHeadersMap headers, AtomicBoolean released, boolean pooled, - BytesReference content) { + private NioHttpRequest( + FullHttpRequest request, + HttpHeadersMap headers, + AtomicBoolean released, + boolean pooled, + BytesReference content + ) { this(request, headers, released, pooled, content, null); } - private NioHttpRequest(FullHttpRequest request, HttpHeadersMap headers, AtomicBoolean released, boolean pooled, - BytesReference content, Exception inboundException) { + private NioHttpRequest( + FullHttpRequest request, + HttpHeadersMap headers, + AtomicBoolean released, + boolean pooled, + BytesReference content, + Exception inboundException + ) { this.request = request; this.headers = headers; this.content = content; @@ -95,17 +117,13 @@ private NioHttpRequest(FullHttpRequest request, HttpHeadersMap headers, AtomicBo @Override public RestRequest.Method method() { HttpMethod httpMethod = request.method(); - if (httpMethod == HttpMethod.GET) - return RestRequest.Method.GET; + if (httpMethod == HttpMethod.GET) return RestRequest.Method.GET; - if (httpMethod == HttpMethod.POST) - return RestRequest.Method.POST; + if (httpMethod == HttpMethod.POST) return RestRequest.Method.POST; - if (httpMethod == HttpMethod.PUT) - return RestRequest.Method.PUT; + if (httpMethod == HttpMethod.PUT) return RestRequest.Method.PUT; - if (httpMethod == HttpMethod.DELETE) - return RestRequest.Method.DELETE; + if (httpMethod 
== HttpMethod.DELETE) return RestRequest.Method.DELETE; if (httpMethod == HttpMethod.HEAD) { return RestRequest.Method.HEAD; @@ -157,9 +175,19 @@ public HttpRequest releaseAndCopy() { try { final ByteBuf copiedContent = Unpooled.copiedBuffer(request.content()); return new NioHttpRequest( - new DefaultFullHttpRequest(request.protocolVersion(), request.method(), request.uri(), copiedContent, request.headers(), - request.trailingHeaders()), - headers, new AtomicBoolean(false), false, ByteBufUtils.toBytesReference(copiedContent)); + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + copiedContent, + request.headers(), + request.trailingHeaders() + ), + headers, + new AtomicBoolean(false), + false, + ByteBufUtils.toBytesReference(copiedContent) + ); } finally { release(); } @@ -201,8 +229,14 @@ public HttpRequest removeHeader(String header) { HttpHeaders trailingHeaders = new DefaultHttpHeaders(); trailingHeaders.add(request.trailingHeaders()); trailingHeaders.remove(header); - FullHttpRequest requestWithoutHeader = new DefaultFullHttpRequest(request.protocolVersion(), request.method(), request.uri(), - request.content(), headersWithoutContentTypeHeader, trailingHeaders); + FullHttpRequest requestWithoutHeader = new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + request.content(), + headersWithoutContentTypeHeader, + trailingHeaders + ); return new NioHttpRequest(requestWithoutHeader, new HttpHeadersMap(requestWithoutHeader.headers()), released, pooled, content); } @@ -293,7 +327,9 @@ public Collection> values() { @Override public Set>> entrySet() { - return httpHeaders.names().stream().map(k -> new AbstractMap.SimpleImmutableEntry<>(k, httpHeaders.getAll(k))) + return httpHeaders.names() + .stream() + .map(k -> new AbstractMap.SimpleImmutableEntry<>(k, httpHeaders.getAll(k))) .collect(Collectors.toSet()); } } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java index 60cef5545d94e..5d480794d3920 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java @@ -98,9 +98,17 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { private volatile NioGroup nioGroup; private ChannelFactory channelFactory; - public NioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, - PageCacheRecycler pageCacheRecycler, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, - Dispatcher dispatcher, NioGroupFactory nioGroupFactory, ClusterSettings clusterSettings) { + public NioHttpServerTransport( + Settings settings, + NetworkService networkService, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, + Dispatcher dispatcher, + NioGroupFactory nioGroupFactory, + ClusterSettings clusterSettings + ) { super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings); this.pageAllocator = new PageAllocator(pageCacheRecycler); this.nioGroupFactory = nioGroupFactory; @@ -119,10 +127,15 @@ public NioHttpServerTransport(Settings settings, NetworkService networkService, this.tcpSendBufferSize = Math.toIntExact(SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings).getBytes()); 
this.tcpReceiveBufferSize = Math.toIntExact(SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings).getBytes()); - - logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]," + - " pipelining_max_events[{}]", - maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength, pipeliningMaxEvents); + logger.debug( + "using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]," + + " pipelining_max_events[{}]", + maxChunkSize, + maxHeaderSize, + maxInitialLineLength, + maxContentLength, + pipeliningMaxEvents + ); } public Logger getLogger() { @@ -175,30 +188,58 @@ protected void acceptChannel(NioSocketChannel socketChannel) { private class HttpChannelFactory extends ChannelFactory { private HttpChannelFactory() { - super(tcpNoDelay, tcpKeepAlive, tcpKeepIdle, tcpKeepInterval, tcpKeepCount, reuseAddress, tcpSendBufferSize, - tcpReceiveBufferSize); + super( + tcpNoDelay, + tcpKeepAlive, + tcpKeepIdle, + tcpKeepInterval, + tcpKeepCount, + reuseAddress, + tcpSendBufferSize, + tcpReceiveBufferSize + ); } @Override public NioHttpChannel createChannel(NioSelector selector, SocketChannel channel, Config.Socket socketConfig) { NioHttpChannel httpChannel = new NioHttpChannel(channel); - HttpReadWriteHandler handler = new HttpReadWriteHandler(httpChannel,NioHttpServerTransport.this, - handlingSettings, selector.getTaskScheduler(), threadPool::relativeTimeInMillis); + HttpReadWriteHandler handler = new HttpReadWriteHandler( + httpChannel, + NioHttpServerTransport.this, + handlingSettings, + selector.getTaskScheduler(), + threadPool::relativeTimeInMillis + ); Consumer exceptionHandler = (e) -> onException(httpChannel, e); - SocketChannelContext context = new BytesChannelContext(httpChannel, selector, socketConfig, exceptionHandler, handler, - new InboundChannelBuffer(pageAllocator)); + SocketChannelContext context = new BytesChannelContext( + httpChannel, + selector, + socketConfig, + exceptionHandler, + handler, + new InboundChannelBuffer(pageAllocator) + ); httpChannel.setContext(context); return httpChannel; } @Override - public NioHttpServerChannel createServerChannel(NioSelector selector, ServerSocketChannel channel, - Config.ServerSocket socketConfig) { + public NioHttpServerChannel createServerChannel( + NioSelector selector, + ServerSocketChannel channel, + Config.ServerSocket socketConfig + ) { NioHttpServerChannel httpServerChannel = new NioHttpServerChannel(channel); Consumer exceptionHandler = (e) -> onServerException(httpServerChannel, e); Consumer acceptor = NioHttpServerTransport.this::acceptChannel; - ServerChannelContext context = new ServerChannelContext(httpServerChannel, this, selector, socketConfig, acceptor, - exceptionHandler); + ServerChannelContext context = new ServerChannelContext( + httpServerChannel, + this, + selector, + socketConfig, + acceptor, + exceptionHandler + ); httpServerChannel.setContext(context); return httpServerChannel; } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/PagedByteBuf.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/PagedByteBuf.java index b1f973b0f94bf..4dc9c0935be67 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/PagedByteBuf.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/PagedByteBuf.java @@ -76,7 +76,6 @@ private static ByteBuf byteBufFromPage(Page page) { return newByteBuf.slice(offset, buffer.remaining()); } - @Override protected void deallocate() { try { 
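The HttpChannelFactory hunk above, like the TCP channel factories later in this diff, wires every channel the same way: construct the channel, build its read/write handler, attach an exception handler, fold both into a context, and hand the context back via setContext. A minimal standalone sketch of that sequence, using simplified stand-in types (Channel, ChannelContext, ReadWriteHandler here are illustrative placeholders, not the real OpenSearch NIO API):

    import java.util.function.Consumer;

    // Hypothetical, simplified stand-ins for the NIO plugin's channel types.
    interface ReadWriteHandler {
        void handle(byte[] bytes);
    }

    final class ChannelContext {
        private final ReadWriteHandler handler;
        private final Consumer<Exception> exceptionHandler;

        ChannelContext(ReadWriteHandler handler, Consumer<Exception> exceptionHandler) {
            this.handler = handler;
            this.exceptionHandler = exceptionHandler;
        }

        void read(byte[] bytes) {
            try {
                handler.handle(bytes);
            } catch (Exception e) {
                exceptionHandler.accept(e); // errors are routed back to the owning transport
            }
        }
    }

    final class Channel {
        private ChannelContext context;

        // The factory wires the context in after construction, mirroring httpChannel.setContext(context).
        void setContext(ChannelContext context) {
            this.context = context;
        }

        ChannelContext getContext() {
            return context;
        }
    }

    final class ChannelFactorySketch {
        Channel createChannel() {
            Channel channel = new Channel();
            ReadWriteHandler handler = bytes -> System.out.println("read " + bytes.length + " bytes");
            Consumer<Exception> exceptionHandler = e -> System.err.println("channel error: " + e);
            channel.setContext(new ChannelContext(handler, exceptionHandler));
            return channel;
        }

        public static void main(String[] args) {
            Channel channel = new ChannelFactorySketch().createChannel();
            channel.getContext().read(new byte[] { 1, 2, 3 });
        }
    }

The two-step construction (create the channel, then setContext) exists because the context needs the channel's handler while the channel needs the finished context; the factory resolves that cycle explicitly.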
diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioGroupFactory.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioGroupFactory.java index 5ced73e7331c4..8463c81a3492b 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioGroupFactory.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioGroupFactory.java @@ -85,16 +85,22 @@ public synchronized NioGroup getHttpGroup() throws IOException { if (httpWorkerCount == 0) { return getGenericGroup(); } else { - return new NioSelectorGroup(daemonThreadFactory(this.settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX), - httpWorkerCount, (s) -> new EventHandler(this::onException, s)); + return new NioSelectorGroup( + daemonThreadFactory(this.settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX), + httpWorkerCount, + (s) -> new EventHandler(this::onException, s) + ); } } private NioGroup getGenericGroup() throws IOException { if (refCountedGroup == null) { ThreadFactory threadFactory = daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX); - NioSelectorGroup nioGroup = new NioSelectorGroup(threadFactory, NioTransportPlugin.NIO_WORKER_COUNT.get(settings), - (s) -> new EventHandler(this::onException, s)); + NioSelectorGroup nioGroup = new NioSelectorGroup( + threadFactory, + NioTransportPlugin.NIO_WORKER_COUNT.get(settings), + (s) -> new EventHandler(this::onException, s) + ); this.refCountedGroup = new RefCountedNioGroup(nioGroup); return new WrappedNioGroup(refCountedGroup); } else { @@ -104,8 +110,10 @@ private NioGroup getGenericGroup() throws IOException { } private void onException(Exception exception) { - logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), - exception); + logger.warn( + new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), + exception + ); } private static class RefCountedNioGroup extends AbstractRefCounted implements NioGroup { diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpChannel.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpChannel.java index e3e41aae9408e..81a07fc646907 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpChannel.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpChannel.java @@ -87,9 +87,6 @@ public void close() { @Override public String toString() { - return "TcpNioSocketChannel{" + - "localAddress=" + getLocalAddress() + - ", remoteAddress=" + getRemoteAddress() + - '}'; + return "TcpNioSocketChannel{" + "localAddress=" + getLocalAddress() + ", remoteAddress=" + getRemoteAddress() + '}'; } } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpServerChannel.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpServerChannel.java index 4aac3201a5478..69218f2f1fcf5 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpServerChannel.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpServerChannel.java @@ -59,8 +59,6 @@ public void addCloseListener(ActionListener listener) { @Override public String toString() { - return "TcpNioServerSocketChannel{" + - "localAddress=" + getLocalAddress() + - '}'; + return "TcpNioServerSocketChannel{" + "localAddress=" + getLocalAddress() + '}'; 
} } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java index e6f208d915769..b12247c9c711c 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java @@ -77,9 +77,16 @@ public class NioTransport extends TcpTransport { private volatile NioGroup nioGroup; private volatile Function clientChannelFactory; - protected NioTransport(Settings settings, Version version, ThreadPool threadPool, NetworkService networkService, - PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, - CircuitBreakerService circuitBreakerService, NioGroupFactory groupFactory) { + protected NioTransport( + Settings settings, + Version version, + ThreadPool threadPool, + NetworkService networkService, + PageCacheRecycler pageCacheRecycler, + NamedWriteableRegistry namedWriteableRegistry, + CircuitBreakerService circuitBreakerService, + NioGroupFactory groupFactory + ) { super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); this.pageAllocator = new PageAllocator(pageCacheRecycler); this.groupFactory = groupFactory; @@ -156,9 +163,16 @@ protected Function clientChannelFactoryFunctio protected abstract static class TcpChannelFactory extends ChannelFactory { protected TcpChannelFactory(ProfileSettings profileSettings) { - super(profileSettings.tcpNoDelay, profileSettings.tcpKeepAlive, profileSettings.tcpKeepIdle, profileSettings.tcpKeepInterval, - profileSettings.tcpKeepCount, profileSettings.reuseAddress, Math.toIntExact(profileSettings.sendBufferSize.getBytes()), - Math.toIntExact(profileSettings.receiveBufferSize.getBytes())); + super( + profileSettings.tcpNoDelay, + profileSettings.tcpKeepAlive, + profileSettings.tcpKeepIdle, + profileSettings.tcpKeepInterval, + profileSettings.tcpKeepCount, + profileSettings.reuseAddress, + Math.toIntExact(profileSettings.sendBufferSize.getBytes()), + Math.toIntExact(profileSettings.receiveBufferSize.getBytes()) + ); } } @@ -178,15 +192,24 @@ public NioTcpChannel createChannel(NioSelector selector, SocketChannel channel, NioTcpChannel nioChannel = new NioTcpChannel(isClient == false, profileName, channel); Consumer exceptionHandler = (e) -> onException(nioChannel, e); TcpReadWriteHandler handler = new TcpReadWriteHandler(nioChannel, pageCacheRecycler, NioTransport.this); - BytesChannelContext context = new BytesChannelContext(nioChannel, selector, socketConfig, exceptionHandler, handler, - new InboundChannelBuffer(pageAllocator)); + BytesChannelContext context = new BytesChannelContext( + nioChannel, + selector, + socketConfig, + exceptionHandler, + handler, + new InboundChannelBuffer(pageAllocator) + ); nioChannel.setContext(context); return nioChannel; } @Override - public NioTcpServerChannel createServerChannel(NioSelector selector, ServerSocketChannel channel, - Config.ServerSocket socketConfig) { + public NioTcpServerChannel createServerChannel( + NioSelector selector, + ServerSocketChannel channel, + Config.ServerSocket socketConfig + ) { NioTcpServerChannel nioChannel = new NioTcpServerChannel(channel); Consumer exceptionHandler = (e) -> onServerException(nioChannel, e); Consumer acceptor = NioTransport.this::acceptChannel; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java 
b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java index 18bbfa4790692..f8816e0686e9d 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java @@ -68,43 +68,71 @@ public class NioTransportPlugin extends Plugin implements NetworkPlugin { private static final Logger logger = LogManager.getLogger(NioTransportPlugin.class); - public static final Setting NIO_WORKER_COUNT = - new Setting<>("transport.nio.worker_count", - (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)), - (s) -> Setting.parseInt(s, 1, "transport.nio.worker_count"), Setting.Property.NodeScope); - public static final Setting NIO_HTTP_WORKER_COUNT = - intSetting("http.nio.worker_count", 0, 0, Setting.Property.NodeScope); + public static final Setting NIO_WORKER_COUNT = new Setting<>( + "transport.nio.worker_count", + (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)), + (s) -> Setting.parseInt(s, 1, "transport.nio.worker_count"), + Setting.Property.NodeScope + ); + public static final Setting NIO_HTTP_WORKER_COUNT = intSetting("http.nio.worker_count", 0, 0, Setting.Property.NodeScope); private final SetOnce groupFactory = new SetOnce<>(); @Override public List> getSettings() { - return Arrays.asList( - NIO_HTTP_WORKER_COUNT, - NIO_WORKER_COUNT - ); + return Arrays.asList(NIO_HTTP_WORKER_COUNT, NIO_WORKER_COUNT); } @Override - public Map> getTransports(Settings settings, ThreadPool threadPool, PageCacheRecycler pageCacheRecycler, - CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { - return Collections.singletonMap(NIO_TRANSPORT_NAME, - () -> new NioTransport(settings, Version.CURRENT, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, - circuitBreakerService, getNioGroupFactory(settings))); + public Map> getTransports( + Settings settings, + ThreadPool threadPool, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService + ) { + return Collections.singletonMap( + NIO_TRANSPORT_NAME, + () -> new NioTransport( + settings, + Version.CURRENT, + threadPool, + networkService, + pageCacheRecycler, + namedWriteableRegistry, + circuitBreakerService, + getNioGroupFactory(settings) + ) + ); } @Override - public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, - PageCacheRecycler pageCacheRecycler, - CircuitBreakerService circuitBreakerService, - NamedXContentRegistry xContentRegistry, - NetworkService networkService, - HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings) { - return Collections.singletonMap(NIO_HTTP_TRANSPORT_NAME, - () -> new NioHttpServerTransport(settings, networkService, bigArrays, pageCacheRecycler, threadPool, xContentRegistry, - dispatcher, getNioGroupFactory(settings), clusterSettings)); + public Map> getHttpTransports( + Settings settings, + ThreadPool threadPool, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher, + ClusterSettings clusterSettings + ) { + return Collections.singletonMap( + NIO_HTTP_TRANSPORT_NAME, + () -> new NioHttpServerTransport( + settings, + 
networkService, + bigArrays, + pageCacheRecycler, + threadPool, + xContentRegistry, + dispatcher, + getNioGroupFactory(settings), + clusterSettings + ) + ); } private synchronized NioGroupFactory getNioGroupFactory(Settings settings) { diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/PageAllocator.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/PageAllocator.java index 728b84ae6b8db..65913b58d870d 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/PageAllocator.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/PageAllocator.java @@ -51,7 +51,7 @@ public PageAllocator(PageCacheRecycler recycler) { @Override public Page apply(int length) { - if (length >= RECYCLE_LOWER_THRESHOLD && length <= PageCacheRecycler.BYTE_PAGE_SIZE){ + if (length >= RECYCLE_LOWER_THRESHOLD && length <= PageCacheRecycler.BYTE_PAGE_SIZE) { Recycler.V bytePage = recycler.bytePage(false); return new Page(ByteBuffer.wrap(bytePage.v(), 0, length), bytePage::close); } else { diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/TcpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/TcpReadWriteHandler.java index a3d86585ba735..1427429f0b766 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/TcpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/TcpReadWriteHandler.java @@ -61,8 +61,15 @@ public TcpReadWriteHandler(NioTcpChannel channel, PageCacheRecycler recycler, Tc final ThreadPool threadPool = transport.getThreadPool(); final Supplier breaker = transport.getInflightBreaker(); final Transport.RequestHandlers requestHandlers = transport.getRequestHandlers(); - this.pipeline = new InboundPipeline(transport.getVersion(), transport.getStatsTracker(), recycler, threadPool::relativeTimeInMillis, - breaker, requestHandlers::getHandler, transport::inboundMessage); + this.pipeline = new InboundPipeline( + transport.getVersion(), + transport.getStatsTracker(), + recycler, + threadPool::relativeTimeInMillis, + breaker, + requestHandlers::getHandler, + transport::inboundMessage + ); } @Override diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/HttpReadWriteHandlerTests.java index e9b5d72645e80..8fa5a8c8fb129 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/HttpReadWriteHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/HttpReadWriteHandlerTests.java @@ -267,8 +267,6 @@ private static HttpPipelinedResponse emptyGetResponse(int sequence) { return httpResponse; } - - private void prepareHandlerForResponse(HttpReadWriteHandler handler) throws IOException { HttpMethod method = randomBoolean() ? HttpMethod.GET : HttpMethod.HEAD; HttpVersion version = randomBoolean() ? 
HttpVersion.HTTP_1_0 : HttpVersion.HTTP_1_1; diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NettyAdaptorTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NettyAdaptorTests.java index d68890f8eb466..9ba27802822ea 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NettyAdaptorTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NettyAdaptorTests.java @@ -57,7 +57,7 @@ public void testBasicRead() { message.putInt(i); } message.flip(); - ByteBuffer[] buffers = {message}; + ByteBuffer[] buffers = { message }; assertEquals(40, nettyAdaptor.read(buffers)); assertEquals("0123456789", handler.result); } @@ -70,7 +70,7 @@ public void testBasicReadWithExcessData() { message.putInt(i); } message.flip(); - ByteBuffer[] buffers = {message}; + ByteBuffer[] buffers = { message }; assertEquals(40, nettyAdaptor.read(buffers)); assertEquals("0123456789", handler.result); } @@ -82,20 +82,22 @@ public void testUncaughtReadExceptionsBubbleUp() { message.putInt(i); } message.flip(); - ByteBuffer[] buffers = {message}; + ByteBuffer[] buffers = { message }; expectThrows(IllegalStateException.class, () -> nettyAdaptor.read(buffers)); } public void testWriteInsidePipelineIsCaptured() { TenIntsToStringsHandler tenIntsToStringsHandler = new TenIntsToStringsHandler(); PromiseCheckerHandler promiseCheckerHandler = new PromiseCheckerHandler(); - NettyAdaptor nettyAdaptor = new NettyAdaptor(new CapitalizeWriteHandler(), + NettyAdaptor nettyAdaptor = new NettyAdaptor( + new CapitalizeWriteHandler(), promiseCheckerHandler, new WriteInMiddleHandler(), - tenIntsToStringsHandler); + tenIntsToStringsHandler + ); byte[] bytes = "SHOULD_WRITE".getBytes(StandardCharsets.UTF_8); ByteBuffer message = ByteBuffer.wrap(bytes); - ByteBuffer[] buffers = {message}; + ByteBuffer[] buffers = { message }; assertNull(nettyAdaptor.pollOutboundOperation()); nettyAdaptor.read(buffers); assertFalse(tenIntsToStringsHandler.wasCalled); @@ -112,7 +114,7 @@ public void testCloseListener() { CloseChannelHandler handler = new CloseChannelHandler(); NettyAdaptor nettyAdaptor = new NettyAdaptor(handler); byte[] bytes = "SHOULD_CLOSE".getBytes(StandardCharsets.UTF_8); - ByteBuffer[] buffers = {ByteBuffer.wrap(bytes)}; + ByteBuffer[] buffers = { ByteBuffer.wrap(bytes) }; nettyAdaptor.addCloseListener((v, e) -> listenerCalled.set(true)); assertFalse(listenerCalled.get()); nettyAdaptor.read(buffers); diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java index 9c580f1fc64fe..1229aab244a6d 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java @@ -107,8 +107,11 @@ static Collection returnOpaqueIds(Collection responses NioHttpClient() { try { - nioGroup = new NioSelectorGroup(daemonThreadFactory(Settings.EMPTY, "nio-http-client"), 1, - (s) -> new EventHandler(this::onException, s)); + nioGroup = new NioSelectorGroup( + daemonThreadFactory(Settings.EMPTY, "nio-http-client"), + 1, + (s) -> new EventHandler(this::onException, s) + ); } catch (IOException e) { throw new UncheckedIOException(e); } @@ -132,8 +135,10 @@ public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequ } public final NioSocketChannel connect(InetSocketAddress remoteAddress) { - ChannelFactory factory = new ClientChannelFactory(new 
CountDownLatch(0), new - ArrayList<>()); + ChannelFactory factory = new ClientChannelFactory( + new CountDownLatch(0), + new ArrayList<>() + ); try { NioSocketChannel nioSocketChannel = nioGroup.openChannel(remoteAddress, factory); PlainActionFuture connectFuture = PlainActionFuture.newFuture(); @@ -164,8 +169,7 @@ private synchronized Collection sendRequests(InetSocketAddress connectFuture.actionGet(); for (HttpRequest request : requests) { - nioSocketChannel.getContext().sendMessage(request, (v, e) -> { - }); + nioSocketChannel.getContext().sendMessage(request, (v, e) -> {}); } if (latch.await(30L, TimeUnit.SECONDS) == false) { fail("Failed to get all expected responses."); @@ -193,14 +197,16 @@ private class ClientChannelFactory extends ChannelFactory content; private ClientChannelFactory(CountDownLatch latch, Collection content) { - super(NetworkService.TCP_NO_DELAY.get(Settings.EMPTY), + super( + NetworkService.TCP_NO_DELAY.get(Settings.EMPTY), NetworkService.TCP_KEEP_ALIVE.get(Settings.EMPTY), NetworkService.TCP_KEEP_IDLE.get(Settings.EMPTY), NetworkService.TCP_KEEP_INTERVAL.get(Settings.EMPTY), NetworkService.TCP_KEEP_COUNT.get(Settings.EMPTY), NetworkService.TCP_REUSE_ADDRESS.get(Settings.EMPTY), Math.toIntExact(NetworkService.TCP_SEND_BUFFER_SIZE.get(Settings.EMPTY).getBytes()), - Math.toIntExact(NetworkService.TCP_RECEIVE_BUFFER_SIZE.get(Settings.EMPTY).getBytes())); + Math.toIntExact(NetworkService.TCP_RECEIVE_BUFFER_SIZE.get(Settings.EMPTY).getBytes()) + ); this.latch = latch; this.content = content; } @@ -214,15 +220,24 @@ public NioSocketChannel createChannel(NioSelector selector, java.nio.channels.So onException(e); nioSocketChannel.close(); }; - SocketChannelContext context = new BytesChannelContext(nioSocketChannel, selector, socketConfig, exceptionHandler, handler, - InboundChannelBuffer.allocatingInstance()); + SocketChannelContext context = new BytesChannelContext( + nioSocketChannel, + selector, + socketConfig, + exceptionHandler, + handler, + InboundChannelBuffer.allocatingInstance() + ); nioSocketChannel.setContext(context); return nioSocketChannel; } @Override - public NioServerSocketChannel createServerChannel(NioSelector selector, ServerSocketChannel channel, - Config.ServerSocket socketConfig) { + public NioServerSocketChannel createServerChannel( + NioSelector selector, + ServerSocketChannel channel, + Config.ServerSocket socketConfig + ) { throw new UnsupportedOperationException("Cannot create server channel"); } } @@ -320,11 +335,13 @@ public void close() throws IOException { private void handleResponse(Object message) { final FullHttpResponse response = (FullHttpResponse) message; - DefaultFullHttpResponse newResponse = new DefaultFullHttpResponse(response.protocolVersion(), + DefaultFullHttpResponse newResponse = new DefaultFullHttpResponse( + response.protocolVersion(), response.status(), Unpooled.copiedBuffer(response.content()), response.headers().copy(), - response.trailingHeaders().copy()); + response.trailingHeaders().copy() + ); response.release(); content.add(newResponse); latch.countDown(); diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java index ecd5c2baf38ff..70da3496c2534 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java @@ -102,8 +102,10 
@@ private void shutdownExecutorService() throws InterruptedException { public void testThatPipeliningWorksWithFastSerializedRequests() throws InterruptedException { final int numberOfRequests = randomIntBetween(2, 128); - final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests), - new WorkEmulatorHandler()); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel( + new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler() + ); for (int i = 0; i < numberOfRequests; i++) { embeddedChannel.writeInbound(createHttpRequest("/" + String.valueOf(i))); @@ -129,8 +131,10 @@ public void testThatPipeliningWorksWithFastSerializedRequests() throws Interrupt public void testThatPipeliningWorksWhenSlowRequestsInDifferentOrder() throws InterruptedException { final int numberOfRequests = randomIntBetween(2, 128); - final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests), - new WorkEmulatorHandler()); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel( + new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler() + ); for (int i = 0; i < numberOfRequests; i++) { embeddedChannel.writeInbound(createHttpRequest("/" + String.valueOf(i))); @@ -159,8 +163,10 @@ public void testThatPipeliningWorksWhenSlowRequestsInDifferentOrder() throws Int public void testThatPipeliningClosesConnectionWithTooManyEvents() throws InterruptedException { final int numberOfRequests = randomIntBetween(2, 128); - final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests), - new WorkEmulatorHandler()); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel( + new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler() + ); for (int i = 0; i < 1 + numberOfRequests + 1; i++) { embeddedChannel.writeInbound(createHttpRequest("/" + Integer.toString(i))); @@ -187,8 +193,7 @@ public void testThatPipeliningClosesConnectionWithTooManyEvents() throws Interru public void testPipeliningRequestsAreReleased() { final int numberOfRequests = 10; - final EmbeddedChannel embeddedChannel = - new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests + 1)); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests + 1)); for (int i = 0; i < numberOfRequests; i++) { embeddedChannel.writeInbound(createHttpRequest("/" + i)); diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java index f518a8f12daec..9106e9608c778 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java @@ -144,8 +144,7 @@ public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedExc final int maxContentLength = randomIntBetween(1, 104857600); final Settings settings = createBuilderWithPort().put(key, maxContentLength + "b").build(); final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE); - runExpectHeaderTest( - settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, 
HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); } /** @@ -161,7 +160,8 @@ private void runExpectHeaderTest( final Settings settings, final String expectation, final int contentLength, - final HttpResponseStatus expectedStatus) throws InterruptedException { + final HttpResponseStatus expectedStatus + ) throws InterruptedException { final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { @Override public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { @@ -170,14 +170,26 @@ public void dispatchRequest(RestRequest request, RestChannel channel, ThreadCont @Override public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { - logger.error(new ParameterizedMessage("--> Unexpected bad request [{}]", - FakeRestRequest.requestToString(channel.request())), cause); + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); throw new AssertionError(); } }; - try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, threadPool, - xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + settings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + dispatcher, + new NioGroupFactory(settings, logger), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ) + ) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); try (NioHttpClient client = new NioHttpClient()) { @@ -189,13 +201,18 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, try { assertThat(response.status(), equalTo(expectedStatus)); if (expectedStatus.equals(HttpResponseStatus.CONTINUE)) { - final FullHttpRequest continuationRequest = - new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", Unpooled.EMPTY_BUFFER); + final FullHttpRequest continuationRequest = new DefaultFullHttpRequest( + HttpVersion.HTTP_1_1, + HttpMethod.POST, + "/", + Unpooled.EMPTY_BUFFER + ); final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), continuationRequest); try { assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); assertThat( - new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), is("done") + new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), + is("done") ); } finally { continuationResponse.release(); @@ -210,23 +227,40 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, public void testBindUnavailableAddress() { final Settings initialSettings = createSettings(); - try (NioHttpServerTransport transport = new NioHttpServerTransport(initialSettings, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + initialSettings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + new NullDispatcher(), + new NioGroupFactory(Settings.EMPTY, logger), + 
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ) + ) { transport.start(); TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); Settings settings = Settings.builder() .put("http.port", remoteAddress.getPort()) .put("network.host", remoteAddress.getAddress()) .build(); - try (NioHttpServerTransport otherTransport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + try ( + NioHttpServerTransport otherTransport = new NioHttpServerTransport( + settings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + new NullDispatcher(), + new NioGroupFactory(Settings.EMPTY, logger), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ) + ) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); - assertEquals( - "Failed to bind to " + NetworkAddress.format(remoteAddress.address()), - bindHttpException.getMessage() - ); + assertEquals("Failed to bind to " + NetworkAddress.format(remoteAddress.address()), bindHttpException.getMessage()); } } } @@ -241,24 +275,33 @@ public void dispatchRequest(final RestRequest request, final RestChannel channel } @Override - public void dispatchBadRequest(final RestChannel channel, - final ThreadContext threadContext, - final Throwable cause) { - logger.error(new ParameterizedMessage("--> Unexpected bad request [{}]", - FakeRestRequest.requestToString(channel.request())), cause); + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); throw new AssertionError(); } }; - final Settings settings = createBuilderWithPort() - .put(SETTING_CORS_ENABLED.getKey(), true) + final Settings settings = createBuilderWithPort().put(SETTING_CORS_ENABLED.getKey(), true) .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "test-cors.org") .build(); - try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + settings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + dispatcher, + new NioGroupFactory(settings, logger), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ) + ) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); @@ -311,16 +354,28 @@ public void dispatchRequest(final RestRequest request, final RestChannel channel @Override public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { - logger.error(new ParameterizedMessage("--> Unexpected bad request [{}]", - FakeRestRequest.requestToString(channel.request())), cause); + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); throw new AssertionError(); } }; - try 
(NioHttpServerTransport transport = new NioHttpServerTransport( - Settings.EMPTY, networkService, bigArrays, pageRecycler, threadPool, xContentRegistry(), dispatcher, - new NioGroupFactory(Settings.EMPTY, logger), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + dispatcher, + new NioGroupFactory(Settings.EMPTY, logger), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ) + ) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); @@ -374,9 +429,19 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th settings = createBuilderWithPort().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build(); } - try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + settings, + networkService, + bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + dispatcher, + new NioGroupFactory(settings, logger), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ) + ) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); @@ -389,7 +454,8 @@ threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST)); assertThat( new String(response.content().array(), Charset.forName("UTF-8")), - containsString("you sent a bad request and you should feel bad")); + containsString("you sent a bad request and you should feel bad") + ); } finally { response.release(); } @@ -410,23 +476,34 @@ public void dispatchRequest(final RestRequest request, final RestChannel channel } @Override - public void dispatchBadRequest(final RestChannel channel, - final ThreadContext threadContext, - final Throwable cause) { - logger.error(new ParameterizedMessage("--> Unexpected bad request [{}]", - FakeRestRequest.requestToString(channel.request())), cause); + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); throw new AssertionError("Should not have received a dispatched request"); } }; - Settings settings = createBuilderWithPort() - .put(HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT.getKey(), new TimeValue(randomIntBetween(100, 300))) - .build(); - - try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + Settings settings = createBuilderWithPort().put( + HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT.getKey(), + new TimeValue(randomIntBetween(100, 300)) + ).build(); + + try ( + NioHttpServerTransport transport = new NioHttpServerTransport( + settings, + networkService, + 
bigArrays, + pageRecycler, + threadPool, + xContentRegistry(), + dispatcher, + new NioGroupFactory(settings, logger), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ) + ) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); diff --git a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/NioGroupFactoryTests.java b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/NioGroupFactoryTests.java index 921f3bfabd998..965632af1daf2 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/NioGroupFactoryTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/NioGroupFactoryTests.java @@ -80,8 +80,11 @@ public NioSocketChannel createChannel(NioSelector selector, SocketChannel channe } @Override - public NioServerSocketChannel createServerChannel(NioSelector selector, ServerSocketChannel channel, - Config.ServerSocket socketConfig) { + public NioServerSocketChannel createServerChannel( + NioSelector selector, + ServerSocketChannel channel, + Config.ServerSocket socketConfig + ) { NioServerSocketChannel nioChannel = new NioServerSocketChannel(channel); Consumer exceptionHandler = (e) -> {}; Consumer acceptor = (c) -> {}; diff --git a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java index dcf6814cb2e09..c8b9fa63383bf 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java @@ -74,12 +74,24 @@ public class SimpleNioTransportTests extends AbstractSimpleTransportTestCase { protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); NetworkService networkService = new NetworkService(Collections.emptyList()); - return new NioTransport(settings, version, threadPool, networkService, new MockPageCacheRecycler(settings), - namedWriteableRegistry, new NoneCircuitBreakerService(), new NioGroupFactory(settings, logger)) { + return new NioTransport( + settings, + version, + threadPool, + networkService, + new MockPageCacheRecycler(settings), + namedWriteableRegistry, + new NoneCircuitBreakerService(), + new NioGroupFactory(settings, logger) + ) { @Override - public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionProfile profile, - ActionListener listener) { + public void executeHandshake( + DiscoveryNode node, + TcpChannel channel, + ConnectionProfile profile, + ActionListener listener + ) { if (doHandshake) { super.executeHandshake(node, channel, profile, listener); } else { @@ -91,8 +103,15 @@ public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionP public void testConnectException() throws UnknownHostException { try { - serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), - emptyMap(), emptySet(),Version.CURRENT)); + serviceA.connectToNode( + new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + emptyMap(), + emptySet(), + Version.CURRENT + ) + ); fail("Expected ConnectTransportException"); } catch (ConnectTransportException e) { 
assertThat(e.getMessage(), containsString("connect_exception")); @@ -103,11 +122,14 @@ public void testConnectException() throws UnknownHostException { } public void testDefaultKeepAliveSettings() throws IOException { - assumeTrue("setting default keepalive options not supported on this platform", - (IOUtils.LINUX || IOUtils.MAC_OS_X) && - JavaVersion.current().compareTo(JavaVersion.parse("11")) >= 0); - try (MockTransportService serviceC = buildService("TS_C", Version.CURRENT, Settings.EMPTY); - MockTransportService serviceD = buildService("TS_D", Version.CURRENT, Settings.EMPTY)) { + assumeTrue( + "setting default keepalive options not supported on this platform", + (IOUtils.LINUX || IOUtils.MAC_OS_X) && JavaVersion.current().compareTo(JavaVersion.parse("11")) >= 0 + ); + try ( + MockTransportService serviceC = buildService("TS_C", Version.CURRENT, Settings.EMPTY); + MockTransportService serviceD = buildService("TS_D", Version.CURRENT, Settings.EMPTY) + ) { serviceC.start(); serviceC.acceptIncomingRequests(); serviceD.start(); diff --git a/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java index 47239ae1c4a34..ec891ef8d44ef 100644 --- a/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java @@ -51,18 +51,14 @@ public class DieWithDignityIT extends OpenSearchRestTestCase { public void testDieWithDignity() throws Exception { - expectThrows( - IOException.class, - () -> client().performRequest(new Request("GET", "/_die_with_dignity")) - ); + expectThrows(IOException.class, () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); // the OpenSearch process should die and disappear from the output of jps assertBusy(() -> { final String jpsPath = PathUtils.get(System.getProperty("runtime.java.home"), "bin/jps").toString(); final Process process = new ProcessBuilder().command(jpsPath, "-v").start(); - try (InputStream is = process.getInputStream(); - BufferedReader in = new BufferedReader(new InputStreamReader(is, "UTF-8"))) { + try (InputStream is = process.getInputStream(); BufferedReader in = new BufferedReader(new InputStreamReader(is, "UTF-8"))) { String line; while ((line = in.readLine()) != null) { assertThat(line, line, not(containsString("-Ddie.with.dignity.test"))); @@ -82,8 +78,10 @@ public void testDieWithDignity() throws Exception { final String line = it.next(); if (line.matches(".*ERROR.*o\\.o\\.ExceptionsHelper.*javaRestTest-0.*fatal error.*")) { fatalError = true; - } else if (line.matches(".*ERROR.*o\\.o\\.b\\.OpenSearchUncaughtExceptionHandler.*javaRestTest-0.*" - + "fatal error in thread \\[Thread-\\d+\\], exiting.*")) { + } else if (line.matches( + ".*ERROR.*o\\.o\\.b\\.OpenSearchUncaughtExceptionHandler.*javaRestTest-0.*" + + "fatal error in thread \\[Thread-\\d+\\], exiting.*" + )) { fatalErrorInThreadExiting = true; assertTrue(it.hasNext()); assertThat(it.next(), containsString("java.lang.OutOfMemoryError: die with dignity")); @@ -123,7 +121,8 @@ protected boolean preserveClusterUponCompletion() { @Override protected final Settings restClientSettings() { - return Settings.builder().put(super.restClientSettings()) + return Settings.builder() + .put(super.restClientSettings()) // increase the timeout here to 90 seconds to handle long waits for a green // cluster 
health. the waits for green need to be longer than a minute to // account for delayed shards diff --git a/qa/die-with-dignity/src/main/java/org/opensearch/DieWithDignityPlugin.java b/qa/die-with-dignity/src/main/java/org/opensearch/DieWithDignityPlugin.java index 6c5fedbfebc06..9df2b3ea8e043 100644 --- a/qa/die-with-dignity/src/main/java/org/opensearch/DieWithDignityPlugin.java +++ b/qa/die-with-dignity/src/main/java/org/opensearch/DieWithDignityPlugin.java @@ -55,13 +55,14 @@ public DieWithDignityPlugin() { @Override public List<RestHandler> getRestHandlers( - final Settings settings, - final RestController restController, - final ClusterSettings clusterSettings, - final IndexScopedSettings indexScopedSettings, - final SettingsFilter settingsFilter, - final IndexNameExpressionResolver indexNameExpressionResolver, - final Supplier<DiscoveryNodes> nodesInCluster) { + final Settings settings, + final RestController restController, + final ClusterSettings clusterSettings, + final IndexScopedSettings indexScopedSettings, + final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier<DiscoveryNodes> nodesInCluster + ) { return Collections.singletonList(new RestDieWithDignityAction()); }
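Looking back at the top of this section, the chunked write(byte[], int, int) override in the SMB store wrapper is the one hunk that even looks behavioral, and it too only re-indents an existing loop that caps each delegated write at CHUNK_SIZE bytes. A self-contained sketch of the same chunking technique, assuming an illustrative 8 KB chunk size (the real constant and wrapper live in the store-smb plugin):

    import java.io.ByteArrayOutputStream;
    import java.io.FilterOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    // Sketch of a chunking stream: large writes are split so the wrapped
    // stream never sees more than CHUNK_SIZE bytes per call.
    final class ChunkedOutputStream extends FilterOutputStream {
        private static final int CHUNK_SIZE = 8192; // illustrative; the plugin defines its own constant

        ChunkedOutputStream(OutputStream out) {
            super(out);
        }

        @Override
        public void write(byte[] b, int offset, int length) throws IOException {
            while (length > 0) {
                final int chunk = Math.min(length, CHUNK_SIZE);
                out.write(b, offset, chunk);
                length -= chunk;
                offset += chunk;
            }
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream sink = new ByteArrayOutputStream();
            try (OutputStream chunked = new ChunkedOutputStream(sink)) {
                chunked.write(new byte[100_000]); // delivered to the sink as 8 KB slices
            }
            System.out.println(sink.size()); // 100000
        }
    }

Bounding each underlying write keeps a single very large buffer from being handed to the destination in one call, presumably to sidestep filesystems that misbehave on oversized writes; the diff preserves that loop byte-for-byte and only reflows its indentation.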