From afff380ab1e9ae65a11d50016c909059301aba18 Mon Sep 17 00:00:00 2001
From: Costin Leau
Date: Wed, 20 Jun 2018 21:42:15 +0300
Subject: [PATCH 01/31] [DOCS] Fix JDBC Maven client group/artifact ID

---
 x-pack/docs/en/sql/endpoints/jdbc.asciidoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/x-pack/docs/en/sql/endpoints/jdbc.asciidoc b/x-pack/docs/en/sql/endpoints/jdbc.asciidoc
index a980278810e57..6959035bf09e4 100644
--- a/x-pack/docs/en/sql/endpoints/jdbc.asciidoc
+++ b/x-pack/docs/en/sql/endpoints/jdbc.asciidoc
@@ -14,8 +14,8 @@ The JDBC driver can be obtained either by downloading it from the https://www.el
 ["source","xml",subs="attributes"]
 ----
 <dependency>
-  <groupId>org.elasticsearch.plugin.jdbc</groupId>
-  <artifactId>jdbc</artifactId>
+  <groupId>org.elasticsearch.plugin</groupId>
+  <artifactId>x-pack-sql-jdbc</artifactId>
   <version>{version}</version>
 </dependency>
 ----

From 26c2347f61b80a87f849619a678777591814804b Mon Sep 17 00:00:00 2001
From: Albert Zaharovits
Date: Thu, 21 Jun 2018 06:16:33 +0300
Subject: [PATCH 02/31] Reload secure settings for plugins (#31481)

Adds the ability to reread and decrypt the local node keystore. Commonly,
the contents of the keystore, backing the `SecureSettings`, are not
retrievable except during node initialization. This changes that by adding
a new API which broadcasts a password to every node. The password is used
to decrypt the local keystore and to populate a `Settings` object that is
passed to all plugins implementing the `ReloadablePlugin` interface. Each
plugin is then responsible for doing whatever "reload" means in its case.
When the `reload` handler returns, the keystore is closed and its contents
are no longer retrievable. The password is never stored persistently on
any node.

Plugins that have been modified in this commit are: `repository-azure`,
`repository-s3`, `repository-gcs` and `discovery-ec2`.
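As an orientation aid before the diff itself, here is a minimal sketch of the plugin-side contract the message describes. The `ReloadablePlugin` interface and its `reload(Settings)` hook are introduced by this patch; the plugin class, the setting name and the `rebuildClient` helper below are hypothetical placeholders, not code from the commit.

["source","java"]
----
// Hypothetical plugin opting into the reload mechanism described above.
// ReloadablePlugin and reload(Settings) come from this patch; the setting
// name and rebuildClient() helper are illustrative placeholders.
package org.example;

import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ReloadablePlugin;

public class MyReloadablePlugin extends Plugin implements ReloadablePlugin {

    // a secure setting, readable only while the decrypted keystore is open
    static final Setting<SecureString> API_KEY_SETTING =
            SecureSetting.secureString("myplugin.api_key", null);

    @Override
    public void reload(Settings settings) {
        // the keystore backing these settings is only open for the duration of
        // this call, so copy out what is needed instead of keeping references
        try (SecureString apiKey = API_KEY_SETTING.get(settings)) {
            rebuildClient(apiKey.toString());
        }
    }

    private void rebuildClient(String apiKey) {
        // swap any cached client so future requests pick up the new credentials
    }
}
----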
---
 .../discovery/ec2/AmazonEc2Reference.java | 61 +++
 .../discovery/ec2/AwsEc2Service.java | 74 ++-
 .../discovery/ec2/AwsEc2ServiceImpl.java | 148 +++---
 .../ec2/AwsEc2UnicastHostsProvider.java | 43 +-
 .../discovery/ec2/Ec2ClientSettings.java | 145 ++++++
 .../discovery/ec2/Ec2DiscoveryPlugin.java | 71 +--
 .../discovery/ec2/AmazonEC2Mock.java | 15 +-
 .../discovery/ec2/AwsEc2ServiceImplTests.java | 31 +-
 .../discovery/ec2/AwsEc2ServiceMock.java | 33 +-
 .../discovery/ec2/Ec2DiscoveryPluginMock.java | 38 ++
 .../ec2/Ec2DiscoveryPluginTests.java | 93 +++-
 .../discovery/ec2/Ec2DiscoveryTests.java | 46 +-
 .../cloud/azure/blobstore/AzureBlobStore.java | 59 +--
 .../azure/storage/AzureStorageService.java | 47 +-
 .../storage/AzureStorageServiceImpl.java | 339 ++++++--------
 .../azure/storage/AzureStorageSettings.java | 135 ++++--
 .../azure/AzureRepositoryPlugin.java | 25 +-
 .../repositories/azure/AzureRepository.java | 52 +--
 .../storage/AzureStorageServiceMock.java | 40 +-
 .../storage/AzureStorageServiceTests.java | 230 ++++----
 .../AzureStorageSettingsFilterTests.java | 10 +
 .../azure/AzureRepositorySettingsTests.java | 3 +-
 .../azure/AzureSnapshotRestoreTests.java | 58 ++-
 .../gcs/GoogleCloudStorageBlobStore.java | 104 ++---
 .../gcs/GoogleCloudStoragePlugin.java | 32 +-
 .../gcs/GoogleCloudStorageRepository.java | 29 +-
 .../gcs/GoogleCloudStorageService.java | 152 ++++++-
 ...leCloudStorageBlobStoreContainerTests.java | 15 +-
 ...eCloudStorageBlobStoreRepositoryTests.java | 24 +-
 .../gcs/GoogleCloudStorageBlobStoreTests.java | 15 +-
 ...loudStorageRepositoryDeprecationTests.java | 23 +-
 .../gcs/GoogleCloudStorageServiceTests.java | 107 ++++-
 plugins/repository-s3/build.gradle | 2 +-
 .../repositories/s3/AmazonS3Reference.java | 63 +++
 .../repositories/s3/AwsS3Service.java | 23 +-
 .../repositories/s3/InternalAwsS3Service.java | 147 +++---
 .../repositories/s3/S3BlobContainer.java | 137 +++---
 .../repositories/s3/S3BlobStore.java | 74 +--
 .../repositories/s3/S3ClientSettings.java | 72 ++-
 .../repositories/s3/S3Repository.java | 46 +-
 .../repositories/s3/S3RepositoryPlugin.java | 45 +-
 .../plugin-metadata/plugin-security.policy | 3 +
 .../s3/AbstractS3SnapshotRestoreTest.java | 28 +-
 .../repositories/s3/AmazonS3Wrapper.java | 5 +
 .../s3/AwsS3ServiceImplTests.java | 134 +++---
 .../repositories/s3/MockAmazonS3.java | 5 +
 .../s3/RepositoryCredentialsTests.java | 211 +++++++++
 .../RepositorySettingsCredentialsTests.java | 41 --
 .../s3/S3BlobStoreContainerTests.java | 31 +-
 .../s3/S3BlobStoreRepositoryTests.java | 26 +-
 .../repositories/s3/S3BlobStoreTests.java | 12 +-
 .../repositories/s3/S3RepositoryTests.java | 89 ++--
 .../repositories/s3/TestAmazonS3.java | 38 +-
 .../repositories/s3/TestAwsS3Service.java | 28 +-
 .../elasticsearch/action/ActionModule.java | 6 +
 .../NodesReloadSecureSettingsAction.java | 45 ++
 .../NodesReloadSecureSettingsRequest.java | 160 +++++++
 ...desReloadSecureSettingsRequestBuilder.java | 84 ++++
 .../NodesReloadSecureSettingsResponse.java | 149 +++++++
 ...nsportNodesReloadSecureSettingsAction.java | 144 ++++++
 .../client/ClusterAdminClient.java | 6 +
 .../client/support/AbstractClient.java | 7 +
 .../common/settings/KeyStoreWrapper.java | 4 +-
 .../common/util/LazyInitializable.java | 108 +++++
 .../org/elasticsearch/plugins/Plugin.java | 1 +
 .../plugins/ReloadablePlugin.java | 54 +++
 .../RestReloadSecureSettingsAction.java | 87 ++++
 .../action/admin/ReloadSecureSettingsIT.java | 422 ++++++++++++++++++
 .../action/admin/invalid.txt.keystore | 3 +
 69 files changed, 3551 insertions(+), 1286 deletions(-)
 create mode 100644 plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java
 create mode 100644 plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java
 create mode 100644 plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java
 create mode 100644 plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java
 create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java
 delete mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java
 create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java
 create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java
 create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java
 create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java
 create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java
 create mode 100644 server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java
 create mode 100644 server/src/main/java/org/elasticsearch/plugins/ReloadablePlugin.java
 create mode 100644 server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java
 create mode 100644 server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java
 create mode 100644 server/src/test/resources/org/elasticsearch/action/admin/invalid.txt.keystore
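The diffstat shows the caller-facing surface: a transport action, request/response classes, and a `NodesReloadSecureSettingsRequestBuilder` wired into `ClusterAdminClient`. As a rough illustration of the caller side — `prepareReloadSecureSettings()` and `setSecureStorePassword()` are names inferred from the builder and response classes listed above, not verified signatures:

["source","java"]
----
// Hedged sketch of triggering the reload through the Java client layer.
// prepareReloadSecureSettings() and setSecureStorePassword() are assumed
// method names based on the classes listed in the diffstat.
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.SecureString;

public final class ReloadSecureSettingsCaller {

    public static void reloadOnAllNodes(Client client, SecureString keystorePassword) {
        // broadcasts the password; every node decrypts its local keystore,
        // invokes reload(Settings) on each ReloadablePlugin, then closes it again
        NodesReloadSecureSettingsResponse response = client.admin().cluster()
                .prepareReloadSecureSettings()
                .setSecureStorePassword(keystorePassword)
                .get();
        // failures are collected per node instead of failing the whole request
        response.getNodes().forEach(node ->
                System.out.println("reloaded secure settings on " + node.getNode().getName()));
    }
}
----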
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java
new file mode 100644
index 0000000000000..0b0b208790b48
--- /dev/null
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.ec2;
+
+import com.amazonaws.services.ec2.AmazonEC2;
+
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
+
+/**
+ * Handles the shutdown of the wrapped {@link AmazonEC2} using reference
+ * counting.
+ */
+public class AmazonEc2Reference extends AbstractRefCounted implements Releasable {
+
+    private final AmazonEC2 client;
+
+    AmazonEc2Reference(AmazonEC2 client) {
+        super("AWS_EC2_CLIENT");
+        this.client = client;
+    }
+
+    /**
+     * Call when the client is no longer needed.
+     */
+    @Override
+    public void close() {
+        decRef();
+    }
+
+    /**
+     * Returns the underlying {@link AmazonEC2} client. All method calls are
+     * permitted except shutdown, which is invoked automatically when the
+     * reference count reaches 0.
+     */
+    public AmazonEC2 client() {
+        return client;
+    }
+
+    @Override
+    protected void closeInternal() {
+        client.shutdown();
+    }
+
+}
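Before moving on to the service interface, a usage sketch of the wrapper above (illustrative, not part of the patch): try-with-resources pairs the `incRef()` taken when the service hands out the reference with the `decRef()` performed by `close()`, which is exactly how the hosts provider consumes it later in this patch.

["source","java"]
----
// Illustrative consumer of AmazonEc2Reference; the service and request are
// assumed to be supplied by the caller. Mirrors the try-with-resources usage
// in AwsEc2UnicastHostsProvider further down in this patch.
package org.elasticsearch.discovery.ec2;

import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
import com.amazonaws.services.ec2.model.DescribeInstancesResult;

final class Ec2ReferenceUsageSketch {

    static DescribeInstancesResult describe(AwsEc2Service service, DescribeInstancesRequest request) {
        // client() takes a reference before returning; close() releases it, so a
        // concurrent refreshAndClearCache() cannot shut the client down while
        // this block is still using it
        try (AmazonEc2Reference reference = service.client()) {
            return reference.client().describeInstances(request);
        }
        // once the last holder releases, closeInternal() runs client.shutdown()
    }
}
----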
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java
index db3164fe9007a..976f1db26d173 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Service.java
@@ -19,22 +19,17 @@
 package org.elasticsearch.discovery.ec2;
 
-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
-import com.amazonaws.services.ec2.AmazonEC2;
-import org.elasticsearch.common.settings.SecureSetting;
-import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.unit.TimeValue;
 
+import java.io.Closeable;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
-import java.util.Locale;
 import java.util.function.Function;
 
-interface AwsEc2Service {
+interface AwsEc2Service extends Closeable {
     Setting<Boolean> AUTO_ATTRIBUTE_SETTING = Setting.boolSetting("cloud.node.auto_attributes", false, Property.NodeScope);
 
     class HostType {
@@ -45,36 +40,6 @@ class HostType {
         public static final String TAG_PREFIX = "tag:";
     }
 
-    /** The access key (ie login id) for connecting to ec2. */
-    Setting<SecureString> ACCESS_KEY_SETTING = SecureSetting.secureString("discovery.ec2.access_key", null);
-
-    /** The secret key (ie password) for connecting to ec2. */
-    Setting<SecureString> SECRET_KEY_SETTING = SecureSetting.secureString("discovery.ec2.secret_key", null);
-
-    /** An override for the ec2 endpoint to connect to. */
-    Setting<String> ENDPOINT_SETTING = new Setting<>("discovery.ec2.endpoint", "",
-        s -> s.toLowerCase(Locale.ROOT), Property.NodeScope);
-
-    /** The protocol to use to connect to to ec2. */
-    Setting<Protocol> PROTOCOL_SETTING = new Setting<>("discovery.ec2.protocol", "https",
-        s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope);
-
-    /** The host name of a proxy to connect to ec2 through. */
-    Setting<String> PROXY_HOST_SETTING = Setting.simpleString("discovery.ec2.proxy.host", Property.NodeScope);
-
-    /** The port of a proxy to connect to ec2 through. */
-    Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", 80, 0, 1<<16, Property.NodeScope);
-
-    /** The username of a proxy to connect to s3 through. */
-    Setting<SecureString> PROXY_USERNAME_SETTING = SecureSetting.secureString("discovery.ec2.proxy.username", null);
-
-    /** The password of a proxy to connect to s3 through. */
-    Setting<SecureString> PROXY_PASSWORD_SETTING = SecureSetting.secureString("discovery.ec2.proxy.password", null);
-
-    /** The socket timeout for connecting to s3. */
-    Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout",
-        TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope);
-
     /**
      * discovery.ec2.host_type: The type of host type to use to communicate with other instances.
      * Can be one of private_ip, public_ip, private_dns, public_dns or tag:XXXX where
      *
@@ -87,26 +52,24 @@ class HostType {
     * discovery.ec2.any_group: If set to false, will require all security groups to be present for the instance to be used for the
     * discovery. Defaults to true.
      */
-    Setting<Boolean> ANY_GROUP_SETTING =
-        Setting.boolSetting("discovery.ec2.any_group", true, Property.NodeScope);
+    Setting<Boolean> ANY_GROUP_SETTING = Setting.boolSetting("discovery.ec2.any_group", true, Property.NodeScope);
 
     /**
      * discovery.ec2.groups: Either a comma separated list or array based list of (security) groups. Only instances with the provided
      * security groups will be used in the cluster discovery. (NOTE: You could provide either group NAME or group ID.)
      */
-    Setting<List<String>> GROUPS_SETTING =
-        Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(), Property.NodeScope);
+    Setting<List<String>> GROUPS_SETTING = Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(),
+        Property.NodeScope);
 
     /**
      * discovery.ec2.availability_zones: Either a comma separated list or array based list of availability zones. Only instances within
      * the provided availability zones will be used in the cluster discovery.
      */
-    Setting<List<String>> AVAILABILITY_ZONES_SETTING =
-        Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(), s -> s.toString(),
-            Property.NodeScope);
+    Setting<List<String>> AVAILABILITY_ZONES_SETTING = Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(),
+        s -> s.toString(), Property.NodeScope);
 
     /**
     * discovery.ec2.node_cache_time: How long the list of hosts is cached to prevent further requests to the AWS API. Defaults to 10s.
     */
-    Setting<TimeValue> NODE_CACHE_TIME_SETTING =
-        Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10), Property.NodeScope);
+    Setting<TimeValue> NODE_CACHE_TIME_SETTING = Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10),
+        Property.NodeScope);
 
     /**
      * discovery.ec2.tag.*: The ec2 discovery can filter machines to include in the cluster based on tags (and not just groups).
@@ -115,7 +78,22 @@ class HostType {
      * instance to be included.
      */
     Setting.AffixSetting<List<String>> TAG_SETTING = Setting.prefixKeySetting("discovery.ec2.tag.",
-            key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope));
+        key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope));
+
+    /**
+     * Builds then caches an {@code AmazonEC2} client using the current client
+     * settings. Returns an {@code AmazonEc2Reference} wrapper which should be
+     * released as soon as it is not required anymore.
+     */
+    AmazonEc2Reference client();
+
+    /**
+     * Updates the settings for building the client and releases the cached one.
+     * Future client requests will use the new settings to lazily build the new
+     * client.
+     *
+     * @param clientSettings the new refreshed settings
+     */
+    void refreshAndClearCache(Ec2ClientSettings clientSettings);
 
-    AmazonEC2 client();
 }
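The implementation that follows builds on `LazyInitializable`, a new utility added by this patch whose source is not shown in this excerpt. A condensed sketch of the behavior the EC2 service relies on — a simplified, synchronized variant (the real class in `org.elasticsearch.common.util` is more general), with the callback hooks named after how the service uses them:

["source","java"]
----
// Condensed, assumption-laden sketch of the LazyInitializable utility added
// by this patch; shown only to make getOrCompute()/reset() easier to follow.
import java.util.function.Consumer;
import java.util.function.Supplier;

final class LazyInitializableSketch<T> {

    private final Supplier<T> supplier; // builds the value on first use
    private final Consumer<T> onGet;    // e.g. AmazonEc2Reference::incRef
    private final Consumer<T> onReset;  // e.g. AmazonEc2Reference::decRef
    private T value;                    // null until first getOrCompute()

    LazyInitializableSketch(Supplier<T> supplier, Consumer<T> onGet, Consumer<T> onReset) {
        this.supplier = supplier;
        this.onGet = onGet;
        this.onReset = onReset;
    }

    synchronized T getOrCompute() {
        if (value == null) {
            value = supplier.get(); // computed lazily on first use
        }
        onGet.accept(value);        // take a reference before handing the value out
        return value;
    }

    synchronized void reset() {
        if (value != null) {
            onReset.accept(value);  // release the cache's own reference
            value = null;           // a later getOrCompute() would rebuild
        }
    }
}
----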
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java
index b53dc7a876301..67902174630ea 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java
@@ -19,12 +19,9 @@
 package org.elasticsearch.discovery.ec2;
 
-import java.io.Closeable;
-import java.io.IOException;
 import java.util.Random;
+import java.util.concurrent.atomic.AtomicReference;
 
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.AmazonWebServiceRequest;
 import com.amazonaws.ClientConfiguration;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.BasicAWSCredentials;
@@ -35,112 +32,117 @@
 import com.amazonaws.services.ec2.AmazonEC2;
 import com.amazonaws.services.ec2.AmazonEC2Client;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.LazyInitializable;
 
-class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service, Closeable {
+class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service {
 
     public static final String EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/";
 
-    private AmazonEC2Client client;
+    private final AtomicReference<LazyInitializable<AmazonEc2Reference, ElasticsearchException>> lazyClientReference =
+            new AtomicReference<>();
 
     AwsEc2ServiceImpl(Settings settings) {
         super(settings);
     }
 
-    @Override
-    public synchronized AmazonEC2 client() {
-        if (client != null) {
-            return client;
-        }
-
-        this.client = new AmazonEC2Client(buildCredentials(logger, settings), buildConfiguration(logger, settings));
-        String endpoint = findEndpoint(logger, settings);
-        if (endpoint != null) {
-            client.setEndpoint(endpoint);
+    private AmazonEC2 buildClient(Ec2ClientSettings clientSettings) {
+        final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings);
+        final ClientConfiguration configuration = buildConfiguration(logger, clientSettings);
+        final AmazonEC2 client = buildClient(credentials, configuration);
+        if (Strings.hasText(clientSettings.endpoint)) {
+            logger.debug("using explicit ec2 endpoint [{}]", clientSettings.endpoint);
+            client.setEndpoint(clientSettings.endpoint);
         }
-
-        return this.client;
+        return client;
     }
 
-    protected static AWSCredentialsProvider buildCredentials(Logger logger, Settings settings) {
-        AWSCredentialsProvider credentials;
-
-        try (SecureString key = ACCESS_KEY_SETTING.get(settings);
-             SecureString secret = SECRET_KEY_SETTING.get(settings)) {
-            if (key.length() == 0 && secret.length() == 0) {
-                logger.debug("Using either environment variables, system properties or instance profile credentials");
-                credentials = new DefaultAWSCredentialsProviderChain();
-            } else {
-                logger.debug("Using basic key/secret credentials");
-                credentials = new StaticCredentialsProvider(new BasicAWSCredentials(key.toString(), secret.toString()));
-            }
-        }
-
-        return credentials;
+    // proxy for testing
+    AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
+        final AmazonEC2 client = new AmazonEC2Client(credentials, configuration);
+        return client;
     }
 
-    protected static ClientConfiguration buildConfiguration(Logger logger, Settings settings) {
-        ClientConfiguration clientConfiguration = new ClientConfiguration();
+    // pkg private for tests
+    static ClientConfiguration buildConfiguration(Logger logger, Ec2ClientSettings clientSettings) {
+        final ClientConfiguration clientConfiguration = new ClientConfiguration();
         // the response metadata cache is only there for diagnostics purposes,
         // but can force objects from every response to the old generation.
         clientConfiguration.setResponseMetadataCacheSize(0);
-        clientConfiguration.setProtocol(PROTOCOL_SETTING.get(settings));
-
-        if (PROXY_HOST_SETTING.exists(settings)) {
-            String proxyHost = PROXY_HOST_SETTING.get(settings);
-            Integer proxyPort = PROXY_PORT_SETTING.get(settings);
-            try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings);
-                 SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)) {
-
-                clientConfiguration
-                    .withProxyHost(proxyHost)
-                    .withProxyPort(proxyPort)
-                    .withProxyUsername(proxyUsername.toString())
-                    .withProxyPassword(proxyPassword.toString());
-            }
+        clientConfiguration.setProtocol(clientSettings.protocol);
+        if (Strings.hasText(clientSettings.proxyHost)) {
+            // TODO: remove this leniency, these settings should exist together and be validated
+            clientConfiguration.setProxyHost(clientSettings.proxyHost);
+            clientConfiguration.setProxyPort(clientSettings.proxyPort);
+            clientConfiguration.setProxyUsername(clientSettings.proxyUsername);
+            clientConfiguration.setProxyPassword(clientSettings.proxyPassword);
         }
-
         // Increase the number of retries in case of 5xx API responses
         final Random rand = Randomness.get();
-        RetryPolicy retryPolicy = new RetryPolicy(
+        final RetryPolicy retryPolicy = new RetryPolicy(
             RetryPolicy.RetryCondition.NO_RETRY_CONDITION,
-            new RetryPolicy.BackoffStrategy() {
-                @Override
-                public long delayBeforeNextRetry(AmazonWebServiceRequest originalRequest,
-                                                 AmazonClientException exception,
-                                                 int retriesAttempted) {
-                    // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000)
-                    logger.warn("EC2 API request failed, retry again. Reason was:", exception);
-                    return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble()));
-                }
+            (originalRequest, exception, retriesAttempted) -> {
+                // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000)
+                logger.warn("EC2 API request failed, retry again. Reason was:", exception);
+                return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble()));
             }, 10, false);
         clientConfiguration.setRetryPolicy(retryPolicy);
-        clientConfiguration.setSocketTimeout((int) READ_TIMEOUT_SETTING.get(settings).millis());
-
+        clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis);
         return clientConfiguration;
     }
 
-    protected static String findEndpoint(Logger logger, Settings settings) {
-        String endpoint = null;
-        if (ENDPOINT_SETTING.exists(settings)) {
-            endpoint = ENDPOINT_SETTING.get(settings);
-            logger.debug("using explicit ec2 endpoint [{}]", endpoint);
+    // pkg private for tests
+    static AWSCredentialsProvider buildCredentials(Logger logger, Ec2ClientSettings clientSettings) {
+        final BasicAWSCredentials credentials = clientSettings.credentials;
+        if (credentials == null) {
+            logger.debug("Using either environment variables, system properties or instance profile credentials");
+            return new DefaultAWSCredentialsProviderChain();
+        } else {
+            logger.debug("Using basic key/secret credentials");
+            return new StaticCredentialsProvider(credentials);
         }
-        return endpoint;
     }
 
     @Override
-    public void close() throws IOException {
-        if (client != null) {
-            client.shutdown();
+    public AmazonEc2Reference client() {
+        final LazyInitializable<AmazonEc2Reference, ElasticsearchException> clientReference = this.lazyClientReference.get();
+        if (clientReference == null) {
+            throw new IllegalStateException("Missing ec2 client configs");
         }
+        return clientReference.getOrCompute();
+    }
 
-        // Ensure that IdleConnectionReaper is shutdown
+    /**
+     * Refreshes the settings for the AmazonEC2 client. The new client will be built
+     * using these new settings. The old client is usable until released. On release it
+     * will be destroyed instead of being returned to the cache.
+     */
+    @Override
+    public void refreshAndClearCache(Ec2ClientSettings clientSettings) {
+        final LazyInitializable<AmazonEc2Reference, ElasticsearchException> newClient = new LazyInitializable<>(
+                () -> new AmazonEc2Reference(buildClient(clientSettings)), clientReference -> clientReference.incRef(),
+                clientReference -> clientReference.decRef());
+        final LazyInitializable<AmazonEc2Reference, ElasticsearchException> oldClient = this.lazyClientReference.getAndSet(newClient);
+        if (oldClient != null) {
+            oldClient.reset();
+        }
+    }
+
+    @Override
+    public void close() {
+        final LazyInitializable<AmazonEc2Reference, ElasticsearchException> clientReference = this.lazyClientReference.getAndSet(null);
+        if (clientReference != null) {
+            clientReference.reset();
+        }
+        // shutdown IdleConnectionReaper background thread
+        // it will be restarted on new client usage
        IdleConnectionReaper.shutdown();
    }
+
 }

diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java
index f291413d408ed..2c536981b04c5 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.discovery.ec2;
 
 import com.amazonaws.AmazonClientException;
-import com.amazonaws.services.ec2.AmazonEC2;
 import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
 import com.amazonaws.services.ec2.model.DescribeInstancesResult;
 import com.amazonaws.services.ec2.model.Filter;
@@ -59,7 +58,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos
 
     private final TransportService transportService;
 
-    private final AmazonEC2 client;
+    private final AwsEc2Service awsEc2Service;
 
     private final boolean bindAnyGroup;
 
@@ -76,7 +75,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos
     AwsEc2UnicastHostsProvider(Settings settings, TransportService transportService, AwsEc2Service awsEc2Service) {
         super(settings);
         this.transportService = transportService;
-        this.client = awsEc2Service.client();
+        this.awsEc2Service = awsEc2Service;
 
         this.hostType = AwsEc2Service.HOST_TYPE_SETTING.get(settings);
         this.discoNodes = new DiscoNodesCache(AwsEc2Service.NODE_CACHE_TIME_SETTING.get(settings));
@@ -103,31 +102,31 @@ public List<DiscoveryNode> buildDynamicNodes() {
 
     protected List<DiscoveryNode> fetchDynamicNodes() {
 
-        List<DiscoveryNode> discoNodes = new ArrayList<>();
+        final List<DiscoveryNode> discoNodes = new ArrayList<>();
 
-        DescribeInstancesResult descInstances;
-        try {
+        final DescribeInstancesResult descInstances;
+        try (AmazonEc2Reference clientReference = awsEc2Service.client()) {
             // Query EC2 API based on AZ, instance state, and tag.
 
             // NOTE: we don't filter by security group during the describe instances request for two reasons:
             // 1. differences in VPCs require different parameters during query (ID vs Name)
             // 2. We want to use two different strategies: (all security groups vs.
any security groups) - descInstances = SocketAccess.doPrivileged(() -> client.describeInstances(buildDescribeInstancesRequest())); - } catch (AmazonClientException e) { + descInstances = SocketAccess.doPrivileged(() -> clientReference.client().describeInstances(buildDescribeInstancesRequest())); + } catch (final AmazonClientException e) { logger.info("Exception while retrieving instance list from AWS API: {}", e.getMessage()); logger.debug("Full exception:", e); return discoNodes; } logger.trace("building dynamic unicast discovery nodes..."); - for (Reservation reservation : descInstances.getReservations()) { - for (Instance instance : reservation.getInstances()) { + for (final Reservation reservation : descInstances.getReservations()) { + for (final Instance instance : reservation.getInstances()) { // lets see if we can filter based on groups if (!groups.isEmpty()) { - List instanceSecurityGroups = instance.getSecurityGroups(); - List securityGroupNames = new ArrayList<>(instanceSecurityGroups.size()); - List securityGroupIds = new ArrayList<>(instanceSecurityGroups.size()); - for (GroupIdentifier sg : instanceSecurityGroups) { + final List instanceSecurityGroups = instance.getSecurityGroups(); + final List securityGroupNames = new ArrayList<>(instanceSecurityGroups.size()); + final List securityGroupIds = new ArrayList<>(instanceSecurityGroups.size()); + for (final GroupIdentifier sg : instanceSecurityGroups) { securityGroupNames.add(sg.getGroupName()); securityGroupIds.add(sg.getGroupId()); } @@ -162,10 +161,10 @@ && disjoint(securityGroupIds, groups)) { address = instance.getPublicIpAddress(); } else if (hostType.startsWith(TAG_PREFIX)) { // Reading the node host from its metadata - String tagName = hostType.substring(TAG_PREFIX.length()); + final String tagName = hostType.substring(TAG_PREFIX.length()); logger.debug("reading hostname from [{}] instance tag", tagName); - List tags = instance.getTags(); - for (Tag tag : tags) { + final List tags = instance.getTags(); + for (final Tag tag : tags) { if (tag.getKey().equals(tagName)) { address = tag.getValue(); logger.debug("using [{}] as the instance address", address); @@ -177,13 +176,13 @@ && disjoint(securityGroupIds, groups)) { if (address != null) { try { // we only limit to 1 port per address, makes no sense to ping 100 ports - TransportAddress[] addresses = transportService.addressesFromString(address, 1); + final TransportAddress[] addresses = transportService.addressesFromString(address, 1); for (int i = 0; i < addresses.length; i++) { logger.trace("adding {}, address {}, transport_address {}", instance.getInstanceId(), address, addresses[i]); discoNodes.add(new DiscoveryNode(instance.getInstanceId(), "#cloud-" + instance.getInstanceId() + "-" + i, addresses[i], emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion())); } - } catch (Exception e) { + } catch (final Exception e) { final String finalAddress = address; logger.warn( (Supplier) @@ -201,12 +200,12 @@ && disjoint(securityGroupIds, groups)) { } private DescribeInstancesRequest buildDescribeInstancesRequest() { - DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest() + final DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest() .withFilters( new Filter("instance-state-name").withValues("running", "pending") ); - for (Map.Entry> tagFilter : tags.entrySet()) { + for (final Map.Entry> tagFilter : tags.entrySet()) { // for a given tag key, OR relationship for multiple different values 
            describeInstancesRequest.withFilters(
                new Filter("tag:" + tagFilter.getKey()).withValues(tagFilter.getValue())
@@ -238,7 +237,7 @@ protected boolean needsRefresh() {
 
         @Override
         protected List<DiscoveryNode> refresh() {
-            List<DiscoveryNode> nodes = fetchDynamicNodes();
+            final List<DiscoveryNode> nodes = fetchDynamicNodes();
             empty = nodes.isEmpty();
             return nodes;
         }

diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java
new file mode 100644
index 0000000000000..b42b0d546001a
--- /dev/null
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.ec2;
+
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.Protocol;
+import com.amazonaws.auth.BasicAWSCredentials;
+
+import org.elasticsearch.common.settings.SecureSetting;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.Setting.Property;
+import org.elasticsearch.common.unit.TimeValue;
+import java.util.Locale;
+
+/**
+ * A container for settings used to create an EC2 client.
+ */
+final class Ec2ClientSettings {
+
+    /** The access key (i.e. login id) for connecting to ec2. */
+    static final Setting<SecureString> ACCESS_KEY_SETTING = SecureSetting.secureString("discovery.ec2.access_key", null);
+
+    /** The secret key (i.e. password) for connecting to ec2. */
+    static final Setting<SecureString> SECRET_KEY_SETTING = SecureSetting.secureString("discovery.ec2.secret_key", null);
+
+    /** The host name of a proxy to connect to ec2 through. */
+    static final Setting<String> PROXY_HOST_SETTING = Setting.simpleString("discovery.ec2.proxy.host", Property.NodeScope);
+
+    /** The port of a proxy to connect to ec2 through. */
+    static final Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", 80, 0, 1 << 16, Property.NodeScope);
+
+    /** An override for the ec2 endpoint to connect to. */
+    static final Setting<String> ENDPOINT_SETTING = new Setting<>("discovery.ec2.endpoint", "", s -> s.toLowerCase(Locale.ROOT),
+        Property.NodeScope);
+
+    /** The protocol to use to connect to ec2. */
+    static final Setting<Protocol> PROTOCOL_SETTING = new Setting<>("discovery.ec2.protocol", "https",
+        s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope);
+
+    /** The username of a proxy to connect to ec2 through. */
+    static final Setting<SecureString> PROXY_USERNAME_SETTING = SecureSetting.secureString("discovery.ec2.proxy.username", null);
+
+    /** The password of a proxy to connect to ec2 through. */
+    static final Setting<SecureString> PROXY_PASSWORD_SETTING = SecureSetting.secureString("discovery.ec2.proxy.password", null);
+
+    /** The socket timeout for connecting to ec2. */
+    static final Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout",
+        TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope);
+
+    /** Credentials to authenticate with ec2. */
+    final BasicAWSCredentials credentials;
+
+    /**
+     * The ec2 endpoint the client should talk to, or empty string to use the
+     * default.
+     */
+    final String endpoint;
+
+    /** The protocol to use to talk to ec2. Defaults to https. */
+    final Protocol protocol;
+
+    /** An optional proxy host that requests to ec2 should be made through. */
+    final String proxyHost;
+
+    /** The port number the proxy host should be connected on. */
+    final int proxyPort;
+
+    // these should be "secure" yet the api for the ec2 client only takes String, so
+    // storing them as SecureString here won't really help with anything
+    /** An optional username for the proxy host, for basic authentication. */
+    final String proxyUsername;
+
+    /** An optional password for the proxy host, for basic authentication. */
+    final String proxyPassword;
+
+    /** The read timeout for the ec2 client. */
+    final int readTimeoutMillis;
+
+    protected Ec2ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort,
+            String proxyUsername, String proxyPassword, int readTimeoutMillis) {
+        this.credentials = credentials;
+        this.endpoint = endpoint;
+        this.protocol = protocol;
+        this.proxyHost = proxyHost;
+        this.proxyPort = proxyPort;
+        this.proxyUsername = proxyUsername;
+        this.proxyPassword = proxyPassword;
+        this.readTimeoutMillis = readTimeoutMillis;
+    }
+
+    static BasicAWSCredentials loadCredentials(Settings settings) {
+        try (SecureString accessKey = ACCESS_KEY_SETTING.get(settings);
+             SecureString secretKey = SECRET_KEY_SETTING.get(settings)) {
+            if (accessKey.length() != 0) {
+                if (secretKey.length() != 0) {
+                    return new BasicAWSCredentials(accessKey.toString(), secretKey.toString());
+                } else {
+                    throw new IllegalArgumentException("Missing secret key for ec2 client.");
+                }
+            } else if (secretKey.length() != 0) {
+                throw new IllegalArgumentException("Missing access key for ec2 client.");
+            }
+            return null;
+        }
+    }
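A quick illustration (not part of the patch) of the contract `loadCredentials` enforces — both keys, neither, or an error — in the style of the `MockSecureSettings`-based unit tests elsewhere in this commit:

["source","java"]
----
// Illustrative-only sketch of loadCredentials() outcomes, using the same
// MockSecureSettings helper the patch's own tests use. Not part of the patch.
package org.elasticsearch.discovery.ec2;

import com.amazonaws.auth.BasicAWSCredentials;
import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;

final class Ec2CredentialsContractSketch {

    static void demonstrate() {
        // both keys present: explicit BasicAWSCredentials are returned
        final MockSecureSettings bothKeys = new MockSecureSettings();
        bothKeys.setString("discovery.ec2.access_key", "aws_key");
        bothKeys.setString("discovery.ec2.secret_key", "aws_secret");
        final BasicAWSCredentials credentials =
                Ec2ClientSettings.loadCredentials(Settings.builder().setSecureSettings(bothKeys).build());
        assert "aws_key".equals(credentials.getAWSAccessKeyId());

        // neither key present: null, so the service falls back to the
        // DefaultAWSCredentialsProviderChain (environment, instance profile, ...)
        assert Ec2ClientSettings.loadCredentials(Settings.EMPTY) == null;

        // exactly one of the pair present: rejected outright
        final MockSecureSettings accessOnly = new MockSecureSettings();
        accessOnly.setString("discovery.ec2.access_key", "aws_key");
        try {
            Ec2ClientSettings.loadCredentials(Settings.builder().setSecureSettings(accessOnly).build());
            assert false : "expected IllegalArgumentException";
        } catch (IllegalArgumentException expected) {
            // "Missing secret key for ec2 client."
        }
    }
}
----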
+    // pkg private for tests
+    /** Parse settings for a single client. */
+    static Ec2ClientSettings getClientSettings(Settings settings) {
+        final BasicAWSCredentials credentials = loadCredentials(settings);
+        try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings);
+             SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)) {
+            return new Ec2ClientSettings(
+                credentials,
+                ENDPOINT_SETTING.get(settings),
+                PROTOCOL_SETTING.get(settings),
+                PROXY_HOST_SETTING.get(settings),
+                PROXY_PORT_SETTING.get(settings),
+                proxyUsername.toString(),
+                proxyPassword.toString(),
+                (int) READ_TIMEOUT_SETTING.get(settings).millis());
+        }
+    }
+
+}

diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java
index 1617e4aebfa46..79f653d5bde55 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java
@@ -21,11 +21,8 @@
 
 import com.amazonaws.util.json.Jackson;
 import org.apache.logging.log4j.Logger;
-import org.elasticsearch.core.internal.io.IOUtils;
-import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.Setting;
@@ -34,6 +31,7 @@
 import org.elasticsearch.node.Node;
 import org.elasticsearch.plugins.DiscoveryPlugin;
 import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.ReloadablePlugin;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.UncheckedIOException;
@@ -41,7 +39,6 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
-import java.io.Closeable;
 import java.net.URL;
 import java.net.URLConnection;
 import java.nio.charset.StandardCharsets;
@@ -53,10 +50,9 @@
 import java.util.Map;
 import java.util.function.Supplier;
 
-public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {
+public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, ReloadablePlugin {
 
     private static Logger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class);
-    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
 
     public static final String EC2 = "ec2";
 
@@ -71,22 +67,27 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close
             // ClientConfiguration clinit has some classloader problems
             // TODO: fix that
             Class.forName("com.amazonaws.ClientConfiguration");
-        } catch (ClassNotFoundException e) {
+        } catch (final ClassNotFoundException e) {
             throw new RuntimeException(e);
         }
         return null;
        });
    }
 
-    private Settings settings;
-    // stashed when created in order to properly close
-    private final SetOnce<AwsEc2ServiceImpl> ec2Service = new SetOnce<>();
+    private final Settings settings;
+    // protected for testing
+    protected final AwsEc2Service ec2Service;
 
     public Ec2DiscoveryPlugin(Settings settings) {
-        this.settings = settings;
+        this(settings, new AwsEc2ServiceImpl(settings));
     }
-
+
+    protected Ec2DiscoveryPlugin(Settings settings, AwsEc2ServiceImpl ec2Service) {
+        this.settings = settings;
+        this.ec2Service = ec2Service;
+        // eagerly load client settings when secure settings are accessible
+        reload(settings);
+    }
 
     @Override
     public NetworkService.CustomNameResolver
getCustomNameResolver(Settings settings) { @@ -97,25 +98,22 @@ public NetworkService.CustomNameResolver getCustomNameResolver(Settings settings @Override public Map> getZenHostsProviders(TransportService transportService, NetworkService networkService) { - return Collections.singletonMap(EC2, () -> { - ec2Service.set(new AwsEc2ServiceImpl(settings)); - return new AwsEc2UnicastHostsProvider(settings, transportService, ec2Service.get()); - }); + return Collections.singletonMap(EC2, () -> new AwsEc2UnicastHostsProvider(settings, transportService, ec2Service)); } @Override public List> getSettings() { return Arrays.asList( // Register EC2 discovery settings: discovery.ec2 - AwsEc2Service.ACCESS_KEY_SETTING, - AwsEc2Service.SECRET_KEY_SETTING, - AwsEc2Service.ENDPOINT_SETTING, - AwsEc2Service.PROTOCOL_SETTING, - AwsEc2Service.PROXY_HOST_SETTING, - AwsEc2Service.PROXY_PORT_SETTING, - AwsEc2Service.PROXY_USERNAME_SETTING, - AwsEc2Service.PROXY_PASSWORD_SETTING, - AwsEc2Service.READ_TIMEOUT_SETTING, + Ec2ClientSettings.ACCESS_KEY_SETTING, + Ec2ClientSettings.SECRET_KEY_SETTING, + Ec2ClientSettings.ENDPOINT_SETTING, + Ec2ClientSettings.PROTOCOL_SETTING, + Ec2ClientSettings.PROXY_HOST_SETTING, + Ec2ClientSettings.PROXY_PORT_SETTING, + Ec2ClientSettings.PROXY_USERNAME_SETTING, + Ec2ClientSettings.PROXY_PASSWORD_SETTING, + Ec2ClientSettings.READ_TIMEOUT_SETTING, AwsEc2Service.HOST_TYPE_SETTING, AwsEc2Service.ANY_GROUP_SETTING, AwsEc2Service.GROUPS_SETTING, @@ -128,10 +126,10 @@ public List> getSettings() { @Override public Settings additionalSettings() { - Settings.Builder builder = Settings.builder(); + final Settings.Builder builder = Settings.builder(); // Adds a node attribute for the ec2 availability zone - String azMetadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + "placement/availability-zone"; + final String azMetadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + "placement/availability-zone"; builder.put(getAvailabilityZoneNodeAttributes(settings, azMetadataUrl)); return builder.build(); } @@ -142,7 +140,7 @@ static Settings getAvailabilityZoneNodeAttributes(Settings settings, String azMe if (AwsEc2Service.AUTO_ATTRIBUTE_SETTING.get(settings) == false) { return Settings.EMPTY; } - Settings.Builder attrs = Settings.builder(); + final Settings.Builder attrs = Settings.builder(); final URL url; final URLConnection urlConnection; @@ -151,7 +149,7 @@ static Settings getAvailabilityZoneNodeAttributes(Settings settings, String azMe logger.debug("obtaining ec2 [placement/availability-zone] from ec2 meta-data url {}", url); urlConnection = SocketAccess.doPrivilegedIOException(url::openConnection); urlConnection.setConnectTimeout(2000); - } catch (IOException e) { + } catch (final IOException e) { // should not happen, we know the url is not malformed, and openConnection does not actually hit network throw new UncheckedIOException(e); } @@ -159,13 +157,13 @@ static Settings getAvailabilityZoneNodeAttributes(Settings settings, String azMe try (InputStream in = SocketAccess.doPrivilegedIOException(urlConnection::getInputStream); BufferedReader urlReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) { - String metadataResult = urlReader.readLine(); - if (metadataResult == null || metadataResult.length() == 0) { + final String metadataResult = urlReader.readLine(); + if ((metadataResult == null) || (metadataResult.length() == 0)) { throw new IllegalStateException("no ec2 metadata returned from " + url); } else { attrs.put(Node.NODE_ATTRIBUTES.getKey() + "aws_availability_zone", 
metadataResult); } - } catch (IOException e) { + } catch (final IOException e) { // this is lenient so the plugin does not fail when installed outside of ec2 logger.error("failed to get metadata for [placement/availability-zone]", e); } @@ -175,6 +173,13 @@ static Settings getAvailabilityZoneNodeAttributes(Settings settings, String azMe @Override public void close() throws IOException { - IOUtils.close(ec2Service.get()); + ec2Service.close(); + } + + @Override + public void reload(Settings settings) { + // secure settings should be readable + final Ec2ClientSettings clientSettings = Ec2ClientSettings.getClientSettings(settings); + ec2Service.refreshAndClearCache(clientSettings); } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java index 34ad449d06e8d..aa08447fd208b 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java @@ -22,7 +22,9 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.AmazonServiceException; import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.ClientConfiguration; import com.amazonaws.ResponseMetadata; +import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.regions.Region; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.model.AcceptVpcPeeringConnectionRequest; @@ -528,9 +530,12 @@ public class AmazonEC2Mock implements AmazonEC2 { public static final String PREFIX_PRIVATE_DNS = "mock-ip-"; public static final String SUFFIX_PRIVATE_DNS = ".ec2.internal"; - List instances = new ArrayList<>(); + final List instances = new ArrayList<>(); + String endpoint; + final AWSCredentialsProvider credentials; + final ClientConfiguration configuration; - public AmazonEC2Mock(int nodes, List> tagsList) { + public AmazonEC2Mock(int nodes, List> tagsList, AWSCredentialsProvider credentials, ClientConfiguration configuration) { if (tagsList != null) { assert tagsList.size() == nodes; } @@ -552,7 +557,8 @@ public AmazonEC2Mock(int nodes, List> tagsList) { instances.add(instance); } - + this.credentials = credentials; + this.configuration = configuration; } @Override @@ -642,7 +648,7 @@ public DescribeInstancesResult describeInstances(DescribeInstancesRequest descri @Override public void setEndpoint(String endpoint) throws IllegalArgumentException { - throw new UnsupportedOperationException("Not supported in mock"); + this.endpoint = endpoint; } @Override @@ -2110,7 +2116,6 @@ public DryRunResult dryRun(DryRunSupporte @Override public void shutdown() { - throw new UnsupportedOperationException("Not supported in mock"); } @Override diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java index f157cd6d44b17..a13fe47a632ae 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java @@ -26,29 +26,31 @@ import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.ec2.AwsEc2ServiceImpl; import 
org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; public class AwsEc2ServiceImplTests extends ESTestCase { public void testAWSCredentialsWithSystemProviders() { - AWSCredentialsProvider credentialsProvider = AwsEc2ServiceImpl.buildCredentials(logger, Settings.EMPTY); + final AWSCredentialsProvider credentialsProvider = AwsEc2ServiceImpl.buildCredentials(logger, + Ec2ClientSettings.getClientSettings(Settings.EMPTY)); assertThat(credentialsProvider, instanceOf(DefaultAWSCredentialsProviderChain.class)); } public void testAWSCredentialsWithElasticsearchAwsSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); + final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("discovery.ec2.access_key", "aws_key"); secureSettings.setString("discovery.ec2.secret_key", "aws_secret"); - Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); launchAWSCredentialsWithElasticsearchSettingsTest(settings, "aws_key", "aws_secret"); } protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings settings, String expectedKey, String expectedSecret) { - AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, settings).getCredentials(); + final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, Ec2ClientSettings.getClientSettings(settings)) + .getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); } @@ -59,10 +61,10 @@ public void testAWSDefaultConfiguration() { } public void testAWSConfigurationWithAwsSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); + final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("discovery.ec2.proxy.username", "aws_proxy_username"); secureSettings.setString("discovery.ec2.proxy.password", "aws_proxy_password"); - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .put("discovery.ec2.protocol", "http") .put("discovery.ec2.proxy.host", "aws_proxy_host") .put("discovery.ec2.proxy.port", 8080) @@ -79,7 +81,8 @@ protected void launchAWSConfigurationTest(Settings settings, String expectedProxyUsername, String expectedProxyPassword, int expectedReadTimeout) { - ClientConfiguration configuration = AwsEc2ServiceImpl.buildConfiguration(logger, settings); + final ClientConfiguration configuration = AwsEc2ServiceImpl.buildConfiguration(logger, + Ec2ClientSettings.getClientSettings(settings)); assertThat(configuration.getResponseMetadataCacheSize(), is(0)); assertThat(configuration.getProtocol(), is(expectedProtocol)); @@ -90,16 +93,4 @@ protected void launchAWSConfigurationTest(Settings settings, assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout)); } - public void testDefaultEndpoint() { - String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, Settings.EMPTY); - assertThat(endpoint, nullValue()); - } - - public void testSpecificEndpoint() { - Settings settings = Settings.builder() - .put(AwsEc2Service.ENDPOINT_SETTING.getKey(), "ec2.endpoint") - .build(); - String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, settings); - assertThat(endpoint, is("ec2.endpoint")); - } } diff --git 
a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java index e29821efda223..0596dd697b2eb 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java @@ -19,18 +19,19 @@ package org.elasticsearch.discovery.ec2; +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.model.Tag; -import org.elasticsearch.common.component.AbstractLifecycleComponent; + import org.elasticsearch.common.settings.Settings; import java.util.List; -public class AwsEc2ServiceMock extends AbstractLifecycleComponent implements AwsEc2Service { +public class AwsEc2ServiceMock extends AwsEc2ServiceImpl { - private int nodes; - private List> tagsList; - private AmazonEC2 client; + private final int nodes; + private final List> tagsList; public AwsEc2ServiceMock(Settings settings, int nodes, List> tagsList) { super(settings); @@ -39,26 +40,8 @@ public AwsEc2ServiceMock(Settings settings, int nodes, List> tagsList) } @Override - public synchronized AmazonEC2 client() { - if (client == null) { - client = new AmazonEC2Mock(nodes, tagsList); - } - - return client; + AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { + return new AmazonEC2Mock(nodes, tagsList, credentials, configuration); } - @Override - protected void doStart() { - - } - - @Override - protected void doStop() { - - } - - @Override - protected void doClose() { - - } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java new file mode 100644 index 0000000000000..a92bd243bc9b7 --- /dev/null +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.ec2; + +import com.amazonaws.services.ec2.model.Tag; + +import org.elasticsearch.common.settings.Settings; + +import java.util.List; + +public class Ec2DiscoveryPluginMock extends Ec2DiscoveryPlugin { + + Ec2DiscoveryPluginMock(Settings settings) { + this(settings, 1, null); + } + + public Ec2DiscoveryPluginMock(Settings settings, int nodes, List> tagsList) { + super(settings, new AwsEc2ServiceMock(settings, nodes, tagsList)); + } + +} diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java index 9bb75c0b09f97..6001ab56d5042 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -19,12 +19,17 @@ package org.elasticsearch.discovery.ec2; +import java.io.IOException; import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + import org.elasticsearch.discovery.ec2.AwsEc2Service; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.ec2.Ec2DiscoveryPlugin; import org.elasticsearch.node.Node; @@ -33,14 +38,14 @@ public class Ec2DiscoveryPluginTests extends ESTestCase { private Settings getNodeAttributes(Settings settings, String url) { - Settings realSettings = Settings.builder() + final Settings realSettings = Settings.builder() .put(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), true) .put(settings).build(); return Ec2DiscoveryPlugin.getAvailabilityZoneNodeAttributes(realSettings, url); } private void assertNodeAttributes(Settings settings, String url, String expected) { - Settings additional = getNodeAttributes(settings, url); + final Settings additional = getNodeAttributes(settings, url); if (expected == null) { assertTrue(additional.isEmpty()); } else { @@ -49,36 +54,106 @@ private void assertNodeAttributes(Settings settings, String url, String expected } public void testNodeAttributesDisabled() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .put(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), false).build(); assertNodeAttributes(settings, "bogus", null); } public void testNodeAttributes() throws Exception { - Path zoneUrl = createTempFile(); + final Path zoneUrl = createTempFile(); Files.write(zoneUrl, Arrays.asList("us-east-1c")); assertNodeAttributes(Settings.EMPTY, zoneUrl.toUri().toURL().toString(), "us-east-1c"); } public void testNodeAttributesBogusUrl() { - UncheckedIOException e = expectThrows(UncheckedIOException.class, () -> + final UncheckedIOException e = expectThrows(UncheckedIOException.class, () -> getNodeAttributes(Settings.EMPTY, "bogus") ); assertNotNull(e.getCause()); - String msg = e.getCause().getMessage(); + final String msg = e.getCause().getMessage(); assertTrue(msg, msg.contains("no protocol: bogus")); } public void testNodeAttributesEmpty() throws Exception { - Path zoneUrl = createTempFile(); - IllegalStateException e = expectThrows(IllegalStateException.class, () -> + final Path zoneUrl = createTempFile(); + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> getNodeAttributes(Settings.EMPTY, 
zoneUrl.toUri().toURL().toString()) ); assertTrue(e.getMessage(), e.getMessage().contains("no ec2 metadata returned")); } public void testNodeAttributesErrorLenient() throws Exception { - Path dne = createTempDir().resolve("dne"); + final Path dne = createTempDir().resolve("dne"); assertNodeAttributes(Settings.EMPTY, dne.toUri().toURL().toString(), null); } + + public void testDefaultEndpoint() throws IOException { + try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY)) { + final String endpoint = ((AmazonEC2Mock) plugin.ec2Service.client().client()).endpoint; + assertThat(endpoint, nullValue()); + } + } + + public void testSpecificEndpoint() throws IOException { + final Settings settings = Settings.builder().put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2.endpoint").build(); + try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(settings)) { + final String endpoint = ((AmazonEC2Mock) plugin.ec2Service.client().client()).endpoint; + assertThat(endpoint, is("ec2.endpoint")); + } + } + + public void testClientSettingsReInit() throws IOException { + final MockSecureSettings mockSecure1 = new MockSecureSettings(); + mockSecure1.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), "ec2_access_1"); + mockSecure1.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret_1"); + mockSecure1.setString(Ec2ClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_1"); + mockSecure1.setString(Ec2ClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_1"); + final Settings settings1 = Settings.builder() + .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_1") + .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 881) + .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_1") + .setSecureSettings(mockSecure1) + .build(); + final MockSecureSettings mockSecure2 = new MockSecureSettings(); + mockSecure2.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), "ec2_access_2"); + mockSecure2.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret_2"); + mockSecure2.setString(Ec2ClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_2"); + mockSecure2.setString(Ec2ClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_2"); + final Settings settings2 = Settings.builder() + .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_2") + .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 882) + .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_2") + .setSecureSettings(mockSecure2) + .build(); + try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(settings1)) { + try (AmazonEc2Reference clientReference = plugin.ec2Service.client()) { + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); + // reload secure settings2 + plugin.reload(settings2); + // 
client is not released, it is still using the old settings + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); + } + try (AmazonEc2Reference clientReference = plugin.ec2Service.client()) { + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_2")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(882)); + assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_2")); + } + } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index e7986cb878e41..43cc924fadb10 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -39,6 +39,7 @@ import org.junit.Before; import org.junit.BeforeClass; +import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; @@ -91,11 +92,15 @@ protected List<DiscoveryNode> buildDynamicNodes(Settings nodeSettings, int nodes } protected List<DiscoveryNode> buildDynamicNodes(Settings nodeSettings, int nodes, List<List<Tag>> tagsList) { - AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(nodeSettings, nodes, tagsList); - AwsEc2UnicastHostsProvider provider = new AwsEc2UnicastHostsProvider(nodeSettings, transportService, awsEc2Service); - List<DiscoveryNode> discoveryNodes = provider.buildDynamicNodes(); - logger.debug("--> nodes found: {}", discoveryNodes); - return discoveryNodes; + try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY, nodes, tagsList)) { + AwsEc2UnicastHostsProvider provider = new AwsEc2UnicastHostsProvider(nodeSettings, transportService, plugin.ec2Service); + List<DiscoveryNode> discoveryNodes = provider.buildDynamicNodes(); + logger.debug("--> nodes found: {}", discoveryNodes); + return discoveryNodes; + } catch (IOException e) { + fail("Unexpected IOException"); + return null; + } } public void testDefaultSettings() throws InterruptedException { @@ -315,22 +320,23 @@ protected List<DiscoveryNode> fetchDynamicNodes() { public void testGetNodeListCached() throws Exception { Settings.Builder builder = Settings.builder()
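// the 500ms node-cache TTL set below is deliberately short: the assertions later in
// this test expect fetchCount to advance from 1 to 2 once the cache entry expires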
.put(AwsEc2Service.NODE_CACHE_TIME_SETTING.getKey(), "500ms"); - AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(Settings.EMPTY, 1, null); - DummyEc2HostProvider provider = new DummyEc2HostProvider(builder.build(), transportService, awsEc2Service) { - @Override - protected List fetchDynamicNodes() { - fetchCount++; - return Ec2DiscoveryTests.this.buildDynamicNodes(Settings.EMPTY, 1); + try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY)) { + DummyEc2HostProvider provider = new DummyEc2HostProvider(builder.build(), transportService, plugin.ec2Service) { + @Override + protected List fetchDynamicNodes() { + fetchCount++; + return Ec2DiscoveryTests.this.buildDynamicNodes(Settings.EMPTY, 1); + } + }; + for (int i=0; i<3; i++) { + provider.buildDynamicNodes(); } - }; - for (int i=0; i<3; i++) { - provider.buildDynamicNodes(); - } - assertThat(provider.fetchCount, is(1)); - Thread.sleep(1_000L); // wait for cache to expire - for (int i=0; i<3; i++) { - provider.buildDynamicNodes(); + assertThat(provider.fetchCount, is(1)); + Thread.sleep(1_000L); // wait for cache to expire + for (int i=0; i<3; i++) { + provider.buildDynamicNodes(); + } + assertThat(provider.fetchCount, is(2)); } - assertThat(provider.fetchCount, is(2)); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java index 12a5adecf06e9..d4497c5ee85fd 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -20,47 +20,46 @@ package org.elasticsearch.cloud.azure.blobstore; import com.microsoft.azure.storage.LocationMode; + import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cloud.azure.storage.AzureStorageService; +import org.elasticsearch.cloud.azure.storage.AzureStorageSettings; import org.elasticsearch.cluster.metadata.RepositoryMetaData; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; - import java.io.IOException; import java.io.InputStream; import java.net.URISyntaxException; import java.nio.file.FileAlreadyExistsException; -import java.util.Locale; import java.util.Map; +import static java.util.Collections.emptyMap; + import static org.elasticsearch.repositories.azure.AzureRepository.Repository; public class AzureBlobStore extends AbstractComponent implements BlobStore { - private final AzureStorageService client; + private final AzureStorageService service; private final String clientName; - private final LocationMode locMode; private final String container; + private final LocationMode locationMode; - public AzureBlobStore(RepositoryMetaData metadata, Settings settings, - AzureStorageService client) throws URISyntaxException, StorageException { + public AzureBlobStore(RepositoryMetaData metadata, Settings settings, AzureStorageService service) + throws URISyntaxException, StorageException { super(settings); - this.client = client; this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = 
Repository.CLIENT_NAME.get(metadata.settings()); - - String modeStr = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); - if (Strings.hasLength(modeStr)) { - this.locMode = LocationMode.valueOf(modeStr.toUpperCase(Locale.ROOT)); - } else { - this.locMode = LocationMode.PRIMARY_ONLY; - } + this.service = service; + // locationMode is set per repository, not per client + this.locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); + final Map prevSettings = this.service.refreshAndClearCache(emptyMap()); + final Map newSettings = AzureStorageSettings.overrideLocationMode(prevSettings, this.locationMode); + this.service.refreshAndClearCache(newSettings); } @Override @@ -72,7 +71,11 @@ public String toString() { * Gets the configured {@link LocationMode} for the Azure storage requests. */ public LocationMode getLocationMode() { - return locMode; + return locationMode; + } + + public String getClientName() { + return clientName; } @Override @@ -81,12 +84,13 @@ public BlobContainer blobContainer(BlobPath path) { } @Override - public void delete(BlobPath path) { - String keyPath = path.buildAsString(); + public void delete(BlobPath path) throws IOException { + final String keyPath = path.buildAsString(); try { - this.client.deleteFiles(this.clientName, this.locMode, container, keyPath); + service.deleteFiles(clientName, container, keyPath); } catch (URISyntaxException | StorageException e) { - logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage()); + logger.warn("cannot access [{}] in container {{}}: {}", keyPath, container, e.getMessage()); + throw new IOException(e); } } @@ -94,30 +98,29 @@ public void delete(BlobPath path) { public void close() { } - public boolean doesContainerExist() - { - return this.client.doesContainerExist(this.clientName, this.locMode, container); + public boolean containerExist() throws URISyntaxException, StorageException { + return service.doesContainerExist(clientName, container); } public boolean blobExists(String blob) throws URISyntaxException, StorageException { - return this.client.blobExists(this.clientName, this.locMode, container, blob); + return service.blobExists(clientName, container, blob); } public void deleteBlob(String blob) throws URISyntaxException, StorageException { - this.client.deleteBlob(this.clientName, this.locMode, container, blob); + service.deleteBlob(clientName, container, blob); } public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException { - return this.client.getInputStream(this.clientName, this.locMode, container, blob); + return service.getInputStream(clientName, container, blob); } public Map listBlobsByPrefix(String keyPath, String prefix) throws URISyntaxException, StorageException { - return this.client.listBlobsByPrefix(this.clientName, this.locMode, container, keyPath, prefix); + return service.listBlobsByPrefix(clientName, container, keyPath, prefix); } public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException, FileAlreadyExistsException { - this.client.writeBlob(this.clientName, this.locMode, container, blobName, inputStream, blobSize); + service.writeBlob(this.clientName, container, blobName, inputStream, blobSize); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java index 
934533f81c99c..4c3195201503d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -19,13 +19,16 @@ package org.elasticsearch.cloud.azure.storage; -import com.microsoft.azure.storage.LocationMode; +import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cloud.azure.blobstore.util.SocketAccess; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import com.microsoft.azure.storage.blob.CloudBlobClient; + +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -35,6 +38,7 @@ import java.net.URISyntaxException; import java.nio.file.FileAlreadyExistsException; import java.util.Map; +import java.util.function.Supplier; /** * Azure Storage Service interface @@ -42,6 +46,24 @@ */ public interface AzureStorageService { + /** + * Creates a {@code CloudBlobClient} on each invocation using the current client + * settings. CloudBlobClient is not thread safe and the settings can change, + * therefore the instance is not cacheable and should only be reused inside a + * thread for logically coupled operations. The {@code OperationContext} is used to + * specify the proxy, but a new context is *required* for each call. + */ + Tuple<CloudBlobClient, Supplier<OperationContext>> client(String clientName); + + /** + * Updates settings for building clients. Any client cache is cleared. Future + * client requests will use the new refreshed settings. 
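+ * <p>
+ * A minimal usage sketch (illustrative only; {@code service} stands for any
+ * {@code AzureStorageService} instance):
+ * <pre>{@code
+ * final Map<String, AzureStorageSettings> newSettings = AzureStorageSettings.load(settings);
+ * final Map<String, AzureStorageSettings> oldSettings = service.refreshAndClearCache(newSettings);
+ * // clients built from here on observe newSettings; client instances handed
+ * // out earlier keep the settings they were built with
+ * }</pre>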
+ * + * @param clientsSettings the settings for new clients + * @return the old settings + */ + Map<String, AzureStorageSettings> refreshAndClearCache(Map<String, AzureStorageSettings> clientsSettings); + ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB); @@ -61,26 +83,25 @@ final class Storage { Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueMinutes(-1), Property.NodeScope, Property.Deprecated); } - boolean doesContainerExist(String account, LocationMode mode, String container); + boolean doesContainerExist(String account, String container) throws URISyntaxException, StorageException; - void removeContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException; + void removeContainer(String account, String container) throws URISyntaxException, StorageException; - void createContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException; + void createContainer(String account, String container) throws URISyntaxException, StorageException; - void deleteFiles(String account, LocationMode mode, String container, String path) throws URISyntaxException, StorageException; + void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException; - boolean blobExists(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException; + boolean blobExists(String account, String container, String blob) throws URISyntaxException, StorageException; - void deleteBlob(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException; + void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException; - InputStream getInputStream(String account, LocationMode mode, String container, String blob) - throws URISyntaxException, StorageException, IOException; + InputStream getInputStream(String account, String container, String blob) throws URISyntaxException, StorageException, IOException; - Map<String, BlobMetaData> listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) - throws URISyntaxException, StorageException; + Map<String, BlobMetaData> listBlobsByPrefix(String account, String container, String keyPath, String prefix) + throws URISyntaxException, StorageException; - void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) throws - URISyntaxException, StorageException, FileAlreadyExistsException; + void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize) + throws URISyntaxException, StorageException, FileAlreadyExistsException; static InputStream giveSocketPermissionsToStream(InputStream stream) { return new InputStream() { diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index 597c9813b6e92..7ea4dcca5fcbb 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -21,7 +21,6 @@ import com.microsoft.azure.storage.AccessCondition; import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.LocationMode; 
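+// note that LocationMode is gone from all of these signatures: the location mode now
+// travels with AzureStorageSettings (see overrideLocationMode) and is applied to the
+// client's default request options when the client is built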
import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.RetryPolicy; @@ -36,205 +35,141 @@ import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; import com.microsoft.azure.storage.blob.ListBlobItem; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cloud.azure.blobstore.util.SocketAccess; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.repositories.RepositoryException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URI; import java.net.URISyntaxException; +import java.security.InvalidKeyException; import java.nio.file.FileAlreadyExistsException; import java.util.EnumSet; -import java.util.HashMap; import java.util.Map; +import java.util.function.Supplier; -public class AzureStorageServiceImpl extends AbstractComponent implements AzureStorageService { - - final Map storageSettings; - final Map deprecatedStorageSettings; +import static java.util.Collections.emptyMap; - final Map clients; +public class AzureStorageServiceImpl extends AbstractComponent implements AzureStorageService { public AzureStorageServiceImpl(Settings settings, Map regularStorageSettings) { super(settings); - - if (regularStorageSettings.isEmpty()) { - this.storageSettings = new HashMap<>(); - // We have deprecated settings so we need to migrate them to the new implementation - Tuple> storageSettingsMapTuple = AzureStorageSettings.loadLegacy(settings); - deprecatedStorageSettings = storageSettingsMapTuple.v2(); - if (storageSettingsMapTuple.v1() != null) { - if (storageSettingsMapTuple.v1().getName().equals("default") == false) { - // We add the primary configuration to the list of all settings with its deprecated name in case someone is - // forcing a specific configuration name when creating the repository instance - deprecatedStorageSettings.put(storageSettingsMapTuple.v1().getName(), storageSettingsMapTuple.v1()); - } - // We add the primary configuration to the list of all settings as the "default" one - deprecatedStorageSettings.put("default", storageSettingsMapTuple.v1()); - } else { - // If someone did not register any settings or deprecated settings, they - // basically can't use the plugin - throw new IllegalArgumentException("If you want to use an azure repository, you need to define a client configuration."); - } - } else { - this.storageSettings = regularStorageSettings; - this.deprecatedStorageSettings = new HashMap<>(); - } - - this.clients = new HashMap<>(); - - logger.debug("starting azure storage client instance"); - - // We register all regular azure clients - for (Map.Entry azureStorageSettingsEntry : this.storageSettings.entrySet()) { - final String clientName = azureStorageSettingsEntry.getKey(); - createClient(clientName, azureStorageSettingsEntry.getValue()); - } - - // We register all deprecated azure clients - for (Map.Entry azureStorageSettingsEntry : this.deprecatedStorageSettings.entrySet()) { - final String clientName = azureStorageSettingsEntry.getKey(); - 
createClient(clientName, azureStorageSettingsEntry.getValue()); - } + // eagerly load client settings so that secure settings are read + final Map clientsSettings = AzureStorageSettings.load(settings); + refreshAndClearCache(clientsSettings); } - private void createClient(final String clientName, final AzureStorageSettings azureStorageSettings) { - try { - String storageConnectionString = - "DefaultEndpointsProtocol=https;" - + "AccountName=" + azureStorageSettings.getAccount() + ";" - + "AccountKey=" + azureStorageSettings.getKey(); - - String endpointSuffix = azureStorageSettings.getEndpointSuffix(); - if (endpointSuffix != null && !endpointSuffix.isEmpty()) { - storageConnectionString += ";EndpointSuffix=" + endpointSuffix; - } - // Retrieve storage account from connection-string. - CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString); - - // Create the blob client. - CloudBlobClient client = storageAccount.createCloudBlobClient(); + // 'package' for testing + volatile Map storageSettings = emptyMap(); - // Register the client - this.clients.put(azureStorageSettings.getAccount(), client); - } catch (Exception e) { - logger.error(() -> new ParameterizedMessage("Can not create azure storage client [{}]", clientName), e); - } + public AzureStorageServiceImpl(Settings settings) { + super(settings); + // eagerly load client settings so that secure settings are read + final Map clientsSettings = AzureStorageSettings.load(settings); + refreshAndClearCache(clientsSettings); } - CloudBlobClient getSelectedClient(String account, LocationMode mode) { - AzureStorageSettings azureStorageSettings = this.storageSettings.get(account); + @Override + public Tuple> client(String clientName) { + final AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); if (azureStorageSettings == null) { - // We can't find a client that has been registered using regular settings so we try deprecated client - azureStorageSettings = this.deprecatedStorageSettings.get(account); - if (azureStorageSettings == null) { - // We did not get an account. That's bad. - if (Strings.hasLength(account)) { - throw new IllegalArgumentException("Unable to find Azure client"); - } - throw new IllegalArgumentException("Can not find primary/secondary client using deprecated settings. 
" + - "Check your elasticsearch.yml."); - } + throw new SettingsException("Unable to find client with name [" + clientName + "]"); } - - CloudBlobClient client = this.clients.get(azureStorageSettings.getAccount()); - if (client == null) { - throw new IllegalArgumentException("Can not find an Azure client"); + try { + return new Tuple<>(buildClient(azureStorageSettings), () -> buildOperationContext(azureStorageSettings)); + } catch (InvalidKeyException | URISyntaxException | IllegalArgumentException e) { + throw new SettingsException("Invalid azure client settings with name [" + clientName + "]", e); } + } - // NOTE: for now, just set the location mode in case it is different; - // only one mode per storage account can be active at a time - client.getDefaultRequestOptions().setLocationMode(mode); - - // Set timeout option if the user sets cloud.azure.storage.timeout or cloud.azure.storage.xxx.timeout (it's negative by default) - if (azureStorageSettings.getTimeout().getSeconds() > 0) { - try { - int timeout = (int) azureStorageSettings.getTimeout().getMillis(); - client.getDefaultRequestOptions().setTimeoutIntervalInMs(timeout); - } catch (ClassCastException e) { - throw new IllegalArgumentException("Can not convert [" + azureStorageSettings.getTimeout() + - "]. It can not be longer than 2,147,483,647ms."); + protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + final CloudBlobClient client = createClient(azureStorageSettings); + // Set timeout option if the user sets cloud.azure.storage.timeout or + // cloud.azure.storage.xxx.timeout (it's negative by default) + final long timeout = azureStorageSettings.getTimeout().getMillis(); + if (timeout > 0) { + if (timeout > Integer.MAX_VALUE) { + throw new IllegalArgumentException("Timeout [" + azureStorageSettings.getTimeout() + "] exceeds 2,147,483,647ms."); } + client.getDefaultRequestOptions().setTimeoutIntervalInMs((int) timeout); } - // We define a default exponential retry policy - client.getDefaultRequestOptions().setRetryPolicyFactory( - new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries())); - + client.getDefaultRequestOptions() + .setRetryPolicyFactory(new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries())); + client.getDefaultRequestOptions().setLocationMode(azureStorageSettings.getLocationMode()); return client; } - // Package private for testing in 6.x only: not needed anymore after - OperationContext generateOperationContext(String clientName) { - OperationContext context = new OperationContext(); - AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); - if (azureStorageSettings == null) { - azureStorageSettings = deprecatedStorageSettings.get(clientName); - } - - if (azureStorageSettings.getProxy() != null) { - context.setProxy(azureStorageSettings.getProxy()); - } + protected CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + final String connectionString = azureStorageSettings.buildConnectionString(); + return CloudStorageAccount.parse(connectionString).createCloudBlobClient(); + } + protected OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { + final OperationContext context = new OperationContext(); + context.setProxy(azureStorageSettings.getProxy()); return context; } @Override - public boolean doesContainerExist(String account, 
LocationMode mode, String container) { - try { - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account))); - } catch (Exception e) { - logger.error("can not access container [{}]", container); - } - return false; + public Map<String, AzureStorageSettings> refreshAndClearCache(Map<String, AzureStorageSettings> clientsSettings) { + final Map<String, AzureStorageSettings> prevSettings = this.storageSettings; + this.storageSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); + // clients are built lazily by {@link client(String)} + return prevSettings; } @Override - public void removeContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException { - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - logger.trace("removing container [{}]", container); - SocketAccess.doPrivilegedException(() -> blobContainer.deleteIfExists(null, null, generateOperationContext(account))); + public boolean doesContainerExist(String account, String container) throws URISyntaxException, StorageException { + final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get())); } @Override - public void createContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException { + public void removeContainer(String account, String container) throws URISyntaxException, StorageException { + final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + logger.trace(() -> new ParameterizedMessage("removing container [{}]", container)); + SocketAccess.doPrivilegedException(() -> blobContainer.deleteIfExists(null, null, client.v2().get())); + } + + @Override + public void createContainer(String account, String container) throws URISyntaxException, StorageException { try { - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - logger.trace("creating container [{}]", container); - SocketAccess.doPrivilegedException(() -> blobContainer.createIfNotExists(null, null, generateOperationContext(account))); - } catch (IllegalArgumentException e) { - logger.trace((Supplier<?>) () -> new ParameterizedMessage("fails creating container [{}]", container), e); + final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + logger.trace(() -> new ParameterizedMessage("creating container [{}]", container)); + SocketAccess.doPrivilegedException(() -> blobContainer.createIfNotExists(null, null, client.v2().get())); + } catch (final IllegalArgumentException e) { + logger.trace(() -> new ParameterizedMessage("failed creating container [{}]", container), e); throw new RepositoryException(container, e.getMessage(), e); } } @Override - public void deleteFiles(String account, LocationMode mode, String container, String path) throws URISyntaxException, StorageException { - logger.trace("delete files container [{}], path [{}]", container, path); - - // Container name must be lower case. 
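// deleteFiles below (like every operation in this class) follows the same access
// pattern: obtain a fresh client/context pair, then run the SDK calls inside a
// privileged block:
//   final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
//   final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
//   SocketAccess.doPrivilegedVoidException(() -> { ... client.v2().get() ... });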
- CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); + public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { + final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account); + // container name must be lower case. + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + logger.trace(() -> new ParameterizedMessage("delete files container [{}], path [{}]", container, path)); SocketAccess.doPrivilegedVoidException(() -> { if (blobContainer.exists()) { - // We list the blobs using a flat blob listing mode - for (ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null, - generateOperationContext(account))) { - String blobName = blobNameFromUri(blobItem.getUri()); - logger.trace("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri()); - deleteBlob(account, mode, container, blobName); + // list the blobs using a flat blob listing mode + for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null, + client.v2().get())) { + final String blobName = blobNameFromUri(blobItem.getUri()); + logger.trace(() -> new ParameterizedMessage("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri())); + // don't call {@code #deleteBlob}, use the same client + final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blobName); + azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); } } }); @@ -246,82 +181,81 @@ public void deleteFiles(String account, LocationMode mode, String container, Str * @param uri URI to parse * @return The blob name relative to the container */ - public static String blobNameFromUri(URI uri) { - String path = uri.getPath(); - + static String blobNameFromUri(URI uri) { + final String path = uri.getPath(); // We remove the container name from the path // The magic number 3 comes from the fact that if the path is /container/path/to/myfile // First occurrence is empty "/" // Second occurrence is "container" // Last part contains "path/to/myfile" which is what we want to get - String[] splits = path.split("/", 3); - + final String[] splits = path.split("/", 3); // We return the remaining end of the string return splits[2]; } @Override - public boolean blobExists(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException { + public boolean blobExists(String account, String container, String blob) + throws URISyntaxException, StorageException { // Container name must be lower case. 
- CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - if (SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account)))) { - CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); - return SocketAccess.doPrivilegedException(() -> azureBlob.exists(null, null, generateOperationContext(account))); - } - - return false; + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + return SocketAccess.doPrivilegedException(() -> { + if (blobContainer.exists(null, null, client.v2().get())) { + final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); + return azureBlob.exists(null, null, client.v2().get()); + } + return false; + }); } @Override - public void deleteBlob(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException { - logger.trace("delete blob for container [{}], blob [{}]", container, blob); - + public void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException { + final Tuple> client = client(account); // Container name must be lower case. - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - if (SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account)))) { - logger.trace("container [{}]: blob [{}] found. removing.", container, blob); - CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); - SocketAccess.doPrivilegedVoidException(() -> azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, - generateOperationContext(account))); - } + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + logger.trace(() -> new ParameterizedMessage("delete blob for container [{}], blob [{}]", container, blob)); + SocketAccess.doPrivilegedVoidException(() -> { + if (blobContainer.exists(null, null, client.v2().get())) { + final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); + logger.trace(() -> new ParameterizedMessage("container [{}]: blob [{}] found. 
removing.", container, blob)); + azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); + } + }); } - @Override - public InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException { - logger.trace("reading container [{}], blob [{}]", container, blob); - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlockBlob blockBlobReference = client.getContainerReference(container).getBlockBlobReference(blob); - BlobInputStream stream = SocketAccess.doPrivilegedException(() -> - blockBlobReference.openInputStream(null, null, generateOperationContext(account))); - return AzureStorageService.giveSocketPermissionsToStream(stream); + public InputStream getInputStream(String account, String container, String blob) throws URISyntaxException, + StorageException { + final Tuple> client = client(account); + final CloudBlockBlob blockBlobReference = client.v1().getContainerReference(container).getBlockBlobReference(blob); + logger.trace(() -> new ParameterizedMessage("reading container [{}], blob [{}]", container, blob)); + final BlobInputStream is = SocketAccess.doPrivilegedException(() -> + blockBlobReference.openInputStream(null, null, client.v2().get())); + return AzureStorageService.giveSocketPermissionsToStream(is); } @Override - public Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) throws URISyntaxException, StorageException { + public Map listBlobsByPrefix(String account, String container, String keyPath, String prefix) + throws URISyntaxException, StorageException { // NOTE: this should be here: if (prefix == null) prefix = ""; // however, this is really inefficient since deleteBlobsByPrefix enumerates everything and // then does a prefix match on the result; it should just call listBlobsByPrefix with the prefix! - - logger.debug("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); - MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); - EnumSet enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA); - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); + final MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); + final EnumSet enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA); + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + logger.trace(() -> new ParameterizedMessage("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix)); SocketAccess.doPrivilegedVoidException(() -> { if (blobContainer.exists()) { - for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix), false, - enumBlobListingDetails, null, generateOperationContext(account))) { - URI uri = blobItem.getUri(); - logger.trace("blob url [{}]", uri); - + for (final ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? 
"" : prefix), false, + enumBlobListingDetails, null, client.v2().get())) { + final URI uri = blobItem.getUri(); + logger.trace(() -> new ParameterizedMessage("blob url [{}]", uri)); // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / - String blobPath = uri.getPath().substring(1 + container.length() + 1); - BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties(); - String name = blobPath.substring(keyPath.length()); - logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength()); + final String blobPath = uri.getPath().substring(1 + container.length() + 1); + final BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties(); + final String name = blobPath.substring(keyPath.length()); + logger.trace(() -> new ParameterizedMessage("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength())); blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength())); } } @@ -330,22 +264,23 @@ enumBlobListingDetails, null, generateOperationContext(account))) { } @Override - public void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) + public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException, FileAlreadyExistsException { - logger.trace("writeBlob({}, stream, {})", blobName, blobSize); - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName); + logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize)); + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + final CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName); try { SocketAccess.doPrivilegedVoidException(() -> blob.upload(inputStream, blobSize, AccessCondition.generateIfNotExistsCondition(), - null, generateOperationContext(account))); - } catch (StorageException se) { + null, client.v2().get())); + } catch (final StorageException se) { if (se.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT && StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(se.getErrorCode())) { throw new FileAlreadyExistsException(blobName, null, se.getMessage()); } throw se; } - logger.trace("writeBlob({}, stream, {}) - done", blobName, blobSize); + logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {}) - done", blobName, blobSize)); } + } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java index ca333cf0c5ec2..58cd5172e6859 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java @@ -19,10 +19,12 @@ package org.elasticsearch.cloud.azure.storage; +import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.RetryPolicy; import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; import org.elasticsearch.common.collect.Tuple; import 
org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; @@ -31,7 +33,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.TimeValue; - import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Proxy; @@ -88,19 +89,18 @@ public final class AzureStorageSettings { @Deprecated - public static final Setting DEPRECATED_TIMEOUT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "timeout", + public static final AffixSetting DEPRECATED_TIMEOUT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "timeout", (key) -> Setting.timeSetting(key, Storage.TIMEOUT_SETTING, Property.NodeScope, Property.Deprecated)); @Deprecated - public static final Setting DEPRECATED_ACCOUNT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "account", + public static final AffixSetting DEPRECATED_ACCOUNT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "account", (key) -> Setting.simpleString(key, Property.NodeScope, Property.Deprecated)); @Deprecated - public static final Setting DEPRECATED_KEY_SETTING = Setting.affixKeySetting(Storage.PREFIX, "key", + public static final AffixSetting DEPRECATED_KEY_SETTING = Setting.affixKeySetting(Storage.PREFIX, "key", (key) -> Setting.simpleString(key, Property.NodeScope, Property.Deprecated)); @Deprecated - public static final Setting DEPRECATED_DEFAULT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "default", + public static final AffixSetting DEPRECATED_DEFAULT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "default", (key) -> Setting.boolSetting(key, false, Property.NodeScope, Property.Deprecated)); - @Deprecated private final String name; private final String account; @@ -111,9 +111,36 @@ public final class AzureStorageSettings { private final boolean activeByDefault; private final int maxRetries; private final Proxy proxy; + private final LocationMode locationMode; + // copy-constructor + private AzureStorageSettings(String name, String account, String key, String endpointSuffix, TimeValue timeout, boolean activeByDefault, + int maxRetries, Proxy proxy, LocationMode locationMode) { + this.name = name; + this.account = account; + this.key = key; + this.endpointSuffix = endpointSuffix; + this.timeout = timeout; + this.activeByDefault = activeByDefault; + this.maxRetries = maxRetries; + this.proxy = proxy; + this.locationMode = locationMode; + } - public AzureStorageSettings(String account, String key, String endpointSuffix, TimeValue timeout, int maxRetries, + @Deprecated + public AzureStorageSettings(String name, String account, String key, TimeValue timeout, boolean activeByDefault, int maxRetries) { + this.name = name; + this.account = account; + this.key = key; + this.endpointSuffix = null; + this.timeout = timeout; + this.activeByDefault = activeByDefault; + this.maxRetries = maxRetries; + this.proxy = null; + this.locationMode = LocationMode.PRIMARY_ONLY; + } + + AzureStorageSettings(String account, String key, String endpointSuffix, TimeValue timeout, int maxRetries, Proxy.Type proxyType, String proxyHost, Integer proxyPort) { this.name = null; this.account = account; @@ -122,13 +149,12 @@ public AzureStorageSettings(String account, String key, String endpointSuffix, T this.timeout = timeout; this.activeByDefault = false; this.maxRetries = maxRetries; - // 
Register the proxy if we have any // Validate proxy settings - if (proxyType.equals(Proxy.Type.DIRECT) && (proxyPort != 0 || Strings.hasText(proxyHost))) { + if (proxyType.equals(Proxy.Type.DIRECT) && ((proxyPort != 0) || Strings.hasText(proxyHost))) { throw new SettingsException("Azure Proxy port or host have been set but proxy type is not defined."); } - if (proxyType.equals(Proxy.Type.DIRECT) == false && (proxyPort == 0 || Strings.isEmpty(proxyHost))) { + if ((proxyType.equals(Proxy.Type.DIRECT) == false) && ((proxyPort == 0) || Strings.isEmpty(proxyHost))) { throw new SettingsException("Azure Proxy type has been set but proxy host or port is not defined."); } @@ -137,22 +163,11 @@ public AzureStorageSettings(String account, String key, String endpointSuffix, T } else { try { proxy = new Proxy(proxyType, new InetSocketAddress(InetAddress.getByName(proxyHost), proxyPort)); - } catch (UnknownHostException e) { + } catch (final UnknownHostException e) { throw new SettingsException("Azure proxy host is unknown.", e); } } - } - - @Deprecated - public AzureStorageSettings(String name, String account, String key, TimeValue timeout, boolean activeByDefault, int maxRetries) { - this.name = name; - this.account = account; - this.key = key; - this.endpointSuffix = null; - this.timeout = timeout; - this.activeByDefault = activeByDefault; - this.maxRetries = maxRetries; - this.proxy = null; + this.locationMode = LocationMode.PRIMARY_ONLY; } @Deprecated @@ -189,17 +204,34 @@ public Proxy getProxy() { return proxy; } + public String buildConnectionString() { + final StringBuilder connectionStringBuilder = new StringBuilder(); + connectionStringBuilder.append("DefaultEndpointsProtocol=https") + .append(";AccountName=") + .append(account) + .append(";AccountKey=") + .append(key); + if (Strings.hasText(endpointSuffix)) { + connectionStringBuilder.append(";EndpointSuffix=").append(endpointSuffix); + } + return connectionStringBuilder.toString(); + } + + public LocationMode getLocationMode() { + return locationMode; + } + @Override public String toString() { final StringBuilder sb = new StringBuilder("AzureStorageSettings{"); - sb.append("name='").append(name).append('\''); - sb.append(", account='").append(account).append('\''); + sb.append("account='").append(account).append('\''); sb.append(", key='").append(key).append('\''); sb.append(", activeByDefault='").append(activeByDefault).append('\''); sb.append(", timeout=").append(timeout); sb.append(", endpointSuffix='").append(endpointSuffix).append('\''); sb.append(", maxRetries=").append(maxRetries); sb.append(", proxy=").append(proxy); + sb.append(", locationMode='").append(locationMode).append('\''); sb.append('}'); return sb.toString(); } @@ -216,23 +248,46 @@ public static Tuple> loa } /** - * Parses settings and read all settings available under azure.client.* + * Parse and read all settings available under the azure.client.* namespace * @param settings settings to parse * @return All the named configurations */ public static Map load(Settings settings) { + final Map regularStorageSettings = loadRegular(settings); + final Tuple> storageSettingsMapTuple = AzureStorageSettings + .loadLegacy(settings); + final Map deprecatedStorageSettings = storageSettingsMapTuple.v2(); + final Map storageSettings; + if (regularStorageSettings.isEmpty() == false) { + storageSettings = regularStorageSettings; + } else { + storageSettings = storageSettingsMapTuple.v2(); + if (storageSettingsMapTuple.v1() != null) { + if 
(storageSettingsMapTuple.v1().getName().equals("default") == false) { + // We add the primary configuration to the list of all settings with its deprecated name in case someone is + // forcing a specific configuration name when creating the repository instance + deprecatedStorageSettings.put(storageSettingsMapTuple.v1().getName(), storageSettingsMapTuple.v1()); + } + // We add the primary configuration to the list of all settings as the "default" one + deprecatedStorageSettings.put("default", storageSettingsMapTuple.v1()); + } + } + return storageSettings; + } + + static Map loadRegular(Settings settings) { // Get the list of existing named configurations - Map storageSettings = new HashMap<>(); - for (String clientName : ACCOUNT_SETTING.getNamespaces(settings)) { + final Map storageSettings = new HashMap<>(); + for (final String clientName : ACCOUNT_SETTING.getNamespaces(settings)) { storageSettings.put(clientName, getClientSettings(settings, clientName)); } - - if (storageSettings.containsKey("default") == false && storageSettings.isEmpty() == false) { + if (false == storageSettings.containsKey("default") && false == storageSettings.isEmpty()) { // in case no setting named "default" has been set, let's define our "default" // as the first named config we get - AzureStorageSettings defaultSettings = storageSettings.values().iterator().next(); + final AzureStorageSettings defaultSettings = storageSettings.values().iterator().next(); storageSettings.put("default", defaultSettings); } + assert storageSettings.containsKey("default") || storageSettings.isEmpty() : "always have 'default' if any"; return Collections.unmodifiableMap(storageSettings); } @@ -272,13 +327,13 @@ private static List createStorageSettingsDeprecated(Settin private static T getConfigValue(Settings settings, String clientName, Setting.AffixSetting clientSetting) { - Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); + final Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); return concreteSetting.get(settings); } public static T getValue(Settings settings, String groupName, Setting setting) { - Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey(); - String fullKey = k.toConcreteKey(groupName).toString(); + final Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey(); + final String fullKey = k.toConcreteKey(groupName).toString(); return setting.getConcreteSetting(fullKey).get(settings); } @@ -322,4 +377,16 @@ private static Map getSecondaries(List overrideLocationMode(Map clientsSettings, + LocationMode locationMode) { + final MapBuilder mapBuilder = new MapBuilder<>(); + for (final Map.Entry entry : clientsSettings.entrySet()) { + final AzureStorageSettings azureSettings = new AzureStorageSettings(entry.getValue().name, entry.getValue().account, + entry.getValue().key, entry.getValue().endpointSuffix, entry.getValue().timeout, entry.getValue().activeByDefault, + entry.getValue().maxRetries, entry.getValue().proxy, locationMode); + mapBuilder.put(entry.getKey(), azureSettings); + } + return mapBuilder.immutableMap(); + } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java index abaac4fb9a221..668830a608e27 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ 
b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java @@ -24,9 +24,11 @@ import org.elasticsearch.cloud.azure.storage.AzureStorageSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.azure.AzureRepository; @@ -39,24 +41,20 @@ /** * A plugin to add a repository type that writes to and from the Azure cloud storage service. */ -public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin { +public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { - private final Map clientsSettings; - - // overridable for tests - protected AzureStorageService createStorageService(Settings settings) { - return new AzureStorageServiceImpl(settings, clientsSettings); - } + // public for testing + public final AzureStorageService azureStoreService; public AzureRepositoryPlugin(Settings settings) { // eagerly load client settings so that secure settings are read - clientsSettings = AzureStorageSettings.load(settings); + this.azureStoreService = new AzureStorageServiceImpl(settings); } @Override public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { return Collections.singletonMap(AzureRepository.TYPE, - (metadata) -> new AzureRepository(metadata, env, namedXContentRegistry, createStorageService(env.settings()))); + (metadata) -> new AzureRepository(metadata, env, namedXContentRegistry, azureStoreService)); } @Override @@ -78,4 +76,13 @@ public List getSettingsFilter() { // Cloud storage API settings using a pattern needed to be hidden return Arrays.asList(AzureStorageService.Storage.PREFIX + "*.account", AzureStorageService.Storage.PREFIX + "*.key"); } + + public void reload(Settings settings) { + // secure settings should be readable + final Map clientsSettings = AzureStorageSettings.load(settings); + if (clientsSettings.isEmpty()) { + throw new SettingsException("If you want to use an azure repository, you need to define a client configuration."); + } + azureStoreService.refreshAndClearCache(clientsSettings); + } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 2188bc3e9f290..06e6b93d67255 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -19,10 +19,12 @@ package org.elasticsearch.repositories.azure; +import org.apache.logging.log4j.message.ParameterizedMessage; import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore; import org.elasticsearch.cloud.azure.storage.AzureStorageService; + import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; @@ -35,6 +37,7 @@ import 
org.elasticsearch.env.Environment; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.snapshots.SnapshotCreationException; import org.elasticsearch.snapshots.SnapshotId; import java.io.IOException; @@ -62,19 +65,19 @@ public class AzureRepository extends BlobStoreRepository { public static final String TYPE = "azure"; public static final class Repository { - @Deprecated // Replaced by client public static final Setting ACCOUNT_SETTING = new Setting<>("account", "default", Function.identity(), Property.NodeScope, Property.Deprecated); public static final Setting CLIENT_NAME = new Setting<>("client", ACCOUNT_SETTING, Function.identity()); - public static final Setting CONTAINER_SETTING = new Setting<>("container", "elasticsearch-snapshots", Function.identity(), Property.NodeScope); public static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path", Property.NodeScope); - public static final Setting LOCATION_MODE_SETTING = Setting.simpleString("location_mode", Property.NodeScope); + public static final Setting LOCATION_MODE_SETTING = new Setting<>("location_mode", + s -> LocationMode.PRIMARY_ONLY.toString(), s -> LocationMode.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope); public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); + public static final Setting READONLY_SETTING = Setting.boolSetting("readonly", false, Property.NodeScope); } private final AzureBlobStore blobStore; @@ -83,45 +86,32 @@ public static final class Repository { private final boolean compress; private final boolean readonly; - public AzureRepository(RepositoryMetaData metadata, Environment environment, - NamedXContentRegistry namedXContentRegistry, AzureStorageService storageService) - throws IOException, URISyntaxException, StorageException { + public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, + AzureStorageService storageService) throws IOException, URISyntaxException, StorageException { super(metadata, environment.settings(), namedXContentRegistry); - - blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); - String container = Repository.CONTAINER_SETTING.get(metadata.settings()); + this.blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.compress = Repository.COMPRESS_SETTING.get(metadata.settings()); - String modeStr = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); - Boolean forcedReadonly = metadata.settings().getAsBoolean("readonly", null); // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. 
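// READONLY_SETTING.exists(...) below distinguishes an explicit "readonly: false" from
// the setting being absent altogether; only the absent case falls through to the
// location-mode based default.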
// For secondary_only setting, the repository should be read only - if (forcedReadonly == null) { - if (Strings.hasLength(modeStr)) { - LocationMode locationMode = LocationMode.valueOf(modeStr.toUpperCase(Locale.ROOT)); - this.readonly = locationMode == LocationMode.SECONDARY_ONLY; - } else { - this.readonly = false; - } + if (Repository.READONLY_SETTING.exists(metadata.settings())) { + this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); } else { - readonly = forcedReadonly; + this.readonly = this.blobStore.getLocationMode() == LocationMode.SECONDARY_ONLY; } - - String basePath = Repository.BASE_PATH_SETTING.get(metadata.settings()); - + final String basePath = Strings.trimLeadingCharacter(Repository.BASE_PATH_SETTING.get(metadata.settings()), '/'); if (Strings.hasLength(basePath)) { // Remove starting / if any - basePath = Strings.trimLeadingCharacter(basePath, '/'); BlobPath path = new BlobPath(); - for(String elem : basePath.split("/")) { + for(final String elem : basePath.split("/")) { path = path.add(elem); } this.basePath = path; } else { this.basePath = BlobPath.cleanPath(); } - logger.debug("using container [{}], chunk_size [{}], compress [{}], base_path [{}]", - container, chunkSize, compress, basePath); + logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", blobStore, chunkSize, compress, basePath)); } /** @@ -155,9 +145,13 @@ protected ByteSizeValue chunkSize() { @Override public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData clusterMetadata) { - if (blobStore.doesContainerExist() == false) { - throw new IllegalArgumentException("The bucket [" + blobStore + "] does not exist. Please create it before " + - " creating an azure snapshot repository backed by it."); + try { + if (blobStore.containerExist() == false) { + throw new IllegalArgumentException("The bucket [" + blobStore + "] does not exist. 
Please create it before " + + " creating an azure snapshot repository backed by it."); + } + } catch (URISyntaxException | StorageException e) { + throw new SnapshotCreationException(metadata.name(), snapshotId, e); } super.initializeSnapshot(snapshotId, indices, clusterMetadata); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java index cdfb94b0aadbd..dc666bfce3e8d 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java @@ -19,11 +19,14 @@ package org.elasticsearch.cloud.azure.storage; -import com.microsoft.azure.storage.LocationMode; +import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobClient; + import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.Streams; @@ -40,6 +43,9 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; + +import static java.util.Collections.emptyMap; /** * In memory storage for unit tests @@ -53,42 +59,44 @@ public AzureStorageServiceMock() { } @Override - public boolean doesContainerExist(String account, LocationMode mode, String container) { + public boolean doesContainerExist(String account, String container) { return true; } @Override - public void removeContainer(String account, LocationMode mode, String container) { + public void removeContainer(String account, String container) { } @Override - public void createContainer(String account, LocationMode mode, String container) { + public void createContainer(String account, String container) { } @Override - public void deleteFiles(String account, LocationMode mode, String container, String path) { + public void deleteFiles(String account, String container, String path) { + final Map blobs = listBlobsByPrefix(account, container, path, null); + blobs.keySet().forEach(key -> deleteBlob(account, container, key)); } @Override - public boolean blobExists(String account, LocationMode mode, String container, String blob) { + public boolean blobExists(String account, String container, String blob) { return blobs.containsKey(blob); } @Override - public void deleteBlob(String account, LocationMode mode, String container, String blob) { + public void deleteBlob(String account, String container, String blob) { blobs.remove(blob); } @Override - public InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws IOException { - if (!blobExists(account, mode, container, blob)) { + public InputStream getInputStream(String account, String container, String blob) throws IOException { + if (!blobExists(account, container, blob)) { throw new NoSuchFileException("missing blob [" + blob + "]"); } return AzureStorageService.giveSocketPermissionsToStream(new PermissionRequiringInputStream(blobs.get(blob).toByteArray())); } @Override - public Map 
listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) { + public Map listBlobsByPrefix(String account, String container, String keyPath, String prefix) { MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); blobs.forEach((String blobName, ByteArrayOutputStream bos) -> { final String checkBlob; @@ -106,7 +114,7 @@ public Map listBlobsByPrefix(String account, LocationMode } @Override - public void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) + public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException, FileAlreadyExistsException { if (blobs.containsKey(blobName)) { throw new FileAlreadyExistsException(blobName); @@ -166,4 +174,14 @@ public synchronized int read(byte[] b, int off, int len) { return super.read(b, off, len); } } + + @Override + public Tuple> client(String clientName) { + return null; + } + + @Override + public Map refreshAndClearCache(Map clientsSettings) { + return emptyMap(); + } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java index 7677cf7c4cd42..5d515494fb486 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cloud.azure.storage; -import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.core.Base64; @@ -27,8 +26,10 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Proxy; @@ -36,7 +37,6 @@ import java.net.URISyntaxException; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; -import java.util.Collections; import java.util.Map; import static org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl.blobNameFromUri; @@ -68,17 +68,10 @@ public class AzureStorageServiceTests extends ESTestCase { .build(); public void testReadSecuredSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("azure.client.azure1.account", "myaccount1"); - secureSettings.setString("azure.client.azure1.key", encodeKey("mykey1")); - secureSettings.setString("azure.client.azure2.account", "myaccount2"); - secureSettings.setString("azure.client.azure2.key", encodeKey("mykey2")); - secureSettings.setString("azure.client.azure3.account", "myaccount3"); - secureSettings.setString("azure.client.azure3.key", encodeKey("mykey3")); - Settings settings = Settings.builder().setSecureSettings(secureSettings) + final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()) .put("azure.client.azure3.endpoint_suffix", "my_endpoint_suffix").build(); - Map loadedSettings = AzureStorageSettings.load(settings); + final Map 
loadedSettings = AzureStorageSettings.load(settings); assertThat(loadedSettings.keySet(), containsInAnyOrder("azure1","azure2","azure3","default")); assertThat(loadedSettings.get("azure1").getEndpointSuffix(), isEmptyString()); @@ -86,111 +79,177 @@ public void testReadSecuredSettings() { assertThat(loadedSettings.get("azure3").getEndpointSuffix(), equalTo("my_endpoint_suffix")); } - public void testCreateClientWithEndpointSuffix() { - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("azure.client.azure1.account", "myaccount1"); - secureSettings.setString("azure.client.azure1.key", encodeKey("mykey1")); - secureSettings.setString("azure.client.azure2.account", "myaccount2"); - secureSettings.setString("azure.client.azure2.key", encodeKey("mykey2")); - Settings settings = Settings.builder().setSecureSettings(secureSettings) + public void testCreateClientWithEndpointSuffix() throws IOException { + final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build(); - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(settings, AzureStorageSettings.load(settings)); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix")); + try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { + final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); + assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix")); + final CloudBlobClient client2 = azureStorageService.client("azure2").v1(); + assertThat(client2.getEndpoint().toString(), equalTo("https://myaccount2.blob.core.windows.net")); + } + } - CloudBlobClient client2 = azureStorageService.getSelectedClient("azure2", LocationMode.PRIMARY_ONLY); - assertThat(client2.getEndpoint().toString(), equalTo("https://myaccount2.blob.core.windows.net")); + public void testReinitClientSettings() throws IOException { + final MockSecureSettings secureSettings1 = new MockSecureSettings(); + secureSettings1.setString("azure.client.azure1.account", "myaccount11"); + secureSettings1.setString("azure.client.azure1.key", encodeKey("mykey11")); + secureSettings1.setString("azure.client.azure2.account", "myaccount12"); + secureSettings1.setString("azure.client.azure2.key", encodeKey("mykey12")); + final Settings settings1 = Settings.builder().setSecureSettings(secureSettings1).build(); + final MockSecureSettings secureSettings2 = new MockSecureSettings(); + secureSettings2.setString("azure.client.azure1.account", "myaccount21"); + secureSettings2.setString("azure.client.azure1.key", encodeKey("mykey21")); + secureSettings2.setString("azure.client.azure3.account", "myaccount23"); + secureSettings2.setString("azure.client.azure3.key", encodeKey("mykey23")); + final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); + try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { + final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); + assertThat(client11.getEndpoint().toString(), 
equalTo("https://myaccount11.blob.core.windows.net")); + final CloudBlobClient client12 = azureStorageService.client("azure2").v1(); + assertThat(client12.getEndpoint().toString(), equalTo("https://myaccount12.blob.core.windows.net")); + // client 3 is missing + final SettingsException e1 = expectThrows(SettingsException.class, () -> azureStorageService.client("azure3")); + assertThat(e1.getMessage(), is("Unable to find client with name [azure3]")); + // update client settings + plugin.reload(settings2); + // old client 1 not changed + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net")); + // new client 1 is changed + final CloudBlobClient client21 = azureStorageService.client("azure1").v1(); + assertThat(client21.getEndpoint().toString(), equalTo("https://myaccount21.blob.core.windows.net")); + // old client 2 not changed + assertThat(client12.getEndpoint().toString(), equalTo("https://myaccount12.blob.core.windows.net")); + // new client2 is gone + final SettingsException e2 = expectThrows(SettingsException.class, () -> azureStorageService.client("azure2")); + assertThat(e2.getMessage(), is("Unable to find client with name [azure2]")); + // client 3 emerged + final CloudBlobClient client23 = azureStorageService.client("azure3").v1(); + assertThat(client23.getEndpoint().toString(), equalTo("https://myaccount23.blob.core.windows.net")); + } } - public void testGetSelectedClientWithNoPrimaryAndSecondary() { - try { - new AzureStorageServiceImpl(Settings.EMPTY, Collections.emptyMap()); - fail("we should have raised an IllegalArgumentException"); - } catch (IllegalArgumentException e) { + public void testReinitClientEmptySettings() throws IOException { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("azure.client.azure1.account", "myaccount1"); + secureSettings.setString("azure.client.azure1.key", encodeKey("mykey11")); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { + final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + // reinit with empty settings + final SettingsException e = expectThrows(SettingsException.class, () -> plugin.reload(Settings.EMPTY)); assertThat(e.getMessage(), is("If you want to use an azure repository, you need to define a client configuration.")); + // existing client untouched + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + // new client also untouched + final CloudBlobClient client21 = azureStorageService.client("azure1").v1(); + assertThat(client21.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + } + } + + public void testReinitClientWrongSettings() throws IOException { + final MockSecureSettings secureSettings1 = new MockSecureSettings(); + secureSettings1.setString("azure.client.azure1.account", "myaccount1"); + secureSettings1.setString("azure.client.azure1.key", encodeKey("mykey11")); + final Settings settings1 = Settings.builder().setSecureSettings(secureSettings1).build(); + final MockSecureSettings secureSettings2 = new MockSecureSettings(); + secureSettings2.setString("azure.client.azure1.account", "myaccount1"); + // missing key + 
final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); + try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { + final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + plugin.reload(settings2); + // existing client untouched + assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure1")); + assertThat(e.getMessage(), is("Invalid azure client settings with name [azure1]")); } } public void testGetSelectedClientNonExisting() { - AzureStorageServiceImpl azureStorageService = createAzureService(buildSettings()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - azureStorageService.getSelectedClient("azure4", LocationMode.PRIMARY_ONLY); - }); - assertThat(e.getMessage(), is("Unable to find Azure client")); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(buildSettings()); + final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure4")); + assertThat(e.getMessage(), is("Unable to find client with name [azure4]")); } public void testGetSelectedClientGlobalTimeout() { - Settings timeoutSettings = Settings.builder() + final Settings timeoutSettings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put(AzureStorageService.Storage.TIMEOUT_SETTING.getKey(), "10s") .put("azure.client.azure3.timeout", "30s") .build(); - AzureStorageServiceImpl azureStorageService = createAzureService(timeoutSettings); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(timeoutSettings); + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(10 * 1000)); - CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); + final CloudBlobClient client3 = azureStorageService.client("azure3").v1(); assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000)); assertSettingDeprecationsAndWarnings(new Setting[]{AzureStorageService.Storage.TIMEOUT_SETTING}); } public void testGetSelectedClientDefaultTimeout() { - Settings timeoutSettings = Settings.builder() + final Settings timeoutSettings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure3.timeout", "30s") .build(); - AzureStorageServiceImpl azureStorageService = createAzureService(timeoutSettings); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(timeoutSettings); + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue()); - CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); + final CloudBlobClient client3 = azureStorageService.client("azure3").v1(); 
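// the per-client setting ("azure.client.azure3.timeout", 30s) takes precedence over the global storage timeout (10s)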
assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000)); } public void testGetSelectedClientNoTimeout() { - AzureStorageServiceImpl azureStorageService = createAzureService(buildSettings()); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(buildSettings()); + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue())); } public void testGetSelectedClientBackoffPolicy() { - AzureStorageServiceImpl azureStorageService = createAzureService(buildSettings()); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(buildSettings()); + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); } public void testGetSelectedClientBackoffPolicyNbRetries() { - Settings timeoutSettings = Settings.builder() + final Settings timeoutSettings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.max_retries", 7) .build(); - AzureStorageServiceImpl azureStorageService = createAzureService(timeoutSettings); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(timeoutSettings); + final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); } public void testNoProxy() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .build(); - AzureStorageServiceImpl mock = createAzureService(settings); + final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings); assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); } public void testProxyHttp() throws UnknownHostException { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "http") .build(); - AzureStorageServiceImpl mock = createAzureService(settings); - Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings); + final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); @@ -200,7 +259,7 @@ public void testProxyHttp() throws UnknownHostException { } public void testMultipleProxies() throws UnknownHostException { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() 
.setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.port", 8080) @@ -209,12 +268,12 @@ public void testMultipleProxies() throws UnknownHostException { .put("azure.client.azure2.proxy.port", 8081) .put("azure.client.azure2.proxy.type", "http") .build(); - AzureStorageServiceImpl mock = createAzureService(settings); - Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings); + final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); - Proxy azure2Proxy = mock.storageSettings.get("azure2").getProxy(); + final Proxy azure2Proxy = mock.storageSettings.get("azure2").getProxy(); assertThat(azure2Proxy, notNullValue()); assertThat(azure2Proxy.type(), is(Proxy.Type.HTTP)); assertThat(azure2Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8081))); @@ -222,14 +281,14 @@ public void testMultipleProxies() throws UnknownHostException { } public void testProxySocks() throws UnknownHostException { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "socks") .build(); - AzureStorageServiceImpl mock = createAzureService(settings); - Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings); + final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS)); assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); @@ -238,47 +297,46 @@ public void testProxySocks() throws UnknownHostException { } public void testProxyNoHost() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - - SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } public void testProxyNoPort() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } public void testProxyNoType() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) 
.put("azure.client.azure1.proxy.host", "127.0.0.1") .put("azure.client.azure1.proxy.port", 8080) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings)); assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage()); } public void testProxyWrongHost() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .put("azure.client.azure1.proxy.host", "thisisnotavalidhostorwehavebeensuperunlucky") .put("azure.client.azure1.proxy.port", 8080) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings)); assertEquals("Azure proxy host is unknown.", e.getMessage()); } @@ -297,11 +355,11 @@ public void testBlobNameFromUri() throws URISyntaxException { @Deprecated public void testGetSelectedClientWithNoSecondary() { - AzureStorageServiceImpl azureStorageService = createAzureService(Settings.builder() + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(Settings.builder() .put("cloud.azure.storage.azure1.account", "myaccount1") .put("cloud.azure.storage.azure1.key", encodeKey("mykey1")) .build()); - CloudBlobClient client = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + CloudBlobClient client = azureStorageService.client("azure1").v1(); assertThat(client.getEndpoint(), is(URI.create("https://myaccount1.blob.core.windows.net"))); assertSettingDeprecationsAndWarnings(new Setting[]{ getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), @@ -311,11 +369,11 @@ public void testGetSelectedClientWithNoSecondary() { @Deprecated public void testGetDefaultClientWithNoSecondary() { - AzureStorageServiceImpl azureStorageService = createAzureService(Settings.builder() + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(Settings.builder() .put("cloud.azure.storage.azure1.account", "myaccount1") .put("cloud.azure.storage.azure1.key", encodeKey("mykey1")) .build()); - CloudBlobClient client = azureStorageService.getSelectedClient("default", LocationMode.PRIMARY_ONLY); + CloudBlobClient client = azureStorageService.client("default").v1(); assertThat(client.getEndpoint(), is(URI.create("https://myaccount1.blob.core.windows.net"))); assertSettingDeprecationsAndWarnings(new Setting[]{ getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), @@ -325,42 +383,42 @@ public void testGetDefaultClientWithNoSecondary() { @Deprecated public void testGetSelectedClientPrimary() { - AzureStorageServiceImpl azureStorageService = createAzureService(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(deprecatedSettings); + CloudBlobClient client = azureStorageService.client("azure1").v1(); assertThat(client.getEndpoint(), is(URI.create("https://myaccount1.blob.core.windows.net"))); assertDeprecatedWarnings(); } @Deprecated public void testGetSelectedClientSecondary1() { - AzureStorageServiceImpl azureStorageService = createAzureService(deprecatedSettings); - CloudBlobClient client = 
azureStorageService.getSelectedClient("azure2", LocationMode.PRIMARY_ONLY); + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(deprecatedSettings); + CloudBlobClient client = azureStorageService.client("azure2").v1(); assertThat(client.getEndpoint(), is(URI.create("https://myaccount2.blob.core.windows.net"))); assertDeprecatedWarnings(); } @Deprecated public void testGetSelectedClientSecondary2() { - AzureStorageServiceImpl azureStorageService = createAzureService(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(deprecatedSettings); + CloudBlobClient client = azureStorageService.client("azure3").v1(); assertThat(client.getEndpoint(), is(URI.create("https://myaccount3.blob.core.windows.net"))); assertDeprecatedWarnings(); } @Deprecated public void testGetDefaultClientWithPrimaryAndSecondaries() { - AzureStorageServiceImpl azureStorageService = createAzureService(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("default", LocationMode.PRIMARY_ONLY); + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(deprecatedSettings); + CloudBlobClient client = azureStorageService.client("default").v1(); assertThat(client.getEndpoint(), is(URI.create("https://myaccount1.blob.core.windows.net"))); assertDeprecatedWarnings(); } @Deprecated public void testGenerateOperationContext() { - AzureStorageServiceImpl azureStorageService = createAzureService(deprecatedSettings); + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(deprecatedSettings); // This was producing a NPE when calling any operation with deprecated settings. 
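// the OperationContext is now built lazily via the supplier in the second slot of the client() tuple, so v2().get() exercises the same code path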
// See https://github.com/elastic/elasticsearch/issues/28299 - azureStorageService.generateOperationContext("default"); + azureStorageService.client("default").v2().get(); assertDeprecatedWarnings(); } @@ -378,7 +436,7 @@ private void assertDeprecatedWarnings() { } private static MockSecureSettings buildSecureSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); + final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("azure.client.azure1.account", "myaccount1"); secureSettings.setString("azure.client.azure1.key", encodeKey("mykey1")); secureSettings.setString("azure.client.azure2.account", "myaccount2"); @@ -392,10 +450,6 @@ private static Settings buildSettings() { return Settings.builder().setSecureSettings(buildSecureSettings()).build(); } - private static AzureStorageServiceImpl createAzureService(final Settings settings) { - return new AzureStorageServiceImpl(settings, AzureStorageSettings.load(settings)); - } - private static String encodeKey(final String value) { return Base64.encode(value.getBytes(StandardCharsets.UTF_8)); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java index 9e2febb987416..0c3117602effa 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; @@ -68,6 +69,15 @@ public void testSettingsFiltering() throws IOException { String filteredSettingsString = Strings.toString(xContentBuilder); filteredSettings = Settings.builder().loadFromSource(filteredSettingsString, xContentBuilder.contentType()).build(); assertThat(filteredSettings.keySet(), contains("cloud.azure.storage.azure1.default")); + + assertSettingDeprecationsAndWarnings( + new Setting[] { AzureStorageSettings.DEPRECATED_ACCOUNT_SETTING.getConcreteSettingForNamespace("azure1"), + AzureStorageSettings.DEPRECATED_KEY_SETTING.getConcreteSettingForNamespace("azure1"), + AzureStorageSettings.DEPRECATED_ACCOUNT_SETTING.getConcreteSettingForNamespace("azure2"), + AzureStorageSettings.DEPRECATED_KEY_SETTING.getConcreteSettingForNamespace("azure2"), + AzureStorageSettings.DEPRECATED_ACCOUNT_SETTING.getConcreteSettingForNamespace("azure3"), + AzureStorageSettings.DEPRECATED_KEY_SETTING.getConcreteSettingForNamespace("azure3"), + AzureStorageSettings.DEPRECATED_DEFAULT_SETTING.getConcreteSettingForNamespace("azure1") }); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 4936ba7806a69..c87152c44a08e 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -35,6 +35,7 @@ import 
java.net.URISyntaxException; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; public class AzureRepositorySettingsTests extends ESTestCase { @@ -45,7 +46,7 @@ private AzureRepository azureRepository(Settings settings) throws StorageExcepti .put(settings) .build(); return new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings), - TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, null); + TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, mock(AzureStorageService.class)); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java index 87166e67c3e04..0906c86e22baa 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java @@ -19,9 +19,7 @@ package org.elasticsearch.repositories.azure; - import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -29,8 +27,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.cloud.azure.storage.AzureStorageService; -import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; -import org.elasticsearch.cloud.azure.storage.AzureStorageSettings; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -81,9 +77,9 @@ private static Settings.Builder generateMockSettings() { return Settings.builder().setSecureSettings(generateMockSecureSettings()); } + @SuppressWarnings("resource") private static AzureStorageService getAzureStorageService() { - return new AzureStorageServiceImpl(generateMockSettings().build(), - AzureStorageSettings.load(generateMockSettings().build())); + return new AzureRepositoryPlugin(generateMockSettings().build()).azureStoreService; } @Override @@ -98,7 +94,7 @@ private static String getContainerName() { * there mustn't be a hyphen between the 2 concatenated numbers * (can't have 2 consecutive hyphens on Azure containers) */ - String testName = "snapshot-itest-" + final String testName = "snapshot-itest-" .concat(RandomizedTest.getContext().getRunnerSeedAsString().toLowerCase(Locale.ROOT)); return testName.contains(" ") ? 
Strings.split(testName, " ")[0] : testName; } @@ -127,7 +123,7 @@ private static void createTestContainer(String containerName) throws Exception { // It could happen that we run this test really close to a previous one // so we might need some time to be able to create the container assertBusy(() -> { - getAzureStorageService().createContainer("default", LocationMode.PRIMARY_ONLY, containerName); + getAzureStorageService().createContainer("default", containerName); }, 30, TimeUnit.SECONDS); } @@ -136,7 +132,7 @@ private static void createTestContainer(String containerName) throws Exception { * @param containerName container name to use */ private static void removeTestContainer(String containerName) throws URISyntaxException, StorageException { - getAzureStorageService().removeContainer("default", LocationMode.PRIMARY_ONLY, containerName); + getAzureStorageService().removeContainer("default", containerName); } @Override @@ -145,7 +141,7 @@ protected Collection> nodePlugins() { } private String getRepositoryPath() { - String testName = "it-" + getTestName(); + final String testName = "it-" + getTestName(); return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; } @@ -163,21 +159,21 @@ public Settings indexSettings() { public final void wipeAzureRepositories() { try { client().admin().cluster().prepareDeleteRepository("*").get(); - } catch (RepositoryMissingException ignored) { + } catch (final RepositoryMissingException ignored) { } } public void testMultipleRepositories() { - Client client = client(); + final Client client = client(); logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); - PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1") + final PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1") .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName().concat("-1")) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) ).get(); assertThat(putRepositoryResponse1.isAcknowledged(), equalTo(true)); - PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2") + final PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2") .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName().concat("-2")) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) @@ -198,14 +194,14 @@ public void testMultipleRepositories() { assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); logger.info("--> snapshot 1"); - CreateSnapshotResponse createSnapshotResponse1 = client.admin().cluster().prepareCreateSnapshot("test-repo1", "test-snap") + final CreateSnapshotResponse createSnapshotResponse1 = client.admin().cluster().prepareCreateSnapshot("test-repo1", "test-snap") .setWaitForCompletion(true).setIndices("test-idx-1").get(); assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse1.getSnapshotInfo().totalShards())); logger.info("--> snapshot 2"); - CreateSnapshotResponse createSnapshotResponse2 = client.admin().cluster().prepareCreateSnapshot("test-repo2", 
"test-snap") + final CreateSnapshotResponse createSnapshotResponse2 = client.admin().cluster().prepareCreateSnapshot("test-repo2", "test-snap") .setWaitForCompletion(true).setIndices("test-idx-2").get(); assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), @@ -220,7 +216,7 @@ public void testMultipleRepositories() { logger.info("--> delete indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("--> restore one index after deletion from snapshot 1"); - RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin().cluster().prepareRestoreSnapshot("test-repo1", "test-snap") + final RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin().cluster().prepareRestoreSnapshot("test-repo1", "test-snap") .setWaitForCompletion(true).setIndices("test-idx-1").get(); assertThat(restoreSnapshotResponse1.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -230,7 +226,7 @@ public void testMultipleRepositories() { assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false)); logger.info("--> restore other index after deletion from snapshot 2"); - RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin().cluster().prepareRestoreSnapshot("test-repo2", "test-snap") + final RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin().cluster().prepareRestoreSnapshot("test-repo2", "test-snap") .setWaitForCompletion(true).setIndices("test-idx-2").get(); assertThat(restoreSnapshotResponse2.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -256,7 +252,7 @@ public void testListBlobs_26() throws StorageException, URISyntaxException { } refresh(); - ClusterAdminClient client = client().admin().cluster(); + final ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository without any path"); PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") .setSettings(Settings.builder() @@ -304,9 +300,9 @@ public void testListBlobs_26() throws StorageException, URISyntaxException { */ public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISyntaxException { final String repositoryName="test-repo-28"; - ClusterAdminClient client = client().admin().cluster(); + final ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository without any path"); - PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") + final PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") .setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) ).get(); @@ -315,14 +311,14 @@ public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISy try { client.prepareGetSnapshots(repositoryName).addSnapshots("nonexistingsnapshotname").get(); fail("Shouldn't be here"); - } catch (SnapshotMissingException ex) { + } catch (final SnapshotMissingException ex) { // Expected } try { client.prepareDeleteSnapshot(repositoryName, "nonexistingsnapshotname").get(); fail("Shouldn't be here"); - } catch (SnapshotMissingException ex) { + } catch (final SnapshotMissingException ex) { // Expected } } @@ -332,9 +328,9 @@ public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISy */ public void testNonExistingRepo_23() { final String repositoryName = 
"test-repo-test23"; - Client client = client(); + final Client client = client(); logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName) + final PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName) .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) @@ -346,7 +342,7 @@ public void testNonExistingRepo_23() { try { client.admin().cluster().prepareRestoreSnapshot(repositoryName, "no-existing-snapshot").setWaitForCompletion(true).get(); fail("Shouldn't be here"); - } catch (SnapshotRestoreException ex) { + } catch (final SnapshotRestoreException ex) { // Expected } } @@ -360,7 +356,7 @@ public void testRemoveAndCreateContainer() throws Exception { createTestContainer(container); removeTestContainer(container); - ClusterAdminClient client = client().admin().cluster(); + final ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository while container is being removed"); try { client.preparePutRepository("test-repo").setType("azure") @@ -368,7 +364,7 @@ public void testRemoveAndCreateContainer() throws Exception { .put(Repository.CONTAINER_SETTING.getKey(), container) ).get(); fail("we should get a RepositoryVerificationException"); - } catch (RepositoryVerificationException e) { + } catch (final RepositoryVerificationException e) { // Fine we expect that } } @@ -382,9 +378,9 @@ public void testRemoveAndCreateContainer() throws Exception { * @throws Exception If anything goes wrong */ public void testGeoRedundantStorage() throws Exception { - Client client = client(); + final Client client = client(); logger.info("--> creating azure primary repository"); - PutRepositoryResponse putRepositoryResponsePrimary = client.admin().cluster().preparePutRepository("primary") + final PutRepositoryResponse putRepositoryResponsePrimary = client.admin().cluster().preparePutRepository("primary") .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) ).get(); @@ -398,7 +394,7 @@ public void testGeoRedundantStorage() throws Exception { assertThat(endWait - startWait, lessThanOrEqualTo(30000L)); logger.info("--> creating azure secondary repository"); - PutRepositoryResponse putRepositoryResponseSecondary = client.admin().cluster().preparePutRepository("secondary") + final PutRepositoryResponse putRepositoryResponseSecondary = client.admin().cluster().preparePutRepository("secondary") .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.LOCATION_MODE_SETTING.getKey(), "secondary_only") diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 78fd9461ad54d..c20b99790088e 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -64,18 +64,24 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore // 
https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload private static final int LARGE_BLOB_THRESHOLD_BYTE_SIZE = 5 * 1024 * 1024; - private final Storage storage; - private final String bucket; + private final String bucketName; + private final String clientName; + private final GoogleCloudStorageService storageService; - GoogleCloudStorageBlobStore(Settings settings, String bucket, Storage storage) { + GoogleCloudStorageBlobStore(Settings settings, String bucketName, String clientName, GoogleCloudStorageService storageService) { super(settings); - this.bucket = bucket; - this.storage = storage; - if (doesBucketExist(bucket) == false) { - throw new BlobStoreException("Bucket [" + bucket + "] does not exist"); + this.bucketName = bucketName; + this.clientName = clientName; + this.storageService = storageService; + if (doesBucketExist(bucketName) == false) { + throw new BlobStoreException("Bucket [" + bucketName + "] does not exist"); } } + private Storage client() throws IOException { + return storageService.client(clientName); + } + @Override public BlobContainer blobContainer(BlobPath path) { return new GoogleCloudStorageBlobContainer(path, this); @@ -91,14 +97,14 @@ public void close() { } /** - * Return true if the given bucket exists + * Return true iff the given bucket exists * * @param bucketName name of the bucket - * @return true if the bucket exists, false otherwise + * @return true iff the bucket exists */ boolean doesBucketExist(String bucketName) { try { - final Bucket bucket = SocketAccess.doPrivilegedIOException(() -> storage.get(bucketName)); + final Bucket bucket = SocketAccess.doPrivilegedIOException(() -> client().get(bucketName)); return bucket != null; } catch (final Exception e) { throw new BlobStoreException("Unable to check if bucket [" + bucketName + "] exists", e); @@ -106,10 +112,9 @@ boolean doesBucketExist(String bucketName) { } /** - * List blobs in the bucket under the specified path. The path root is removed. + * List blobs in the specific bucket under the specified path. The path root is removed. * - * @param path - * base path of the blobs to list + * @param path base path of the blobs to list * @return a map of blob names and their metadata */ Map listBlobs(String path) throws IOException { @@ -117,20 +122,19 @@ Map listBlobs(String path) throws IOException { } /** - * List all blobs in the bucket which have a prefix + * List all blobs in the specific bucket with names prefixed * * @param path * base path of the blobs to list. This path is removed from the * names of the blobs returned. - * @param prefix - * prefix of the blobs to list. + * @param prefix prefix of the blobs to list. * @return a map of blob names and their metadata. 
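* An illustrative usage (the {@code gcsBlobStore} variable is assumed, not part of this change):
* <pre>{@code
* // lists blobs under "foo/" whose names start with "bar"; returned keys are relative to "foo/"
* Map<String, BlobMetaData> blobs = gcsBlobStore.listBlobsByPrefix("foo/", "bar");
* }</pre>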
*/ Map listBlobsByPrefix(String path, String prefix) throws IOException { final String pathPrefix = buildKey(path, prefix); final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); SocketAccess.doPrivilegedVoidIOException(() -> { - storage.get(bucket).list(BlobListOption.prefix(pathPrefix)).iterateAll().forEach(blob -> { + client().get(bucketName).list(BlobListOption.prefix(pathPrefix)).iterateAll().forEach(blob -> { assert blob.getName().startsWith(path); final String suffixName = blob.getName().substring(path.length()); mapBuilder.put(suffixName, new PlainBlobMetaData(suffixName, blob.getSize())); @@ -140,26 +144,26 @@ Map listBlobsByPrefix(String path, String prefix) throws I } /** - * Returns true if the blob exists in the bucket + * Returns true if the blob exists in the specific bucket * * @param blobName name of the blob - * @return true if the blob exists, false otherwise + * @return true iff the blob exists */ boolean blobExists(String blobName) throws IOException { - final BlobId blobId = BlobId.of(bucket, blobName); - final Blob blob = SocketAccess.doPrivilegedIOException(() -> storage.get(blobId)); + final BlobId blobId = BlobId.of(bucketName, blobName); + final Blob blob = SocketAccess.doPrivilegedIOException(() -> client().get(blobId)); return blob != null; } /** - * Returns an {@link java.io.InputStream} for a given blob + * Returns an {@link java.io.InputStream} for the given blob name * * @param blobName name of the blob - * @return an InputStream + * @return the InputStream used to read the blob's content */ InputStream readBlob(String blobName) throws IOException { - final BlobId blobId = BlobId.of(bucket, blobName); - final Blob blob = SocketAccess.doPrivilegedIOException(() -> storage.get(blobId)); + final BlobId blobId = BlobId.of(bucketName, blobName); + final Blob blob = SocketAccess.doPrivilegedIOException(() -> client().get(blobId)); if (blob == null) { throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); } @@ -184,13 +188,13 @@ public void close() throws IOException { } /** - * Writes a blob in the bucket. 
+ * Writes a blob in the specific bucket * * @param inputStream content of the blob to be written * @param blobSize expected size of the blob to be written */ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - final BlobInfo blobInfo = BlobInfo.newBuilder(bucket, blobName).build(); + final BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build(); if (blobSize > LARGE_BLOB_THRESHOLD_BYTE_SIZE) { writeBlobResumable(blobInfo, inputStream); } else { @@ -208,8 +212,8 @@ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws I */ private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream) throws IOException { try { - final WriteChannel writeChannel = SocketAccess.doPrivilegedIOException( - () -> storage.writer(blobInfo, Storage.BlobWriteOption.doesNotExist())); + final WriteChannel writeChannel = SocketAccess + .doPrivilegedIOException(() -> client().writer(blobInfo, Storage.BlobWriteOption.doesNotExist())); Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { @Override public boolean isOpen() { @@ -227,7 +231,7 @@ public int write(ByteBuffer src) throws IOException { return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); } })); - } catch (StorageException se) { + } catch (final StorageException se) { if (se.getCode() == HTTP_PRECON_FAILED) { throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); } @@ -249,45 +253,43 @@ private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long assert blobSize <= LARGE_BLOB_THRESHOLD_BYTE_SIZE : "large blob uploads should use the resumable upload method"; final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.toIntExact(blobSize)); Streams.copy(inputStream, baos); - SocketAccess.doPrivilegedVoidIOException( - () -> { - try { - storage.create(blobInfo, baos.toByteArray(), Storage.BlobTargetOption.doesNotExist()); - } catch (StorageException se) { - if (se.getCode() == HTTP_PRECON_FAILED) { - throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); - } - throw se; - } - }); + try { + SocketAccess.doPrivilegedVoidIOException( + () -> client().create(blobInfo, baos.toByteArray(), Storage.BlobTargetOption.doesNotExist())); + } catch (final StorageException se) { + if (se.getCode() == HTTP_PRECON_FAILED) { + throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); + } + throw se; + } } /** - * Deletes a blob in the bucket + * Deletes the blob from the specific bucket * * @param blobName name of the blob */ void deleteBlob(String blobName) throws IOException { - final BlobId blobId = BlobId.of(bucket, blobName); - final boolean deleted = SocketAccess.doPrivilegedIOException(() -> storage.delete(blobId)); + final BlobId blobId = BlobId.of(bucketName, blobName); + final boolean deleted = SocketAccess.doPrivilegedIOException(() -> client().delete(blobId)); if (deleted == false) { throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); } } /** - * Deletes multiple blobs in the bucket that have a given prefix + * Deletes multiple blobs from the specific bucket all of which have prefixed names * - * @param prefix prefix of the buckets to delete + * @param prefix prefix of the blobs to delete */ void deleteBlobsByPrefix(String prefix) throws IOException { deleteBlobs(listBlobsByPrefix("", prefix).keySet()); } /** - * Deletes multiple blobs in the given bucket (uses a batch 
request to perform this) + * Deletes multiple blobs from the specific bucket using a batch request * - * @param blobNames names of the bucket to delete + * @param blobNames names of the blobs to delete */ void deleteBlobs(Collection blobNames) throws IOException { if (blobNames.isEmpty()) { @@ -298,13 +300,13 @@ void deleteBlobs(Collection blobNames) throws IOException { deleteBlob(blobNames.iterator().next()); return; } - final List blobIdsToDelete = blobNames.stream().map(blobName -> BlobId.of(bucket, blobName)).collect(Collectors.toList()); - final List deletedStatuses = SocketAccess.doPrivilegedIOException(() -> storage.delete(blobIdsToDelete)); + final List blobIdsToDelete = blobNames.stream().map(blob -> BlobId.of(bucketName, blob)).collect(Collectors.toList()); + final List deletedStatuses = SocketAccess.doPrivilegedIOException(() -> client().delete(blobIdsToDelete)); assert blobIdsToDelete.size() == deletedStatuses.size(); boolean failed = false; for (int i = 0; i < blobIdsToDelete.size(); i++) { if (deletedStatuses.get(i) == false) { - logger.error("Failed to delete blob [{}] in bucket [{}]", blobIdsToDelete.get(i).getName(), bucket); + logger.error("Failed to delete blob [{}] in bucket [{}]", blobIdsToDelete.get(i).getName(), bucketName); failed = true; } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java index 1d2d70584adf9..12e7fd26ff565 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -24,35 +24,34 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; - import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; -public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin { +public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { - private final Map clientsSettings; + // package-private for tests + final GoogleCloudStorageService storageService; public GoogleCloudStoragePlugin(final Settings settings) { - clientsSettings = GoogleCloudStorageClientSettings.load(settings); - } - - protected Map getClientsSettings() { - return clientsSettings; + this.storageService = createStorageService(settings); + // eagerly load client settings so that secure settings are readable (not closed) + reload(settings); } // overridable for tests - protected GoogleCloudStorageService createStorageService(Environment environment) { - return new GoogleCloudStorageService(environment, clientsSettings); + protected GoogleCloudStorageService createStorageService(Settings settings) { + return new GoogleCloudStorageService(settings); } @Override public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { return Collections.singletonMap(GoogleCloudStorageRepository.TYPE, - (metadata) -> new GoogleCloudStorageRepository(metadata, env, namedXContentRegistry, createStorageService(env))); + (metadata) -> new GoogleCloudStorageRepository(metadata, env, namedXContentRegistry, 
this.storageService)); } @Override @@ -66,4 +65,15 @@ public List<Setting<?>> getSettings() { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING, GoogleCloudStorageClientSettings.TOKEN_URI_SETTING); } + + @Override + public void reload(Settings settings) { + // Secure settings should be readable inside this method. Duplicate client + // settings in a format (`GoogleCloudStorageClientSettings`) that does not + // require the `SecureSettings` to be open. Pass that around (the + // `GoogleCloudStorageClientSettings` instance) instead of the `Settings` + // instance. + final Map<String, GoogleCloudStorageClientSettings> clientsSettings = GoogleCloudStorageClientSettings.load(settings); + this.storageService.refreshAndClearCache(clientsSettings); + } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index d261d738e5eee..0524f903e88b0 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -44,8 +44,6 @@ import static org.elasticsearch.common.settings.Setting.timeSetting; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; -import com.google.cloud.storage.Storage; - class GoogleCloudStorageRepository extends BlobStoreRepository { private final Logger logger = ESLoggerFactory.getLogger(GoogleCloudStorageRepository.class); @@ -109,33 +107,28 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, compress); - String application = APPLICATION_NAME.get(metadata.settings()); - if (Strings.hasText(application)) { + final String applicationName = APPLICATION_NAME.get(metadata.settings()); + if (Strings.hasText(applicationName)) { deprecationLogger.deprecated("Setting [application_name] in repository settings is deprecated, " + "it must be specified in the client settings instead"); + storageService.setOverrideApplicationName(applicationName); } - TimeValue connectTimeout = null; - TimeValue readTimeout = null; - TimeValue timeout = HTTP_CONNECT_TIMEOUT.get(metadata.settings()); - if ((timeout != null) && (timeout.millis() != NO_TIMEOUT.millis())) { + final TimeValue connectTimeout = HTTP_CONNECT_TIMEOUT.get(metadata.settings()); + if ((connectTimeout != null) && (connectTimeout.millis() != NO_TIMEOUT.millis())) { deprecationLogger.deprecated("Setting [http.connect_timeout] in repository settings is deprecated, " + "it must be specified in the client settings instead"); - connectTimeout = timeout; + storageService.setOverrideConnectTimeout(connectTimeout); } - timeout = HTTP_READ_TIMEOUT.get(metadata.settings()); - if ((timeout != null) && (timeout.millis() != NO_TIMEOUT.millis())) { + + final TimeValue readTimeout = HTTP_READ_TIMEOUT.get(metadata.settings()); + if ((readTimeout != null) && (readTimeout.millis() != NO_TIMEOUT.millis())) { deprecationLogger.deprecated("Setting [http.read_timeout] in repository settings is deprecated, " + "it must be specified in the client settings instead"); - readTimeout = timeout; + storageService.setOverrideReadTimeout(readTimeout); } - TimeValue finalConnectTimeout = connectTimeout; - TimeValue finalReadTimeout = readTimeout; - - Storage client = SocketAccess.doPrivilegedIOException(() ->
storageService.createClient(clientName, application, finalConnectTimeout, finalReadTimeout)); - this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, client); + this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, clientName, storageService); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index d3fa18ead0754..55c9573ce9cd7 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -28,12 +28,13 @@ import com.google.cloud.storage.Storage; import com.google.cloud.storage.StorageOptions; -import org.elasticsearch.common.Nullable; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; +import org.elasticsearch.common.util.LazyInitializable; import java.io.IOException; import java.net.HttpURLConnection; @@ -41,47 +42,147 @@ import java.net.URISyntaxException; import java.net.URL; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.emptyMap; public class GoogleCloudStorageService extends AbstractComponent { - /** Clients settings identified by client name. */ - private final Map<String, GoogleCloudStorageClientSettings> clientsSettings; + /** + * Dictionary of client instances. Client instances are built lazily from the + * latest settings. + */ + private final AtomicReference<Map<String, LazyInitializable<Storage, IOException>>> clientsCache = new AtomicReference<>(emptyMap()); + + /** + * Overrides application name for client creation. + */ + @Deprecated + private String applicationName; + /** + * Overrides connect timeout for client creation. + */ + @Deprecated + private TimeValue connectTimeout; + /** + * Overrides read timeout for client creation. + */ + @Deprecated + private TimeValue readTimeout; - public GoogleCloudStorageService(final Environment environment, final Map<String, GoogleCloudStorageClientSettings> clientsSettings) { - super(environment.settings()); - this.clientsSettings = clientsSettings; + public GoogleCloudStorageService(final Settings settings) { + super(settings); } /** - * Creates a client that can be used to manage Google Cloud Storage objects. + * Refreshes the client settings and clears the client cache. Subsequent calls to + * {@code GoogleCloudStorageService#client} will return new clients constructed + * using the parameter settings.
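
The cache value type here, LazyInitializable, is the utility this commit introduces under the server's common util package. A stripped-down sketch of the contract the service relies on, namely compute once on first use and reset() to drop the instance so the next getOrCompute() rebuilds it from fresh settings, might look as follows; the names LazyHolder and CheckedSupplier are illustrative simplifications, not the actual class:

    import java.util.concurrent.atomic.AtomicReference;

    // Simplified lazy holder: the value is built at most once per holder and
    // reset() discards it so a later getOrCompute() recomputes from scratch.
    final class LazyHolder<T, E extends Exception> {

        interface CheckedSupplier<T, E extends Exception> {
            T get() throws E;
        }

        private final CheckedSupplier<T, E> supplier;
        private final AtomicReference<T> value = new AtomicReference<>();

        LazyHolder(CheckedSupplier<T, E> supplier) {
            this.supplier = supplier;
        }

        synchronized T getOrCompute() throws E {
            T current = value.get();
            if (current == null) {
                current = supplier.get(); // built lazily on first use
                value.set(current);
            }
            return current;
        }

        synchronized void reset() {
            value.set(null); // stale instance dropped; next call recomputes
        }
    }

The commit's actual class is richer (it also takes on-get and on-reset callbacks), but the build-lazily/reset-to-invalidate life cycle shown here is what refreshAndClearCache depends on.
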
* - * @param clientName name of client settings to use, including secure settings - * @param application deprecated application name setting overriding client settings - * @param connectTimeout deprecated connect timeout setting overriding client settings - * @param readTimeout deprecated read timeout setting overriding client settings - * @return a Client instance that can be used to manage Storage objects + * @param clientsSettings the new settings used for building clients for subsequent requests */ - public Storage createClient(final String clientName, - @Nullable final String application, - @Nullable final TimeValue connectTimeout, - @Nullable final TimeValue readTimeout) throws Exception { + public synchronized void refreshAndClearCache(Map<String, GoogleCloudStorageClientSettings> clientsSettings) { + // build the new lazy clients + final MapBuilder<String, LazyInitializable<Storage, IOException>> newClientsCache = MapBuilder.newMapBuilder(); + for (final Map.Entry<String, GoogleCloudStorageClientSettings> entry : clientsSettings.entrySet()) { + newClientsCache.put(entry.getKey(), + new LazyInitializable<Storage, IOException>(() -> createClient(entry.getKey(), entry.getValue()))); + } + // make the new clients available + final Map<String, LazyInitializable<Storage, IOException>> oldClientCache = clientsCache.getAndSet(newClientsCache.immutableMap()); + // release old clients + oldClientCache.values().forEach(LazyInitializable::reset); + } - final GoogleCloudStorageClientSettings clientSettings = clientsSettings.get(clientName); - if (clientSettings == null) { + /** + * Attempts to retrieve a client from the cache. If the client does not exist it + * will be created from the latest settings and will populate the cache. The + * returned instance should not be cached by the calling code. Instead, for each + * use, the (possibly updated) instance should be requested by calling this + * method. + * + * @param clientName name of the client settings used to create the client + * @return a cached client storage instance that can be used to manage objects + * (blobs) + */ + public Storage client(final String clientName) throws IOException { + final LazyInitializable<Storage, IOException> lazyClient = clientsCache.get().get(clientName); + if (lazyClient == null) { throw new IllegalArgumentException("Unknown client name [" + clientName + "]. Existing client configs: " - + Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); + + Strings.collectionToDelimitedString(clientsCache.get().keySet(), ",")); + } + return lazyClient.getOrCompute(); + } + + /** + * Sets the value that overrides the application name while creating the client. + * Normally the application name is picked up from the GCS client settings in the + * elasticsearch conf file. Set to null to not override. + */ + @Deprecated + public void setOverrideApplicationName(String applicationName) { + this.applicationName = applicationName; + // release any existing client + clientsCache.get().values().forEach(LazyInitializable::reset); + } + + /** + * Sets the value that overrides the connect timeout while creating the client. + * Normally the connect timeout is picked up from the GCS client settings in + * the elasticsearch conf file. Set to null to not override. + */ + @Deprecated + public void setOverrideConnectTimeout(TimeValue connectTimeout) { + this.connectTimeout = connectTimeout; + // release any existing client + clientsCache.get().values().forEach(LazyInitializable::reset); + } + + /** + * Sets the value that overrides the read timeout while creating the client. + * Normally the read timeout is picked up from the GCS client settings in the + * elasticsearch conf file. Set to null to not override.
+ */ + @Deprecated + public void setOverrideReadTimeout(TimeValue readTimeout) { + this.readTimeout = readTimeout; + // release any existing client + clientsCache.get().values().forEach(LazyInitializable::reset); + } + + /** + * Creates a client that can be used to manage Google Cloud Storage objects. The client is thread-safe. + * + * @param clientName name of client settings to use, including secure settings + * @param clientSettings the client settings to use, including secure settings + * @return a new client storage instance that can be used to manage objects + * (blobs) + */ + private Storage createClient(final String clientName, final GoogleCloudStorageClientSettings clientSettings) throws IOException { + logger.debug(() -> new ParameterizedMessage("creating GCS client with client_name [{}], endpoint [{}]", clientName, + clientSettings.getHost())); + final HttpTransport httpTransport = SocketAccess.doPrivilegedIOException(() -> createHttpTransport(clientSettings.getHost())); + TimeValue connectTimeout = this.connectTimeout; + if (connectTimeout == null) { + connectTimeout = clientSettings.getConnectTimeout(); + } + TimeValue readTimeout = this.readTimeout; + if (readTimeout == null) { + readTimeout = clientSettings.getReadTimeout(); } - final HttpTransport httpTransport = createHttpTransport(clientSettings.getHost()); final HttpTransportOptions httpTransportOptions = HttpTransportOptions.newBuilder() - .setConnectTimeout(connectTimeout != null ? toTimeout(connectTimeout) : toTimeout(clientSettings.getConnectTimeout())) - .setReadTimeout(readTimeout != null ? toTimeout(readTimeout) : toTimeout(clientSettings.getReadTimeout())) + .setConnectTimeout(toTimeout(connectTimeout)) + .setReadTimeout(toTimeout(readTimeout)) .setHttpTransportFactory(() -> httpTransport) .build(); final StorageOptions.Builder storageOptionsBuilder = StorageOptions.newBuilder() .setTransportOptions(httpTransportOptions) .setHeaderProvider(() -> { final MapBuilder<String, String> mapBuilder = MapBuilder.newMapBuilder(); - final String applicationName = Strings.hasLength(application) ? application : clientSettings.getApplicationName(); + String applicationName = this.applicationName; + if (false == Strings.hasLength(applicationName)) { + applicationName = clientSettings.getApplicationName(); + } if (Strings.hasLength(applicationName)) { mapBuilder.put("user-agent", applicationName); } @@ -123,6 +224,9 @@ private static HttpTransport createHttpTransport(final String endpoint) throws E builder.trustCertificates(GoogleUtils.getCertificateTrustStore()); if (Strings.hasLength(endpoint)) { final URL endpointUrl = URI.create(endpoint).toURL(); + // it is crucial to open a connection for each URL (see {@code + // DefaultConnectionFactory#openConnection}) instead of reusing connections, + // because the storage instance has to be thread-safe as it is cached.
builder.setConnectionFactory(new DefaultConnectionFactory() { @Override public HttpURLConnection openConnection(final URL originalUrl) throws IOException { diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index 27736e24dbf51..0cc1243f28311 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -26,11 +26,22 @@ import java.util.Locale; import java.util.concurrent.ConcurrentHashMap; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + public class GoogleCloudStorageBlobStoreContainerTests extends ESBlobStoreContainerTestCase { @Override protected BlobStore newBlobStore() { - String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, new MockStorage(bucket, new ConcurrentHashMap<>())); + final String bucketName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class); + try { + when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>())); + } catch (final Exception e) { + throw new RuntimeException(e); + } + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucketName, clientName, storageService); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 6ed67c1a26947..3692b26f2bbb7 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -24,15 +24,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import org.junit.AfterClass; import java.util.Collection; import java.util.Collections; -import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -74,22 +71,19 @@ public MockGoogleCloudStoragePlugin(final Settings settings) { } @Override - protected GoogleCloudStorageService createStorageService(Environment environment) { - return new MockGoogleCloudStorageService(environment, getClientsSettings()); + protected GoogleCloudStorageService createStorageService(Settings settings) { + return new MockGoogleCloudStorageService(settings); } } public static class MockGoogleCloudStorageService extends GoogleCloudStorageService { - MockGoogleCloudStorageService(Environment environment, Map clientsSettings) { 
- super(environment, clientsSettings); + MockGoogleCloudStorageService(Settings settings) { + super(settings); } @Override - public Storage createClient(final String clientName, - final String application, - final TimeValue connectTimeout, - final TimeValue readTimeout) { + public Storage client(String clientName) { return new MockStorage(BUCKET, blobs); } } @@ -101,7 +95,7 @@ public void testChunkSize() { assertEquals(GoogleCloudStorageRepository.MAX_CHUNK_SIZE, chunkSize); // chunk size in settings - int size = randomIntBetween(1, 100); + final int size = randomIntBetween(1, 100); repositoryMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", size + "mb").build()); chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetaData); @@ -109,7 +103,7 @@ public void testChunkSize() { // zero bytes is not allowed IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, + final RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", "0").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); @@ -117,7 +111,7 @@ public void testChunkSize() { // negative bytes not allowed e = expectThrows(IllegalArgumentException.class, () -> { - RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, + final RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", "-1").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); @@ -125,7 +119,7 @@ public void testChunkSize() { // greater than max chunk size not allowed e = expectThrows(IllegalArgumentException.class, () -> { - RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, + final RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE, Settings.builder().put("chunk_size", "101mb").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java index 5e25307805235..4634bd3274a70 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java @@ -26,11 +26,22 @@ import java.util.Locale; import java.util.concurrent.ConcurrentHashMap; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + public class GoogleCloudStorageBlobStoreTests extends ESBlobStoreTestCase { @Override protected BlobStore newBlobStore() { - String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, new MockStorage(bucket, new ConcurrentHashMap<>())); + final String bucketName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final 
String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class); + try { + when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>())); + } catch (final Exception e) { + throw new RuntimeException(e); + } + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucketName, clientName, storageService); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java index e1f91eb1d31d3..0e11286181e9a 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java @@ -20,14 +20,15 @@ package org.elasticsearch.repositories.gcs; import com.google.cloud.storage.Storage; + import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.util.concurrent.ConcurrentHashMap; public class GoogleCloudStorageRepositoryDeprecationTests extends ESTestCase { @@ -39,21 +40,19 @@ public void testDeprecatedSettings() throws Exception { .put("http.read_timeout", "10s") .put("http.connect_timeout", "20s") .build(); - final RepositoryMetaData repositoryMetaData = new RepositoryMetaData("test", "gcs", repositorySettings); final Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); - new GoogleCloudStorageRepository(repositoryMetaData, environment, NamedXContentRegistry.EMPTY, - new GoogleCloudStorageService(environment, GoogleCloudStorageClientSettings.load(Settings.EMPTY)) { - @Override - public Storage createClient(String clientName, String application, TimeValue connect, TimeValue read) throws Exception { - return new MockStorage("test", new ConcurrentHashMap<>()); - } - }); + new GoogleCloudStorageService(Settings.EMPTY) { + @Override + public Storage client(String clientName) throws IOException { + return new MockStorage("test", new ConcurrentHashMap<>()); + } + }); assertWarnings( - "Setting [application_name] in repository settings is deprecated, it must be specified in the client settings instead", - "Setting [http.read_timeout] in repository settings is deprecated, it must be specified in the client settings instead", - "Setting [http.connect_timeout] in repository settings is deprecated, it must be specified in the client settings instead"); + "Setting [application_name] in repository settings is deprecated, it must be specified in the client settings instead", + "Setting [http.read_timeout] in repository settings is deprecated, it must be specified in the client settings instead", + "Setting [http.connect_timeout] in repository settings is deprecated, it must be specified in the client settings instead"); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java 
b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index 4e87031a630b2..5fa2f3c83a2f2 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -23,28 +23,36 @@ import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.Storage; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; -import java.util.Collections; + +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.util.Base64; import java.util.Locale; +import java.util.UUID; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.containsString; public class GoogleCloudStorageServiceTests extends ESTestCase { public void testClientInitializer() throws Exception { - final String clientName = randomAlphaOfLength(4).toLowerCase(Locale.ROOT); - final Environment environment = mock(Environment.class); + final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); - final String applicationName = randomAlphaOfLength(4); - final String hostName = randomFrom("http://", "https://") + randomAlphaOfLength(4) + ":" + randomIntBetween(1, 65535); - final String projectIdName = randomAlphaOfLength(4); + final String applicationName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final String endpoint = randomFrom("http://", "https://") + + randomFrom("www.elastic.co", "www.googleapis.com", "localhost/api", "google.com/oauth") + + ":" + randomIntBetween(1, 65535); + final String projectIdName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final Settings settings = Settings.builder() .put(GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), connectTimeValue.getStringRep()) @@ -52,29 +60,32 @@ public void testClientInitializer() throws Exception { readTimeValue.getStringRep()) .put(GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName).getKey(), applicationName) - .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), hostName) + .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint) .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) .build(); - when(environment.settings()).thenReturn(settings); - final GoogleCloudStorageClientSettings clientSettings = GoogleCloudStorageClientSettings.getClientSettings(settings, clientName); - final 
GoogleCloudStorageService service = new GoogleCloudStorageService(environment, - Collections.singletonMap(clientName, clientSettings)); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> service.createClient("another_client", null, null, null)); - assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); - assertSettingDeprecationsAndWarnings( - new Setting[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) }); + final GoogleCloudStorageService service = new GoogleCloudStorageService(settings); + service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(settings)); final String deprecatedApplicationName = randomBoolean() ? null : "deprecated_" + randomAlphaOfLength(4); + service.setOverrideApplicationName(deprecatedApplicationName); final TimeValue deprecatedConnectTimeout = randomBoolean() ? null : TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + service.setOverrideConnectTimeout(deprecatedConnectTimeout); final TimeValue deprecatedReadTimeout = randomBoolean() ? null : TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); - final Storage storage = service.createClient(clientName, deprecatedApplicationName, deprecatedConnectTimeout, - deprecatedReadTimeout); + service.setOverrideReadTimeout(deprecatedReadTimeout); + if (randomBoolean()) { + // refresh should not override defaults + service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(settings)); + } + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.client("another_client")); + assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); + assertSettingDeprecationsAndWarnings( + new Setting[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) }); + final Storage storage = service.client(clientName); if (deprecatedApplicationName != null) { assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(deprecatedApplicationName)); } else { assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); } - assertThat(storage.getOptions().getHost(), Matchers.is(hostName)); + assertThat(storage.getOptions().getHost(), Matchers.is(endpoint)); assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); if (deprecatedConnectTimeout != null) { @@ -94,6 +105,58 @@ public void testClientInitializer() throws Exception { assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class)); } + public void testReinitClientSettings() throws Exception { + final MockSecureSettings secureSettings1 = new MockSecureSettings(); + secureSettings1.setFile("gcs.client.gcs1.credentials_file", serviceAccountFileContent("project_gcs11")); + secureSettings1.setFile("gcs.client.gcs2.credentials_file", serviceAccountFileContent("project_gcs12")); + final Settings settings1 = Settings.builder().setSecureSettings(secureSettings1).build(); + final MockSecureSettings secureSettings2 = new MockSecureSettings(); + secureSettings2.setFile("gcs.client.gcs1.credentials_file", serviceAccountFileContent("project_gcs21")); + secureSettings2.setFile("gcs.client.gcs3.credentials_file", serviceAccountFileContent("project_gcs23")); + final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); + try 
(GoogleCloudStoragePlugin plugin = new GoogleCloudStoragePlugin(settings1)) { + final GoogleCloudStorageService storageService = plugin.storageService; + final Storage client11 = storageService.client("gcs1"); + assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); + final Storage client12 = storageService.client("gcs2"); + assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); + // client 3 is missing + final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> storageService.client("gcs3")); + assertThat(e1.getMessage(), containsString("Unknown client name [gcs3].")); + // update client settings + plugin.reload(settings2); + // old client 1 not changed + assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); + // new client 1 is changed + final Storage client21 = storageService.client("gcs1"); + assertThat(client21.getOptions().getProjectId(), equalTo("project_gcs21")); + // old client 2 not changed + assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); + // new client2 is gone + final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, () -> storageService.client("gcs2")); + assertThat(e2.getMessage(), containsString("Unknown client name [gcs2].")); + // client 3 emerged + final Storage client23 = storageService.client("gcs3"); + assertThat(client23.getOptions().getProjectId(), equalTo("project_gcs23")); + } + } + + private byte[] serviceAccountFileContent(String projectId) throws Exception { + final KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA"); + keyPairGenerator.initialize(1024); + final KeyPair keyPair = keyPairGenerator.generateKeyPair(); + final String encodedKey = Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded()); + final XContentBuilder serviceAccountBuilder = jsonBuilder().startObject() + .field("type", "service_account") + .field("project_id", projectId) + .field("private_key_id", UUID.randomUUID().toString()) + .field("private_key", "-----BEGIN PRIVATE KEY-----\n" + encodedKey + "\n-----END PRIVATE KEY-----\n") + .field("client_email", "integration_test@appspot.gserviceaccount.com") + .field("client_id", "client_id") + .endObject(); + return BytesReference.toBytes(BytesReference.bytes(serviceAccountBuilder)); + } + public void testToTimeout() { assertEquals(-1, GoogleCloudStorageService.toTimeout(null).intValue()); assertEquals(-1, GoogleCloudStorageService.toTimeout(TimeValue.ZERO).intValue()); diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 23252881cd75f..8448b2ab9e1ac 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -55,7 +55,7 @@ bundlePlugin { } additionalTest('testRepositoryCreds'){ - include '**/RepositorySettingsCredentialsTests.class' + include '**/RepositoryCredentialsTests.class' systemProperty 'es.allow_insecure_settings', 'true' } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java new file mode 100644 index 0000000000000..6734fcfb56df5 --- /dev/null +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.s3; + +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3Client; + +import org.elasticsearch.common.lease.Releasable; + +/** + * Handles the shutdown of the wrapped {@link AmazonS3Client} using reference + * counting. + */ +public class AmazonS3Reference extends AbstractRefCounted implements Releasable { + + private final AmazonS3 client; + + AmazonS3Reference(AmazonS3 client) { + super("AWS_S3_CLIENT"); + this.client = client; + } + + /** + * Call when the client is not needed anymore. + */ + @Override + public void close() { + decRef(); + } + + /** + * Returns the underlying `AmazonS3` client. All method calls are permitted BUT + * NOT shutdown. Shutdown is called when reference count reaches 0. + */ + public AmazonS3 client() { + return client; + } + + @Override + protected void closeInternal() { + client.shutdown(); + } + +} \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java index dbffe293a43b1..03b06c5b1bd34 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java @@ -19,14 +19,25 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.settings.Settings; +import java.io.Closeable; +import java.util.Map; -interface AwsS3Service extends LifecycleComponent { +interface AwsS3Service extends Closeable { /** - * Creates an {@code AmazonS3} client from the given repository metadata and node settings. + * Creates then caches an {@code AmazonS3} client using the current client + * settings. Returns an {@code AmazonS3Reference} wrapper which has to be + * released as soon as it is not needed anymore. */ - AmazonS3 client(Settings repositorySettings); + AmazonS3Reference client(String clientName); + + /** + * Updates settings for building clients and clears the client cache. Future + * client requests will use the new settings to lazily build new clients. 
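
Since AmazonS3Reference pairs AbstractRefCounted with Releasable, the intended calling convention is borrow-per-operation inside try-with-resources: acquire the reference, use the client, and close, so that the wrapped client's shutdown() only runs once the last outstanding reference is released, even if a concurrent settings reload clears the cache in the meantime. A hedged usage sketch (the service variable, client name, and bucket are illustrative, not part of the patch):

    import com.amazonaws.services.s3.AmazonS3;

    class S3ClientUsageExample {
        // Borrow a ref-counted client for exactly one operation; never call
        // shutdown() on the returned AmazonS3 directly.
        static boolean bucketExists(AwsS3Service service, String clientName, String bucket) {
            try (AmazonS3Reference ref = service.client(clientName)) {
                AmazonS3 s3 = ref.client();
                return s3.doesBucketExist(bucket);
            } // close() decrements the count; the last close triggers shutdown()
        }
    }
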
+ * + * @param clientsSettings the new refreshed settings + * @return the old stale settings + */ + Map<String, S3ClientSettings> refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings); + } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java index 842e65e554db5..94ae805023bb3 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java @@ -19,9 +19,7 @@ package org.elasticsearch.repositories.s3; -import java.util.HashMap; import java.util.Map; -import java.util.function.Function; import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; @@ -32,62 +30,90 @@ import com.amazonaws.internal.StaticCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3Client; + import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import java.io.IOException; +import static java.util.Collections.emptyMap; -class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Service { - - // pkg private for tests - static final Setting<String> CLIENT_NAME = new Setting<>("client", "default", Function.identity()); +class InternalAwsS3Service extends AbstractComponent implements AwsS3Service { - private final Map<String, S3ClientSettings> clientsSettings; + private volatile Map<String, AmazonS3Reference> clientsCache = emptyMap(); + private volatile Map<String, S3ClientSettings> clientsSettings = emptyMap(); - private final Map<String, AmazonS3Client> clientsCache = new HashMap<>(); - - InternalAwsS3Service(Settings settings, Map<String, S3ClientSettings> clientsSettings) { + InternalAwsS3Service(Settings settings) { super(settings); - this.clientsSettings = clientsSettings; } + /** + * Refreshes the settings for the AmazonS3 clients and clears the cache of + * existing clients. New clients will be built using these new settings. Old + * clients are usable until released. On release they will be destroyed instead + * of being returned to the cache. + */ @Override - public synchronized AmazonS3 client(Settings repositorySettings) { - String clientName = CLIENT_NAME.get(repositorySettings); - AmazonS3Client client = clientsCache.get(clientName); - if (client != null) { - return client; - } + public synchronized Map<String, S3ClientSettings> refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings) { + // shutdown all unused clients + // others will shutdown on their respective release + releaseCachedClients(); + final Map<String, S3ClientSettings> prevSettings = this.clientsSettings; + this.clientsSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); + assert this.clientsSettings.containsKey("default") : "always at least have 'default'"; + // clients are built lazily by {@link #client(String)} + return prevSettings; } - S3ClientSettings clientSettings = clientsSettings.get(clientName); - if (clientSettings == null) { - throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "].
Existing client configs: " + - Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); + /** + * Attempts to retrieve a client by name from the cache. If the client does not + * exist it will be created. + */ + @Override + public AmazonS3Reference client(String clientName) { + AmazonS3Reference clientReference = clientsCache.get(clientName); + if ((clientReference != null) && clientReference.tryIncRef()) { + return clientReference; } + synchronized (this) { + clientReference = clientsCache.get(clientName); + if ((clientReference != null) && clientReference.tryIncRef()) { + return clientReference; + } + final S3ClientSettings clientSettings = clientsSettings.get(clientName); + if (clientSettings == null) { + throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. Existing client configs: " + + Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); + } + logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint); + clientReference = new AmazonS3Reference(buildClient(clientSettings)); + clientReference.incRef(); + clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientName, clientReference).immutableMap(); + return clientReference; + } + } - logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint); - - AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, clientSettings, repositorySettings); - ClientConfiguration configuration = buildConfiguration(clientSettings); - - client = new AmazonS3Client(credentials, configuration); - + private AmazonS3 buildClient(S3ClientSettings clientSettings) { + final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); + final ClientConfiguration configuration = buildConfiguration(clientSettings); + final AmazonS3 client = buildClient(credentials, configuration); if (Strings.hasText(clientSettings.endpoint)) { client.setEndpoint(clientSettings.endpoint); } - - clientsCache.put(clientName, client); return client; } + // proxy for testing + AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { + return new AmazonS3Client(credentials, configuration); + } + // pkg private for tests static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { - ClientConfiguration clientConfiguration = new ClientConfiguration(); + final ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. 
clientConfiguration.setResponseMetadataCacheSize(0); @@ -109,27 +135,8 @@ static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { } // pkg private for tests - static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger deprecationLogger, - S3ClientSettings clientSettings, Settings repositorySettings) { - - - BasicAWSCredentials credentials = clientSettings.credentials; - if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) { - if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) { - throw new IllegalArgumentException("Repository setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + - " must be accompanied by setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + "]"); - } - try (SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings); - SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings)) { - credentials = new BasicAWSCredentials(key.toString(), secret.toString()); - } - // backcompat for reading keys out of repository settings - deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead " + - "store these in named clients and the elasticsearch keystore for secure settings."); - } else if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings)) { - throw new IllegalArgumentException("Repository setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + - " must be accompanied by setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + "]"); - } + static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) { + final BasicAWSCredentials credentials = clientSettings.credentials; if (credentials == null) { logger.debug("Using instance profile credentials"); return new PrivilegedInstanceProfileCredentialsProvider(); @@ -147,21 +154,15 @@ private static <T> T getRepoValue(Settings repositorySettings, Setting<T> reposi return fallback; } - @Override - protected void doStart() throws ElasticsearchException { - } - - @Override - protected void doStop() throws ElasticsearchException { - } - - @Override - protected void doClose() throws ElasticsearchException { - for (AmazonS3Client client : clientsCache.values()) { - client.shutdown(); + protected synchronized void releaseCachedClients() { + // the clients will shut down once they are no longer used + for (final AmazonS3Reference clientReference : clientsCache.values()) { + clientReference.decRef(); } - - // Ensure that IdleConnectionReaper is shutdown + // clear previously cached clients, they will be built lazily + clientsCache = emptyMap(); + // shutdown IdleConnectionReaper background thread + // it will be restarted on new client usage IdleConnectionReaper.shutdown(); } @@ -182,4 +183,10 @@ public void refresh() { SocketAccess.doPrivilegedVoid(credentials::refresh); } } + + @Override + public void close() throws IOException { + releaseCachedClients(); + } + } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 222802ae30437..102af6f9f5b4c 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.AmazonClientException; -import
com.amazonaws.services.s3.model.AbortMultipartUploadRequest; import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; @@ -47,8 +46,6 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -70,19 +67,20 @@ class S3BlobContainer extends AbstractBlobContainer { @Override public boolean blobExists(String blobName) { - try { - return SocketAccess.doPrivileged(() -> blobStore.client().doesObjectExist(blobStore.bucket(), buildKey(blobName))); - } catch (Exception e) { + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + return SocketAccess.doPrivileged(() -> clientReference.client().doesObjectExist(blobStore.bucket(), buildKey(blobName))); + } catch (final Exception e) { throw new BlobStoreException("Failed to check if blob [" + blobName +"] exists", e); } } @Override public InputStream readBlob(String blobName) throws IOException { - try { - S3Object s3Object = SocketAccess.doPrivileged(() -> blobStore.client().getObject(blobStore.bucket(), buildKey(blobName))); + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(blobStore.bucket(), + buildKey(blobName))); return s3Object.getObjectContent(); - } catch (AmazonClientException e) { + } catch (final AmazonClientException e) { if (e instanceof AmazonS3Exception) { if (404 == ((AmazonS3Exception) e).getStatusCode()) { throw new NoSuchFileException("Blob object [" + blobName + "] not found: " + e.getMessage()); @@ -110,44 +108,45 @@ public void deleteBlob(String blobName) throws IOException { throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); } - try { - SocketAccess.doPrivilegedVoid(() -> blobStore.client().deleteObject(blobStore.bucket(), buildKey(blobName))); - } catch (AmazonClientException e) { + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObject(blobStore.bucket(), buildKey(blobName))); + } catch (final AmazonClientException e) { throw new IOException("Exception when deleting blob [" + blobName + "]", e); } } @Override public Map listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException { - return AccessController.doPrivileged((PrivilegedAction>) () -> { - MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); - AmazonS3 client = blobStore.client(); - SocketAccess.doPrivilegedVoid(() -> { - ObjectListing prevListing = null; - while (true) { - ObjectListing list; - if (prevListing != null) { - list = client.listNextBatchOfObjects(prevListing); - } else { - if (blobNamePrefix != null) { - list = client.listObjects(blobStore.bucket(), buildKey(blobNamePrefix)); - } else { - list = client.listObjects(blobStore.bucket(), keyPath); - } - } - for (S3ObjectSummary summary : list.getObjectSummaries()) { - String name = summary.getKey().substring(keyPath.length()); - blobsBuilder.put(name, new PlainBlobMetaData(name, summary.getSize())); - } - if (list.isTruncated()) { - prevListing = list; + final MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + ObjectListing prevListing = null; + while (true) { + ObjectListing list; + if (prevListing != 
null) { + final ObjectListing finalPrevListing = prevListing; + list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); + } else { + if (blobNamePrefix != null) { + list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(blobStore.bucket(), + buildKey(blobNamePrefix))); } else { - break; + list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(blobStore.bucket(), keyPath)); } } - }); + for (final S3ObjectSummary summary : list.getObjectSummaries()) { + final String name = summary.getKey().substring(keyPath.length()); + blobsBuilder.put(name, new PlainBlobMetaData(name, summary.getSize())); + } + if (list.isTruncated()) { + prevListing = list; + } else { + break; + } + } return blobsBuilder.immutableMap(); - }); + } catch (final AmazonClientException e) { + throw new IOException("Exception when listing blobs by prefix [" + blobNamePrefix + "]", e); + } } @Override @@ -175,19 +174,20 @@ void executeSingleUpload(final S3BlobStore blobStore, throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size"); } - try { - final ObjectMetadata md = new ObjectMetadata(); - md.setContentLength(blobSize); - if (blobStore.serverSideEncryption()) { - md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); - } - - final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md); - putRequest.setStorageClass(blobStore.getStorageClass()); - putRequest.setCannedAcl(blobStore.getCannedACL()); + final ObjectMetadata md = new ObjectMetadata(); + md.setContentLength(blobSize); + if (blobStore.serverSideEncryption()) { + md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); + } + final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md); + putRequest.setStorageClass(blobStore.getStorageClass()); + putRequest.setCannedAcl(blobStore.getCannedACL()); - blobStore.client().putObject(putRequest); - } catch (AmazonClientException e) { + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + SocketAccess.doPrivilegedVoid(() -> { + clientReference.client().putObject(putRequest); + }); + } catch (final AmazonClientException e) { throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e); } } @@ -218,23 +218,23 @@ void executeMultipartUpload(final S3BlobStore blobStore, final int nbParts = multiparts.v1().intValue(); final long lastPartSize = multiparts.v2(); - assert blobSize == (nbParts - 1) * partSize + lastPartSize : "blobSize does not match multipart sizes"; + assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes"; final SetOnce uploadId = new SetOnce<>(); final String bucketName = blobStore.bucket(); boolean success = false; - try { - final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName); - initRequest.setStorageClass(blobStore.getStorageClass()); - initRequest.setCannedACL(blobStore.getCannedACL()); - if (blobStore.serverSideEncryption()) { - final ObjectMetadata md = new ObjectMetadata(); - md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); - initRequest.setObjectMetadata(md); - } + final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName); + initRequest.setStorageClass(blobStore.getStorageClass()); + initRequest.setCannedACL(blobStore.getCannedACL()); + if 
(blobStore.serverSideEncryption()) { + final ObjectMetadata md = new ObjectMetadata(); + md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); + initRequest.setObjectMetadata(md); + } + try (AmazonS3Reference clientReference = blobStore.clientReference()) { - uploadId.set(blobStore.client().initiateMultipartUpload(initRequest).getUploadId()); + uploadId.set(SocketAccess.doPrivileged(() -> clientReference.client().initiateMultipartUpload(initRequest).getUploadId())); if (Strings.isEmpty(uploadId.get())) { throw new IOException("Failed to initialize multipart upload " + blobName); } @@ -259,7 +259,7 @@ void executeMultipartUpload(final S3BlobStore blobStore, } bytesCount += uploadRequest.getPartSize(); - final UploadPartResult uploadResponse = blobStore.client().uploadPart(uploadRequest); + final UploadPartResult uploadResponse = SocketAccess.doPrivileged(() -> clientReference.client().uploadPart(uploadRequest)); parts.add(uploadResponse.getPartETag()); } @@ -268,16 +268,19 @@ void executeMultipartUpload(final S3BlobStore blobStore, + "bytes sent but got " + bytesCount); } - CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts); - blobStore.client().completeMultipartUpload(complRequest); + final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), + parts); + SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest)); success = true; - } catch (AmazonClientException e) { + } catch (final AmazonClientException e) { throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e); } finally { - if (success == false && Strings.hasLength(uploadId.get())) { + if ((success == false) && Strings.hasLength(uploadId.get())) { final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get()); - blobStore.client().abortMultipartUpload(abortRequest); + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortRequest)); + } } } } @@ -296,7 +299,7 @@ static Tuple numberOfMultiparts(final long totalSize, final long par throw new IllegalArgumentException("Part size must be greater than zero"); } - if (totalSize == 0L || totalSize <= partSize) { + if ((totalSize == 0L) || (totalSize <= partSize)) { return Tuple.tuple(1L, totalSize); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index a36a247fd1db6..c0f61e4d07828 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -19,13 +19,13 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.StorageClass; + import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import 
org.elasticsearch.common.blobstore.BlobStore; @@ -34,14 +34,15 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import java.security.AccessController; -import java.security.PrivilegedAction; +import java.io.IOException; import java.util.ArrayList; import java.util.Locale; class S3BlobStore extends AbstractComponent implements BlobStore { - private final AmazonS3 client; + private final AwsS3Service service; + + private final String clientName; private final String bucket; @@ -53,10 +54,11 @@ class S3BlobStore extends AbstractComponent implements BlobStore { private final StorageClass storageClass; - S3BlobStore(Settings settings, AmazonS3 client, String bucket, boolean serverSideEncryption, + S3BlobStore(Settings settings, AwsS3Service service, String clientName, String bucket, boolean serverSideEncryption, ByteSizeValue bufferSize, String cannedACL, String storageClass) { super(settings); - this.client = client; + this.service = service; + this.clientName = clientName; this.bucket = bucket; this.serverSideEncryption = serverSideEncryption; this.bufferSize = bufferSize; @@ -68,12 +70,14 @@ class S3BlobStore extends AbstractComponent implements BlobStore { // Also, if invalid security credentials are used to execute this method, the // client is not able to distinguish between bucket permission errors and // invalid credential errors, and this method could return an incorrect result. - SocketAccess.doPrivilegedVoid(() -> { - if (client.doesBucketExist(bucket) == false) { - throw new IllegalArgumentException("The bucket [" + bucket + "] does not exist. Please create it before " + - " creating an s3 snapshot repository backed by it."); - } - }); + try (AmazonS3Reference clientReference = clientReference()) { + SocketAccess.doPrivilegedVoid(() -> { + if (clientReference.client().doesBucketExist(bucket) == false) { + throw new IllegalArgumentException("The bucket [" + bucket + "] does not exist. 
Please create it before " + + " creating an s3 snapshot repository backed by it."); + } + }); + } } @Override @@ -81,8 +85,8 @@ public String toString() { return bucket; } - public AmazonS3 client() { - return client; + public AmazonS3Reference clientReference() { + return service.client(clientName); } public String bucket() { @@ -104,27 +108,30 @@ public BlobContainer blobContainer(BlobPath path) { @Override public void delete(BlobPath path) { - AccessController.doPrivileged((PrivilegedAction) () -> { + try (AmazonS3Reference clientReference = clientReference()) { ObjectListing prevListing = null; - //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html - //we can do at most 1K objects per delete - //We don't know the bucket name until first object listing + // From + // http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html + // we can do at most 1K objects per delete + // We don't know the bucket name until first object listing DeleteObjectsRequest multiObjectDeleteRequest = null; - ArrayList keys = new ArrayList(); + final ArrayList keys = new ArrayList<>(); while (true) { ObjectListing list; if (prevListing != null) { - list = client.listNextBatchOfObjects(prevListing); + final ObjectListing finalPrevListing = prevListing; + list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); } else { - list = client.listObjects(bucket, path.buildAsString()); + list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(bucket, path.buildAsString())); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); } - for (S3ObjectSummary summary : list.getObjectSummaries()) { + for (final S3ObjectSummary summary : list.getObjectSummaries()) { keys.add(new KeyVersion(summary.getKey())); - //Every 500 objects batch the delete request + // Every 500 objects batch the delete request if (keys.size() > 500) { multiObjectDeleteRequest.setKeys(keys); - client.deleteObjects(multiObjectDeleteRequest); + final DeleteObjectsRequest finalMultiObjectDeleteRequest = multiObjectDeleteRequest; + SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(finalMultiObjectDeleteRequest)); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); keys.clear(); } @@ -137,14 +144,15 @@ public void delete(BlobPath path) { } if (!keys.isEmpty()) { multiObjectDeleteRequest.setKeys(keys); - client.deleteObjects(multiObjectDeleteRequest); + final DeleteObjectsRequest finalMultiObjectDeleteRequest = multiObjectDeleteRequest; + SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(finalMultiObjectDeleteRequest)); } - return null; - }); + } } @Override - public void close() { + public void close() throws IOException { + this.service.close(); } public CannedAccessControlList getCannedACL() { @@ -154,18 +162,18 @@ public CannedAccessControlList getCannedACL() { public StorageClass getStorageClass() { return storageClass; } public static StorageClass initStorageClass(String storageClass) { - if (storageClass == null || storageClass.equals("")) { + if ((storageClass == null) || storageClass.equals("")) { return StorageClass.Standard; } try { - StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH)); + final StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH)); if (_storageClass.equals(StorageClass.Glacier)) { throw new 
BlobStoreException("Glacier storage class is not supported"); } return _storageClass; - } catch (IllegalArgumentException illegalArgumentException) { + } catch (final IllegalArgumentException illegalArgumentException) { throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class."); } } @@ -174,11 +182,11 @@ public static StorageClass initStorageClass(String storageClass) { * Constructs canned acl from string */ public static CannedAccessControlList initCannedACL(String cannedACL) { - if (cannedACL == null || cannedACL.equals("")) { + if ((cannedACL == null) || cannedACL.equals("")) { return CannedAccessControlList.Private; } - for (CannedAccessControlList cur : CannedAccessControlList.values()) { + for (final CannedAccessControlList cur : CannedAccessControlList.values()) { if (cur.toString().equalsIgnoreCase(cannedACL)) { return cur; } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index 4d32d2518fff1..ef6088fe154bf 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -24,10 +24,11 @@ import java.util.Locale; import java.util.Map; import java.util.Set; - import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; import com.amazonaws.auth.BasicAWSCredentials; + +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; @@ -38,7 +39,7 @@ /** * A container for settings used to create an S3 client. */ -class S3ClientSettings { +final class S3ClientSettings { // prefix for s3 client settings private static final String PREFIX = "s3.client."; @@ -119,7 +120,7 @@ class S3ClientSettings { /** Whether the s3 client should use an exponential backoff retry policy. */ final boolean throttleRetries; - private S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol, + protected S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort, String proxyUsername, String proxyPassword, int readTimeoutMillis, int maxRetries, boolean throttleRetries) { this.credentials = credentials; @@ -140,9 +141,9 @@ private S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Proto * Note this will always at least return a client named "default". */ static Map load(Settings settings) { - Set clientNames = settings.getGroups(PREFIX).keySet(); - Map clients = new HashMap<>(); - for (String clientName : clientNames) { + final Set clientNames = settings.getGroups(PREFIX).keySet(); + final Map clients = new HashMap<>(); + for (final String clientName : clientNames) { clients.put(clientName, getClientSettings(settings, clientName)); } if (clients.containsKey("default") == false) { @@ -153,23 +154,64 @@ static Map load(Settings settings) { return Collections.unmodifiableMap(clients); } - // pkg private for tests - /** Parse settings for a single client. 
*/ - static S3ClientSettings getClientSettings(Settings settings, String clientName) { + static Map overrideCredentials(Map clientsSettings, + BasicAWSCredentials credentials) { + final MapBuilder mapBuilder = new MapBuilder<>(); + for (final Map.Entry entry : clientsSettings.entrySet()) { + final S3ClientSettings s3ClientSettings = new S3ClientSettings(credentials, entry.getValue().endpoint, + entry.getValue().protocol, entry.getValue().proxyHost, entry.getValue().proxyPort, entry.getValue().proxyUsername, + entry.getValue().proxyPassword, entry.getValue().readTimeoutMillis, entry.getValue().maxRetries, + entry.getValue().throttleRetries); + mapBuilder.put(entry.getKey(), s3ClientSettings); + } + return mapBuilder.immutableMap(); + } + + static boolean checkDeprecatedCredentials(Settings repositorySettings) { + if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) { + if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) { + throw new IllegalArgumentException("Repository setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + + " must be accompanied by setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + "]"); + } + return true; + } else if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings)) { + throw new IllegalArgumentException("Repository setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + + " must be accompanied by setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + "]"); + } + return false; + } + + // backcompat for reading keys out of repository settings (clusterState) + static BasicAWSCredentials loadDeprecatedCredentials(Settings repositorySettings) { + assert checkDeprecatedCredentials(repositorySettings); + try (SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings); + SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings)) { + return new BasicAWSCredentials(key.toString(), secret.toString()); + } + } + + static BasicAWSCredentials loadCredentials(Settings settings, String clientName) { try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING); - SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING); - SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); - SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) { - BasicAWSCredentials credentials = null; + SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING);) { if (accessKey.length() != 0) { if (secretKey.length() != 0) { - credentials = new BasicAWSCredentials(accessKey.toString(), secretKey.toString()); + return new BasicAWSCredentials(accessKey.toString(), secretKey.toString()); } else { throw new IllegalArgumentException("Missing secret key for s3 client [" + clientName + "]"); } } else if (secretKey.length() != 0) { throw new IllegalArgumentException("Missing access key for s3 client [" + clientName + "]"); } + return null; + } + } + + // pkg private for tests + /** Parse settings for a single client. 
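+ Reads the client's secure settings (access key, secret key, proxy username and proxy password) inside try-with-resources blocks, so the backing {@code SecureString} values are closed as soon as the settings object has been built. For example, {@code getClientSettings(settings, "default")} yields the settings for the {@code default} client.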
*/ + static S3ClientSettings getClientSettings(Settings settings, String clientName) { + final BasicAWSCredentials credentials = S3ClientSettings.loadCredentials(settings, clientName); + try (SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); + SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) { return new S3ClientSettings( credentials, getConfigValue(settings, clientName, ENDPOINT_SETTING), @@ -187,7 +229,7 @@ static S3ClientSettings getClientSettings(Settings settings, String clientName) private static T getConfigValue(Settings settings, String clientName, Setting.AffixSetting clientSetting) { - Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); + final Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); return concreteSetting.get(settings); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index c185027d67f26..063e266837bad 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -19,7 +19,8 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.auth.BasicAWSCredentials; + import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; @@ -35,6 +36,9 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import java.io.IOException; +import java.util.Map; +import java.util.function.Function; /** * Shared file system implementation of the BlobStoreRepository @@ -134,6 +138,8 @@ class S3Repository extends BlobStoreRepository { */ static final Setting CANNED_ACL_SETTING = Setting.simpleString("canned_acl"); + static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); + /** * Specifies the path within bucket to repository data. Defaults to root directory. 
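+ * For example, a repository registered with {@code base_path} set to {@code backups/prod} (an illustrative value) keeps all of its blobs under that key prefix instead of the bucket root.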
*/ @@ -143,23 +149,24 @@ class S3Repository extends BlobStoreRepository { private final BlobPath basePath; - private ByteSizeValue chunkSize; + private final ByteSizeValue chunkSize; - private boolean compress; + private final boolean compress; /** * Constructs an s3 backed repository */ - S3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) { + S3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, + AwsS3Service awsService) throws IOException { super(metadata, settings, namedXContentRegistry); - String bucket = BUCKET_SETTING.get(metadata.settings()); + final String bucket = BUCKET_SETTING.get(metadata.settings()); if (bucket == null) { throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository"); } - boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); - ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); + final boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); + final ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); this.compress = COMPRESS_SETTING.get(metadata.settings()); @@ -170,17 +177,22 @@ class S3Repository extends BlobStoreRepository { } // Parse and validate the user's S3 Storage Class setting - String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); - String cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); + final String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); + final String cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); + final String clientName = CLIENT_NAME.get(metadata.settings()); logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " + "buffer_size [{}], cannedACL [{}], storageClass [{}]", bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass); - AmazonS3 client = s3Service.client(metadata.settings()); - blobStore = new S3BlobStore(settings, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); + // deprecated behavior: override client credentials from the cluster state + // (repository settings) + if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { + overrideCredentialsFromClusterState(awsService); + } + blobStore = new S3BlobStore(settings, awsService, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); - String basePath = BASE_PATH_SETTING.get(metadata.settings()); + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); if (Strings.hasLength(basePath)) { this.basePath = new BlobPath().add(basePath); } else { @@ -207,4 +219,14 @@ protected boolean isCompress() { protected ByteSizeValue chunkSize() { return chunkSize; } + + void overrideCredentialsFromClusterState(AwsS3Service awsService) { + deprecationLogger.deprecated("Using s3 access/secret key from repository settings. 
Instead " + + "store these in named clients and the elasticsearch keystore for secure settings."); + final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings()); + // hack, but that's ok because the whole if branch should be axed + final Map prevSettings = awsService.refreshAndClearCache(S3ClientSettings.load(Settings.EMPTY)); + final Map newSettings = S3ClientSettings.overrideCredentials(prevSettings, insecureCredentials); + awsService.refreshAndClearCache(newSettings); + } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index e31495efc0eef..93561c94d2b9a 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories.s3; +import java.io.IOException; import java.security.AccessController; import java.security.PrivilegedAction; import java.util.Arrays; @@ -28,18 +29,20 @@ import com.amazonaws.util.json.Jackson; import org.elasticsearch.SpecialPermission; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; /** * A plugin to add a repository type that writes to and from the AWS S3. 
*/ -public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { +public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { static { SpecialPermission.check(); @@ -50,30 +53,40 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { // ClientConfiguration clinit has some classloader problems // TODO: fix that Class.forName("com.amazonaws.ClientConfiguration"); - } catch (ClassNotFoundException e) { + } catch (final ClassNotFoundException e) { throw new RuntimeException(e); } return null; }); } - private final Map<String, S3ClientSettings> clientsSettings; + private final AwsS3Service awsS3Service; public S3RepositoryPlugin(Settings settings) { + this.awsS3Service = getAwsS3Service(settings); // eagerly load client settings so that secure settings are read - clientsSettings = S3ClientSettings.load(settings); - assert clientsSettings.isEmpty() == false : "always at least have 'default'"; + final Map<String, S3ClientSettings> clientsSettings = S3ClientSettings.load(settings); + this.awsS3Service.refreshAndClearCache(clientsSettings); } - // overridable for tests - protected AwsS3Service createStorageService(Settings settings) { - return new InternalAwsS3Service(settings, clientsSettings); + protected S3RepositoryPlugin(AwsS3Service awsS3Service) { + this.awsS3Service = awsS3Service; + } + + // proxy method for testing + protected S3Repository getS3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry) + throws IOException { + return new S3Repository(metadata, settings, namedXContentRegistry, awsS3Service); + } + + // proxy method for testing + protected AwsS3Service getAwsS3Service(Settings settings) { + return new InternalAwsS3Service(settings); } @Override public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { - return Collections.singletonMap(S3Repository.TYPE, - (metadata) -> new S3Repository(metadata, env.settings(), namedXContentRegistry, createStorageService(env.settings()))); + return Collections.singletonMap(S3Repository.TYPE, (metadata) -> getS3Repository(metadata, env.settings(), namedXContentRegistry)); } @Override @@ -94,4 +107,16 @@ public List<Setting<?>> getSettings() { S3Repository.ACCESS_KEY_SETTING, S3Repository.SECRET_KEY_SETTING); } + + @Override + public void reload(Settings settings) { + // secure settings should be readable + final Map<String, S3ClientSettings> clientsSettings = S3ClientSettings.load(settings); + awsS3Service.refreshAndClearCache(clientsSettings); + } + + @Override + public void close() throws IOException { + awsS3Service.close(); + } } diff --git a/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy index d8fca1fc89938..5fd69b4c2fc3f 100644 --- a/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-s3/src/main/plugin-metadata/plugin-security.policy @@ -37,4 +37,7 @@ grant { // s3 client opens socket connections to access the repository permission java.net.SocketPermission "*", "connect"; + + // only for tests : org.elasticsearch.repositories.s3.S3RepositoryPlugin + permission java.util.PropertyPermission "es.allow_insecure_settings", "read,write"; }; diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index 4a6d1c123838d..e3d6d8ac5e410 100644 --- 
a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; @@ -31,7 +30,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; @@ -181,14 +179,13 @@ public void testEncryption() { Settings settings = internalCluster().getInstance(Settings.class); Settings bucket = settings.getByPrefix("repositories.s3."); - RepositoryMetaData metadata = new RepositoryMetaData("test-repo", "fs", Settings.EMPTY); - AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(repositorySettings); - - String bucketName = bucket.get("bucket"); - logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath); - List summaries = s3Client.listObjects(bucketName, basePath).getObjectSummaries(); - for (S3ObjectSummary summary : summaries) { - assertThat(s3Client.getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256")); + try (AmazonS3Reference s3Client = internalCluster().getInstance(AwsS3Service.class).client("default")) { + String bucketName = bucket.get("bucket"); + logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath); + List summaries = s3Client.client().listObjects(bucketName, basePath).getObjectSummaries(); + for (S3ObjectSummary summary : summaries) { + assertThat(s3Client.client().getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256")); + } } logger.info("--> delete some data"); @@ -445,8 +442,7 @@ public void cleanRepositoryFiles(String basePath) { // We check that settings has been set in elasticsearch.yml integration test file // as described in README assertThat("Your settings in elasticsearch.yml are incorrect. 
Check README file.", bucketName, notNullValue()); - AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(Settings.EMPTY); - try { + try (AmazonS3Reference s3Client = internalCluster().getInstance(AwsS3Service.class).client("default")) { ObjectListing prevListing = null; //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html //we can do at most 1K objects per delete @@ -456,9 +452,9 @@ public void cleanRepositoryFiles(String basePath) { while (true) { ObjectListing list; if (prevListing != null) { - list = client.listNextBatchOfObjects(prevListing); + list = s3Client.client().listNextBatchOfObjects(prevListing); } else { - list = client.listObjects(bucketName, basePath); + list = s3Client.client().listObjects(bucketName, basePath); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); } for (S3ObjectSummary summary : list.getObjectSummaries()) { @@ -466,7 +462,7 @@ public void cleanRepositoryFiles(String basePath) { //Every 500 objects batch the delete request if (keys.size() > 500) { multiObjectDeleteRequest.setKeys(keys); - client.deleteObjects(multiObjectDeleteRequest); + s3Client.client().deleteObjects(multiObjectDeleteRequest); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); keys.clear(); } @@ -479,7 +475,7 @@ public void cleanRepositoryFiles(String basePath) { } if (!keys.isEmpty()) { multiObjectDeleteRequest.setKeys(keys); - client.deleteObjects(multiObjectDeleteRequest); + s3Client.client().deleteObjects(multiObjectDeleteRequest); } } catch (Exception ex) { logger.warn((Supplier) () -> new ParameterizedMessage("Failed to delete S3 repository [{}]", bucketName), ex); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java index bcab130e7d531..91b364011b80a 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Wrapper.java @@ -727,4 +727,9 @@ public BucketReplicationConfiguration getBucketReplicationConfiguration(GetBucke public HeadBucketResult headBucket(HeadBucketRequest headBucketRequest) throws AmazonClientException, AmazonServiceException { return delegate.headBucket(headBucketRequest); } + + @Override + public void shutdown() { + delegate.shutdown(); + } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 353de31fa1873..6f55f3ed345df 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -21,75 +21,89 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; -import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.internal.StaticCredentialsProvider; + import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; +import java.util.Locale; +import java.util.Map; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class 
AwsS3ServiceImplTests extends ESTestCase { - public void testAWSCredentialsWithSystemProviders() { - S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, "default"); - AWSCredentialsProvider credentialsProvider = - InternalAwsS3Service.buildCredentials(logger, deprecationLogger, clientSettings, Settings.EMPTY); + public void testAWSCredentialsDefaultToInstanceProviders() { + final String inexistentClientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, inexistentClientName); + final AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, clientSettings); assertThat(credentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } - public void testAwsCredsDefaultSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("s3.client.default.access_key", "aws_key"); - secureSettings.setString("s3.client.default.secret_key", "aws_secret"); - Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - assertCredentials(Settings.EMPTY, settings, "aws_key", "aws_secret"); - } - - public void testAwsCredsExplicitConfigSettings() { - Settings repositorySettings = Settings.builder().put(InternalAwsS3Service.CLIENT_NAME.getKey(), "myconfig").build(); - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("s3.client.myconfig.access_key", "aws_key"); - secureSettings.setString("s3.client.myconfig.secret_key", "aws_secret"); - secureSettings.setString("s3.client.default.access_key", "wrong_key"); - secureSettings.setString("s3.client.default.secret_key", "wrong_secret"); - Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - assertCredentials(repositorySettings, settings, "aws_key", "aws_secret"); - } - - public void testRepositorySettingsCredentialsDisallowed() { - Settings repositorySettings = Settings.builder() - .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key") - .put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret")); - assertThat(e.getMessage(), containsString("Setting [access_key] is insecure")); - } - - public void testRepositorySettingsCredentialsMissingKey() { - Settings repositorySettings = Settings.builder().put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret")); - assertThat(e.getMessage(), containsString("must be accompanied by setting [access_key]")); + public void testAWSCredentialsFromKeystore() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String clientNamePrefix = "some_client_name_"; + final int clientsCount = randomIntBetween(0, 4); + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + secureSettings.setString("s3.client." + clientName + ".access_key", clientName + "_aws_access_key"); + secureSettings.setString("s3.client." 
+ clientName + ".secret_key", clientName + "_aws_secret_key"); + } + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + final Map allClientsSettings = S3ClientSettings.load(settings); + // no less, no more + assertThat(allClientsSettings.size(), is(clientsCount + 1)); // including default + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + final S3ClientSettings someClientSettings = allClientsSettings.get(clientName); + final AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, someClientSettings); + assertThat(credentialsProvider, instanceOf(StaticCredentialsProvider.class)); + assertThat(credentialsProvider.getCredentials().getAWSAccessKeyId(), is(clientName + "_aws_access_key")); + assertThat(credentialsProvider.getCredentials().getAWSSecretKey(), is(clientName + "_aws_secret_key")); + } + // test default exists and is an Instance provider + final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); + final AWSCredentialsProvider defaultCredentialsProvider = InternalAwsS3Service.buildCredentials(logger, defaultClientSettings); + assertThat(defaultCredentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } - public void testRepositorySettingsCredentialsMissingSecret() { - Settings repositorySettings = Settings.builder().put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key").build(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret")); - assertThat(e.getMessage(), containsString("must be accompanied by setting [secret_key]")); + public void testSetDefaultCredential() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String awsAccessKey = randomAlphaOfLength(8); + final String awsSecretKey = randomAlphaOfLength(8); + secureSettings.setString("s3.client.default.access_key", awsAccessKey); + secureSettings.setString("s3.client.default.secret_key", awsSecretKey); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + final Map allClientsSettings = S3ClientSettings.load(settings); + assertThat(allClientsSettings.size(), is(1)); + // test default exists and is an Instance provider + final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); + final AWSCredentialsProvider defaultCredentialsProvider = InternalAwsS3Service.buildCredentials(logger, defaultClientSettings); + assertThat(defaultCredentialsProvider, instanceOf(StaticCredentialsProvider.class)); + assertThat(defaultCredentialsProvider.getCredentials().getAWSAccessKeyId(), is(awsAccessKey)); + assertThat(defaultCredentialsProvider.getCredentials().getAWSSecretKey(), is(awsSecretKey)); } - private void assertCredentials(Settings singleRepositorySettings, Settings settings, - String expectedKey, String expectedSecret) { - String configName = InternalAwsS3Service.CLIENT_NAME.get(singleRepositorySettings); - S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); - AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, - clientSettings, singleRepositorySettings).getCredentials(); - assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); - assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); + public void testCredentialsIncomplete() { + final MockSecureSettings 
secureSettings = new MockSecureSettings(); + final String clientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + final boolean missingSecretKey = randomBoolean(); + // set exactly one half of the credentials pair so that loading must fail + if (missingSecretKey) { + secureSettings.setString("s3.client." + clientName + ".access_key", "aws_access_key"); + } else { + secureSettings.setString("s3.client." + clientName + ".secret_key", "aws_secret_key"); + } + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + final Exception e = expectThrows(IllegalArgumentException.class, () -> S3ClientSettings.load(settings)); + if (missingSecretKey) { + assertThat(e.getMessage(), containsString("Missing secret key for s3 client [" + clientName + "]")); + } else { + assertThat(e.getMessage(), containsString("Missing access key for s3 client [" + clientName + "]")); + } } public void testAWSDefaultConfiguration() { @@ -98,10 +112,10 @@ public void testAWSDefaultConfiguration() { } public void testAWSConfigurationWithAwsSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); + final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("s3.client.default.proxy.username", "aws_proxy_username"); secureSettings.setString("s3.client.default.proxy.password", "aws_proxy_password"); - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .setSecureSettings(secureSettings) .put("s3.client.default.protocol", "http") .put("s3.client.default.proxy.host", "aws_proxy_host") @@ -113,7 +127,7 @@ public void testAWSConfigurationWithAwsSettings() { } public void testRepositoryMaxRetries() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .put("s3.client.default.max_retries", 5) .build(); launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, @@ -123,7 +137,7 @@ public void testRepositoryMaxRetries() { public void testRepositoryThrottleRetries() { final boolean throttling = randomBoolean(); - Settings settings = Settings.builder().put("s3.client.default.use_throttle_retries", throttling).build(); + final Settings settings = Settings.builder().put("s3.client.default.use_throttle_retries", throttling).build(); launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, null, 3, throttling, 50000); } @@ -137,8 +151,8 @@ private void launchAWSConfigurationTest(Settings settings, boolean expectedUseThrottleRetries, int expectedReadTimeout) { - S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); - ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(clientSettings); + final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); + final ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(clientSettings); assertThat(configuration.getResponseMetadataCacheSize(), is(0)); assertThat(configuration.getProtocol(), is(expectedProtocol)); @@ -152,15 +166,15 @@ private void launchAWSConfigurationTest(Settings settings, } public void testEndpointSetting() { - Settings settings = Settings.builder() + final Settings settings = Settings.builder() .put("s3.client.default.endpoint", "s3.endpoint") .build(); assertEndpoint(Settings.EMPTY, settings, "s3.endpoint"); } private void assertEndpoint(Settings repositorySettings, Settings settings, String expectedEndpoint) { - String configName = InternalAwsS3Service.CLIENT_NAME.get(repositorySettings); - S3ClientSettings clientSettings = 
S3ClientSettings.getClientSettings(settings, configName); + final String configName = S3Repository.CLIENT_NAME.get(repositorySettings); + final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); assertThat(clientSettings.endpoint, is(expectedEndpoint)); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java index 33d5d5fbc2038..d610e6d74a06d 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java @@ -157,6 +157,11 @@ public void deleteObject(final DeleteObjectRequest request) throws AmazonClientE throw exception; } } + + @Override + public void shutdown() { + // TODO check close + } @Override public DeleteObjectsResult deleteObjects(DeleteObjectsRequest request) throws SdkClientException { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java new file mode 100644 index 0000000000000..f3bd894977999 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.s3; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.s3.AmazonS3; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.security.AccessController; +import java.security.PrivilegedAction; + +import static org.hamcrest.Matchers.is; + +@SuppressForbidden(reason = "test fixture requires System.setProperty") +public class RepositoryCredentialsTests extends ESTestCase { + + static { + AccessController.doPrivileged((PrivilegedAction) () -> { + // required for client settings overwriting + System.setProperty("es.allow_insecure_settings", "true"); + return null; + }); + } + + static final class ProxyS3RepositoryPlugin extends S3RepositoryPlugin { + + static final class ClientAndCredentials extends AmazonS3Wrapper { + final AWSCredentialsProvider credentials; + + ClientAndCredentials(AmazonS3 delegate, AWSCredentialsProvider credentials) { + super(delegate); + this.credentials = credentials; + } + + @Override + public boolean doesBucketExist(String bucketName) { + return true; + } + } + + static final class ProxyInternalAwsS3Service extends InternalAwsS3Service { + + ProxyInternalAwsS3Service(Settings settings) { + super(settings); + } + + @Override + AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { + final AmazonS3 client = super.buildClient(credentials, configuration); + return new ClientAndCredentials(client, credentials); + } + + } + + protected ProxyS3RepositoryPlugin(Settings settings) { + super(settings); + } + + @Override + protected AwsS3Service getAwsS3Service(Settings settings) { + return new ProxyInternalAwsS3Service(settings); + } + + } + + public void testRepositoryCredentialsOverrideSecureCredentials() throws IOException { + final int clientsCount = randomIntBetween(0, 4); + final String[] clientNames = new String[clientsCount + 1]; + clientNames[0] = "default"; + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.access_key", "secure_aws_key"); + secureSettings.setString("s3.client.default.secret_key", "secure_aws_secret"); + for (int i = 0; i < clientsCount; i++) { + final String clientName = "client_" + i; + secureSettings.setString("s3.client." + clientName + ".access_key", "secure_aws_key_" + i); + secureSettings.setString("s3.client." 
+ clientName + ".secret_key", "secure_aws_secret_" + i); + clientNames[i + 1] = clientName; + } + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + // repository settings for credentials override node secure settings + final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() + .put(S3Repository.CLIENT_NAME.getKey(), randomFrom(clientNames)) + .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key") + .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build()); + try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); + S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); + AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); + assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + } + assertWarnings( + "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!" + + " See the breaking changes documentation for the next major version.", + "Using s3 access/secret key from repository settings. Instead store these in named clients and" + + " the elasticsearch keystore for secure settings.", + "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!" + + " See the breaking changes documentation for the next major version."); + } + + public void testRepositoryCredentialsOnly() throws IOException { + // repository settings for credentials override node secure settings + final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", + Settings.builder() + .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key") + .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret") + .build()); + try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(Settings.EMPTY); + S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); + AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); + assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + } + assertWarnings( + "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!" + + " See the breaking changes documentation for the next major version.", + "Using s3 access/secret key from repository settings. Instead store these in named clients and" + + " the elasticsearch keystore for secure settings.", + "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!" + + " See the breaking changes documentation for the next major version."); + } + + public void testReinitSecureCredentials() throws IOException { + final String clientName = randomFrom("default", "some_client"); + // initial client node settings + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client." + clientName + ".access_key", "secure_aws_key"); + secureSettings.setString("s3.client." 
+ clientName + ".secret_key", "secure_aws_secret"); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + // repository settings + final Settings.Builder builder = Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName); + final boolean repositorySettings = randomBoolean(); + if (repositorySettings) { + builder.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key"); + builder.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret"); + } + final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build()); + try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); + S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY)) { + try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials + .getCredentials(); + if (repositorySettings) { + assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + } else { + assertThat(credentials.getAWSAccessKeyId(), is("secure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("secure_aws_secret")); + } + // new settings + final MockSecureSettings newSecureSettings = new MockSecureSettings(); + newSecureSettings.setString("s3.client." + clientName + ".access_key", "new_secret_aws_key"); + newSecureSettings.setString("s3.client." + clientName + ".secret_key", "new_secret_aws_secret"); + final Settings newSettings = Settings.builder().setSecureSettings(newSecureSettings).build(); + // reload S3 plugin settings + s3Plugin.reload(newSettings); + // check the not-yet-closed client reference still has the same credentials + if (repositorySettings) { + assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); + } else { + assertThat(credentials.getAWSAccessKeyId(), is("secure_aws_key")); + assertThat(credentials.getAWSSecretKey(), is("secure_aws_secret")); + } + } + // check credentials have been updated + try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials + .getCredentials(); + assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key")); + assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret")); + } + } + if (repositorySettings) { + assertWarnings( + "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!" + + " See the breaking changes documentation for the next major version.", + "Using s3 access/secret key from repository settings. Instead store these in named clients and" + + " the elasticsearch keystore for secure settings.", + "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!" 
+ + " See the breaking changes documentation for the next major version."); + } + } + +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java deleted file mode 100644 index c3e7069fdfd65..0000000000000 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositorySettingsCredentialsTests.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.s3; - -import com.amazonaws.auth.AWSCredentials; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; - -public class RepositorySettingsCredentialsTests extends ESTestCase { - - public void testRepositorySettingsCredentials() { - Settings repositorySettings = Settings.builder() - .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key") - .put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build(); - AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, - S3ClientSettings.getClientSettings(Settings.EMPTY, "default"), repositorySettings).getCredentials(); - assertEquals("aws_key", credentials.getAWSAccessKeyId()); - assertEquals("aws_secret", credentials.getAWSSecretKey()); - assertSettingDeprecationsAndWarnings(new Setting[] { S3Repository.ACCESS_KEY_SETTING, S3Repository.SECRET_KEY_SETTING }, - "Using s3 access/secret key from repository settings. 
" + - "Instead store these in named clients and the elasticsearch keystore for secure settings."); - } -} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java index c760e86d1353f..b2afd826c5b8e 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java @@ -57,6 +57,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.doAnswer; public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase { @@ -74,7 +75,7 @@ public void testExecuteSingleUploadBlobSizeTooLarge() { final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> blobContainer.executeSingleUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize)); assertEquals("Upload request size [" + blobSize + "] can't be larger than 5gb", e.getMessage()); } @@ -86,7 +87,7 @@ public void testExecuteSingleUploadBlobSizeLargerThanBufferSize() { final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); final String blobName = randomAlphaOfLengthBetween(1, 10); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> blobContainer.executeSingleUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), ByteSizeUnit.MB.toBytes(2))); assertEquals("Upload request size [2097152] can't be larger than buffer size", e.getMessage()); } @@ -121,7 +122,8 @@ public void testExecuteSingleUpload() throws IOException { } final AmazonS3 client = mock(AmazonS3.class); - when(blobStore.client()).thenReturn(client); + final AmazonS3Reference clientReference = new AmazonS3Reference(client); + when(blobStore.clientReference()).thenReturn(clientReference); final ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(PutObjectRequest.class); when(client.putObject(argumentCaptor.capture())).thenReturn(new PutObjectResult()); @@ -146,7 +148,7 @@ public void testExecuteMultipartUploadBlobSizeTooLarge() { final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) ); assertEquals("Multipart upload request size [" + blobSize + "] can't be larger than 5tb", e.getMessage()); @@ -157,7 +159,7 @@ public void testExecuteMultipartUploadBlobSizeTooSmall() { final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> 
blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) ); assertEquals("Multipart upload request size [" + blobSize + "] can't be smaller than 5mb", e.getMessage()); @@ -191,7 +193,8 @@ public void testExecuteMultipartUpload() throws IOException { } final AmazonS3 client = mock(AmazonS3.class); - when(blobStore.client()).thenReturn(client); + final AmazonS3Reference clientReference = new AmazonS3Reference(client); + when(blobStore.clientReference()).thenReturn(clientReference); final ArgumentCaptor initArgCaptor = ArgumentCaptor.forClass(InitiateMultipartUploadRequest.class); final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult(); @@ -201,7 +204,7 @@ public void testExecuteMultipartUpload() throws IOException { final ArgumentCaptor uploadArgCaptor = ArgumentCaptor.forClass(UploadPartRequest.class); final List expectedEtags = new ArrayList<>(); - long partSize = Math.min(bufferSize, blobSize); + final long partSize = Math.min(bufferSize, blobSize); long totalBytes = 0; do { expectedEtags.add(randomAlphaOfLength(50)); @@ -238,7 +241,7 @@ public void testExecuteMultipartUpload() throws IOException { assertEquals(numberOfParts.v1().intValue(), uploadRequests.size()); for (int i = 0; i < uploadRequests.size(); i++) { - UploadPartRequest uploadRequest = uploadRequests.get(i); + final UploadPartRequest uploadRequest = uploadRequests.get(i); assertEquals(bucketName, uploadRequest.getBucketName()); assertEquals(blobPath.buildAsString() + blobName, uploadRequest.getKey()); @@ -260,7 +263,7 @@ public void testExecuteMultipartUpload() throws IOException { assertEquals(blobPath.buildAsString() + blobName, compRequest.getKey()); assertEquals(initResult.getUploadId(), compRequest.getUploadId()); - List actualETags = compRequest.getPartETags().stream().map(PartETag::getETag).collect(Collectors.toList()); + final List actualETags = compRequest.getPartETags().stream().map(PartETag::getETag).collect(Collectors.toList()); assertEquals(expectedEtags, actualETags); } @@ -278,7 +281,11 @@ public void testExecuteMultipartUploadAborted() { when(blobStore.getStorageClass()).thenReturn(randomFrom(StorageClass.values())); final AmazonS3 client = mock(AmazonS3.class); - when(blobStore.client()).thenReturn(client); + final AmazonS3Reference clientReference = new AmazonS3Reference(client); + doAnswer(invocation -> { + clientReference.incRef(); + return clientReference; + }).when(blobStore).clientReference(); final String uploadId = randomAlphaOfLength(25); @@ -360,7 +367,7 @@ public void testExecuteMultipartUploadAborted() { } public void testNumberOfMultipartsWithZeroPartSize() { - IllegalArgumentException e = + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> S3BlobContainer.numberOfMultiparts(randomNonNegativeLong(), 0L)); assertEquals("Part size must be greater than zero", e.getMessage()); } @@ -382,7 +389,7 @@ public void testNumberOfMultiparts() { // Fits in N parts plus a bit more final long remaining = randomIntBetween(1, (size > Integer.MAX_VALUE) ? 
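The `doAnswer`/`incRef` stubbing above encodes the contract that production callers now follow: every `clientReference()` call hands out a reference that must be closed when the operation finishes. A minimal caller-side sketch, assuming `AmazonS3Reference` keeps the `Closeable` acquire/release semantics shown in this patch; the helper class and method are invented for illustration and would live in the same package as `S3BlobStore`:

["source","java"]
----
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

final class RefCountedUploadSketch {

    static PutObjectResult upload(S3BlobStore blobStore, PutObjectRequest request) {
        // clientReference() bumps the ref count; try-with-resources releases it,
        // so a concurrent secure-settings reload cannot shut the client down mid-upload.
        try (AmazonS3Reference clientReference = blobStore.clientReference()) {
            return clientReference.client().putObject(request);
        }
    }
}
----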
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
index e622a5573253d..fecc6ef71953e 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
+++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.repositories.s3;

-import com.amazonaws.services.s3.AmazonS3;
 import com.amazonaws.services.s3.model.CannedAccessControlList;
 import com.amazonaws.services.s3.model.StorageClass;
 import org.elasticsearch.client.node.NodeClient;
@@ -49,7 +48,6 @@
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;

-import static java.util.Collections.emptyMap;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.not;
@@ -91,11 +89,9 @@ public static void wipeRepository() {
     }

     protected void createTestRepository(final String name) {
-        assertAcked(client().admin().cluster().preparePutRepository(name)
-            .setType(S3Repository.TYPE)
-            .setSettings(Settings.builder()
+        assertAcked(client().admin().cluster().preparePutRepository(name).setType(S3Repository.TYPE).setSettings(Settings.builder()
             .put(S3Repository.BUCKET_SETTING.getKey(), bucket)
-            .put(InternalAwsS3Service.CLIENT_NAME.getKey(), client)
+            .put(S3Repository.CLIENT_NAME.getKey(), client)
             .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize)
             .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption)
             .put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL)
@@ -117,13 +113,17 @@ public TestS3RepositoryPlugin(final Settings settings) {

         @Override
         public Map<String, Repository.Factory> getRepositories(final Environment env, final NamedXContentRegistry registry) {
-            return Collections.singletonMap(S3Repository.TYPE, (metadata) ->
-                new S3Repository(metadata, env.settings(), registry, new InternalAwsS3Service(env.settings(), emptyMap()) {
-                    @Override
-                    public synchronized AmazonS3 client(final Settings repositorySettings) {
-                        return new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass);
-                    }
-                }));
+            return Collections.singletonMap(S3Repository.TYPE,
+                    (metadata) -> new S3Repository(metadata, env.settings(), registry, new InternalAwsS3Service(env.settings()) {
+                        @Override
+                        public synchronized AmazonS3Reference client(String clientName) {
+                            return new AmazonS3Reference(new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass));
+                        }
+                    }) {
+                        @Override
+                        void overrideCredentialsFromClusterState(AwsS3Service awsService) {
+                        }
+                    });
         }
     }
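The test registers repositories through the new `client` setting rather than per-repository credentials. A hedged sketch of the equivalent call in an integration test; the repository name and bucket are invented, while the `bucket` and `client` setting keys come from the diff above:

["source","java"]
----
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

public class RepositoryRegistrationSketchIT extends ESIntegTestCase {

    public void testRegisterRepositoryWithNamedClient() {
        // Binds the repository to the named client "default"; that client's
        // credentials live in the keystore and can be reloaded without a restart.
        assertAcked(client().admin().cluster().preparePutRepository("my_backup")
                .setType("s3")
                .setSettings(Settings.builder()
                        .put("bucket", "my-bucket")
                        .put("client", "default")));
    }
}
----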
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java
index 4a23e4efa9a29..a44946b6b3ffa 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java
+++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java
@@ -115,7 +115,15 @@ public static S3BlobStore randomMockS3BlobStore() {
             storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString();
         }

-        AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass);
-        return new S3BlobStore(Settings.EMPTY, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
+        final String theClientName = randomAlphaOfLength(4);
+        final AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass);
+        final AwsS3Service service = new InternalAwsS3Service(Settings.EMPTY) {
+            @Override
+            public synchronized AmazonS3Reference client(String clientName) {
+                assert theClientName.equals(clientName);
+                return new AmazonS3Reference(client);
+            }
+        };
+        return new S3BlobStore(Settings.EMPTY, service, theClientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
     }
 }
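For orientation, a sketch of the production wiring this helper emulates. Only the constructor shape is taken from the diff above; every concrete argument value here is an assumption:

["source","java"]
----
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

final class BlobStoreWiringSketch {

    static S3BlobStore wire() {
        // The store no longer holds a raw AmazonS3 client; it asks the service
        // for a fresh ref-counted reference on every clientReference() call.
        final AwsS3Service service = new InternalAwsS3Service(Settings.EMPTY);
        return new S3BlobStore(Settings.EMPTY, service,
                "default",                              // client name, resolved per call
                "my-bucket",                            // assumed bucket
                false,                                  // server-side encryption off
                new ByteSizeValue(5, ByteSizeUnit.MB),  // buffer size
                "private",                              // canned ACL
                "standard");                            // storage class
    }
}
----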
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java
index 7da65c27d8194..5c0aada66585c 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java
+++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.repositories.s3;

 import com.amazonaws.services.s3.AbstractAmazonS3;
-import com.amazonaws.services.s3.AmazonS3;
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
@@ -31,18 +30,25 @@
 import org.elasticsearch.repositories.RepositoryException;
 import org.elasticsearch.test.ESTestCase;
 import org.hamcrest.Matchers;
-
 import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;

 import static org.hamcrest.Matchers.containsString;

 public class S3RepositoryTests extends ESTestCase {

     private static class DummyS3Client extends AbstractAmazonS3 {
+
         @Override
         public boolean doesBucketExist(String bucketName) {
             return true;
         }
+
+        @Override
+        public void shutdown() {
+            // TODO check is closed
+        }
     }

     private static class DummyS3Service extends AbstractLifecycleComponent implements AwsS3Service {
@@ -56,53 +62,70 @@ protected void doStop() {}
         @Override
         protected void doClose() {}
         @Override
-        public AmazonS3 client(Settings settings) {
-            return new DummyS3Client();
+        public AmazonS3Reference client(String clientName) {
+            return new AmazonS3Reference(new DummyS3Client());
+        }
+
+        @Override
+        public Map<String, S3ClientSettings> refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings) {
+            return Collections.emptyMap();
+        }
+
+        @Override
+        public void close() {
         }
     }

     public void testInvalidChunkBufferSizeSettings() throws IOException {
         // chunk < buffer should fail
-        assertInvalidBuffer(10, 5, RepositoryException.class, "chunk_size (5mb) can't be lower than buffer_size (10mb).");
+        final Settings s1 = bufferAndChunkSettings(10, 5);
+        final Exception e1 = expectThrows(RepositoryException.class,
+                () -> new S3Repository(getRepositoryMetaData(s1), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()));
+        assertThat(e1.getMessage(), containsString("chunk_size (5mb) can't be lower than buffer_size (10mb)"));
         // chunk > buffer should pass
-        assertValidBuffer(5, 10);
+        final Settings s2 = bufferAndChunkSettings(5, 10);
+        new S3Repository(getRepositoryMetaData(s2), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close();
         // chunk = buffer should pass
-        assertValidBuffer(5, 5);
+        final Settings s3 = bufferAndChunkSettings(5, 5);
+        new S3Repository(getRepositoryMetaData(s3), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close();
         // buffer < 5mb should fail
-        assertInvalidBuffer(4, 10, IllegalArgumentException.class,
-            "failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]");
-        // chunk > 5tb should fail
-        assertInvalidBuffer(5, 6000000, IllegalArgumentException.class,
-            "failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]");
+        final Settings s4 = bufferAndChunkSettings(4, 10);
+        final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class,
+                () -> new S3Repository(getRepositoryMetaData(s4), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())
+                        .close());
+        assertThat(e2.getMessage(), containsString("failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]"));
+        final Settings s5 = bufferAndChunkSettings(5, 6000000);
+        final IllegalArgumentException e3 = expectThrows(IllegalArgumentException.class,
+                () -> new S3Repository(getRepositoryMetaData(s5), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())
+                        .close());
+        assertThat(e3.getMessage(), containsString("failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]"));
     }

-    private void assertValidBuffer(long bufferMB, long chunkMB) throws IOException {
-        RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
-            .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB).getStringRep())
-            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB).getStringRep()).build());
-        new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service());
+    private Settings bufferAndChunkSettings(long buffer, long chunk) {
+        return Settings.builder()
+                .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(buffer, ByteSizeUnit.MB).getStringRep())
+                .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunk, ByteSizeUnit.MB).getStringRep())
+                .build();
     }

-    private void assertInvalidBuffer(int bufferMB, int chunkMB, Class<? extends Exception> clazz, String msg) throws IOException {
-        RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
-            .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB).getStringRep())
-            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB).getStringRep()).build());
-
-        Exception e = expectThrows(clazz, () -> new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY,
-            new DummyS3Service()));
-        assertThat(e.getMessage(), containsString(msg));
+    private RepositoryMetaData getRepositoryMetaData(Settings settings) {
+        return new RepositoryMetaData("dummy-repo", "mock", Settings.builder().put(settings).build());
     }

     public void testBasePathSetting() throws IOException {
-        RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
-            .put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build());
-        S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service());
-        assertEquals("foo/bar/", s3repo.basePath().buildAsString());
+ final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() + .put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build()); + try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) { + assertEquals("foo/bar/", s3repo.basePath().buildAsString()); + } } - public void testDefaultBufferSize() { - ByteSizeValue defaultBufferSize = S3Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY); - assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(new ByteSizeValue(100, ByteSizeUnit.MB))); - assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(new ByteSizeValue(5, ByteSizeUnit.MB))); + public void testDefaultBufferSize() throws IOException { + final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) { + final long defaultBufferSize = ((S3BlobStore) s3repo.blobStore()).bufferSizeInBytes(); + assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(100L * 1024 * 1024)); + assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(5L * 1024 * 1024)); + } } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java index b483c168d6d77..123d417333bb0 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java @@ -51,7 +51,7 @@ public class TestAmazonS3 extends AmazonS3Wrapper { private double writeFailureRate = 0.0; private double readFailureRate = 0.0; - private String randomPrefix; + private final String randomPrefix; ConcurrentMap accessCounts = new ConcurrentHashMap(); @@ -76,18 +76,18 @@ public TestAmazonS3(AmazonS3 delegate, Settings settings) { @Override public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata) throws AmazonClientException, AmazonServiceException { if (shouldFail(bucketName, key, writeFailureRate)) { - long length = metadata.getContentLength(); - long partToRead = (long) (length * randomDouble()); - byte[] buffer = new byte[1024]; + final long length = metadata.getContentLength(); + final long partToRead = (long) (length * randomDouble()); + final byte[] buffer = new byte[1024]; for (long cur = 0; cur < partToRead; cur += buffer.length) { try { - input.read(buffer, 0, (int) (partToRead - cur > buffer.length ? buffer.length : partToRead - cur)); - } catch (IOException ex) { + input.read(buffer, 0, (int) ((partToRead - cur) > buffer.length ? 
buffer.length : partToRead - cur)); + } catch (final IOException ex) { throw new ElasticsearchException("cannot read input stream", ex); } } logger.info("--> random write failure on putObject method: throwing an exception for [bucket={}, key={}]", bucketName, key); - AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception"); + final AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception"); ex.setStatusCode(400); ex.setErrorCode("RequestTimeout"); throw ex; @@ -99,18 +99,18 @@ public PutObjectResult putObject(String bucketName, String key, InputStream inpu @Override public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException { if (shouldFail(request.getBucketName(), request.getKey(), writeFailureRate)) { - long length = request.getPartSize(); - long partToRead = (long) (length * randomDouble()); - byte[] buffer = new byte[1024]; + final long length = request.getPartSize(); + final long partToRead = (long) (length * randomDouble()); + final byte[] buffer = new byte[1024]; for (long cur = 0; cur < partToRead; cur += buffer.length) { try (InputStream input = request.getInputStream()){ - input.read(buffer, 0, (int) (partToRead - cur > buffer.length ? buffer.length : partToRead - cur)); - } catch (IOException ex) { + input.read(buffer, 0, (int) ((partToRead - cur) > buffer.length ? buffer.length : partToRead - cur)); + } catch (final IOException ex) { throw new ElasticsearchException("cannot read input stream", ex); } } logger.info("--> random write failure on uploadPart method: throwing an exception for [bucket={}, key={}]", request.getBucketName(), request.getKey()); - AmazonS3Exception ex = new AmazonS3Exception("Random S3 write exception"); + final AmazonS3Exception ex = new AmazonS3Exception("Random S3 write exception"); ex.setStatusCode(400); ex.setErrorCode("RequestTimeout"); throw ex; @@ -123,7 +123,7 @@ public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClien public S3Object getObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException { if (shouldFail(bucketName, key, readFailureRate)) { logger.info("--> random read failure on getObject method: throwing an exception for [bucket={}, key={}]", bucketName, key); - AmazonS3Exception ex = new AmazonS3Exception("Random S3 read exception"); + final AmazonS3Exception ex = new AmazonS3Exception("Random S3 read exception"); ex.setStatusCode(404); throw ex; } else { @@ -135,7 +135,7 @@ private boolean shouldFail(String bucketName, String key, double probability) { if (probability > 0.0) { String path = randomPrefix + "-" + bucketName + "+" + key; path += "/" + incrementAndGet(path); - return Math.abs(hashCode(path)) < Integer.MAX_VALUE * probability; + return Math.abs(hashCode(path)) < (Integer.MAX_VALUE * probability); } else { return false; } @@ -143,14 +143,14 @@ private boolean shouldFail(String bucketName, String key, double probability) { private int hashCode(String path) { try { - MessageDigest digest = MessageDigest.getInstance("MD5"); - byte[] bytes = digest.digest(path.getBytes("UTF-8")); + final MessageDigest digest = MessageDigest.getInstance("MD5"); + final byte[] bytes = digest.digest(path.getBytes("UTF-8")); int i = 0; return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16) | ((bytes[i++] & 0xFF) << 8) | (bytes[i++] & 0xFF); - } catch (UnsupportedEncodingException ex) { + } catch (final UnsupportedEncodingException ex) { throw new ElasticsearchException("cannot calculate hashcode", ex); - } 
catch (NoSuchAlgorithmException ex) { + } catch (final NoSuchAlgorithmException ex) { throw new ElasticsearchException("cannot calculate hashcode", ex); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java index c5012d9c68bc7..f376f73820624 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java @@ -22,45 +22,39 @@ import java.util.IdentityHashMap; import com.amazonaws.services.s3.AmazonS3; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; public class TestAwsS3Service extends InternalAwsS3Service { public static class TestPlugin extends S3RepositoryPlugin { public TestPlugin(Settings settings) { - super(settings); - } - @Override - protected AwsS3Service createStorageService(Settings settings) { - return new TestAwsS3Service(settings); + super(new TestAwsS3Service(settings)); } } - IdentityHashMap clients = new IdentityHashMap<>(); + IdentityHashMap clients = new IdentityHashMap<>(); public TestAwsS3Service(Settings settings) { - super(settings, S3ClientSettings.load(settings)); + super(settings); } @Override - public synchronized AmazonS3 client(Settings repositorySettings) { - return cachedWrapper(super.client(repositorySettings)); + public synchronized AmazonS3Reference client(String clientName) { + return new AmazonS3Reference(cachedWrapper(super.client(clientName))); } - private AmazonS3 cachedWrapper(AmazonS3 client) { - TestAmazonS3 wrapper = clients.get(client); + private AmazonS3 cachedWrapper(AmazonS3Reference clientReference) { + TestAmazonS3 wrapper = clients.get(clientReference); if (wrapper == null) { - wrapper = new TestAmazonS3(client, settings); - clients.put(client, wrapper); + wrapper = new TestAmazonS3(clientReference.client(), settings); + clients.put(clientReference, wrapper); } return wrapper; } @Override - protected synchronized void doClose() throws ElasticsearchException { - super.doClose(); + protected synchronized void releaseCachedClients() { + super.releaseCachedClients(); clients.clear(); } - } diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 17acf7c10f534..7ddb39b6d6225 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -29,6 +29,8 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; +import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; @@ -241,6 +243,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestPendingClusterTasksAction; import org.elasticsearch.rest.action.admin.cluster.RestPutRepositoryAction; import 
org.elasticsearch.rest.action.admin.cluster.RestPutStoredScriptAction; +import org.elasticsearch.rest.action.admin.cluster.RestReloadSecureSettingsAction; import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction; import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; @@ -491,6 +494,7 @@ public void reg actions.register(ExplainAction.INSTANCE, TransportExplainAction.class); actions.register(ClearScrollAction.INSTANCE, TransportClearScrollAction.class); actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class); + actions.register(NodesReloadSecureSettingsAction.INSTANCE, TransportNodesReloadSecureSettingsAction.class); //Indexed scripts actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class); @@ -610,6 +614,8 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRecoveryAction(settings, restController)); + registerHandler.accept(new RestReloadSecureSettingsAction(settings, restController)); + // Scripts API registerHandler.accept(new RestGetStoredScriptAction(settings, restController)); registerHandler.accept(new RestPutStoredScriptAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java new file mode 100644 index 0000000000000..67fb47260b889 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.action.admin.cluster.node.reload;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
+
+public class NodesReloadSecureSettingsAction
+        extends Action<NodesReloadSecureSettingsRequest, NodesReloadSecureSettingsResponse, NodesReloadSecureSettingsRequestBuilder> {
+
+    public static final NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction();
+    public static final String NAME = "cluster:admin/nodes/reload_secure_settings";
+
+    private NodesReloadSecureSettingsAction() {
+        super(NAME);
+    }
+
+    @Override
+    public NodesReloadSecureSettingsResponse newResponse() {
+        return new NodesReloadSecureSettingsResponse();
+    }
+
+    @Override
+    public NodesReloadSecureSettingsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new NodesReloadSecureSettingsRequestBuilder(client, this);
+    }
+
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java
new file mode 100644
index 0000000000000..50df7b1bb26e0
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.reload;
+
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.nodes.BaseNodesRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.SecureString;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.CharBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Request for a reload secure settings action.
+ */
+public class NodesReloadSecureSettingsRequest extends BaseNodesRequest<NodesReloadSecureSettingsRequest> {
+
+    /**
+     * The password that is broadcast to all nodes, but is never stored on
+     * persistent storage. The password is used to reread and decrypt the contents
+     * of the node's keystore (backing the implementation of
+     * {@code SecureSettings}).
+     */
+    private SecureString secureSettingsPassword;
+
+    public NodesReloadSecureSettingsRequest() {
+    }
+
+    /**
+     * Reload secure settings only on certain nodes, based on the node ids
+     * specified. If none are passed, secure settings will be reloaded on all the
+     * nodes.
+     */
+    public NodesReloadSecureSettingsRequest(String...
nodesIds) { + super(nodesIds); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (secureSettingsPassword == null) { + validationException = addValidationError("secure settings password cannot be null (use empty string instead)", + validationException); + } + return validationException; + } + + public SecureString secureSettingsPassword() { + return secureSettingsPassword; + } + + public NodesReloadSecureSettingsRequest secureStorePassword(SecureString secureStorePassword) { + this.secureSettingsPassword = secureStorePassword; + return this; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + final byte[] passwordBytes = in.readByteArray(); + try { + this.secureSettingsPassword = new SecureString(utf8BytesToChars(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + final byte[] passwordBytes = charsToUtf8Bytes(this.secureSettingsPassword.getChars()); + try { + out.writeByteArray(passwordBytes); + } finally { + Arrays.fill(passwordBytes, (byte) 0); + } + } + + /** + * Encodes the provided char[] to a UTF-8 byte[]. This is done while avoiding + * conversions to String. The provided char[] is not modified by this method, so + * the caller needs to take care of clearing the value if it is sensitive. + */ + private static byte[] charsToUtf8Bytes(char[] chars) { + final CharBuffer charBuffer = CharBuffer.wrap(chars); + final ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer); + final byte[] bytes; + if (byteBuffer.hasArray()) { + // there is no guarantee that the byte buffers backing array is the right size + // so we need to make a copy + bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit()); + Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data + } else { + final int length = byteBuffer.limit() - byteBuffer.position(); + bytes = new byte[length]; + byteBuffer.get(bytes); + // if the buffer is not read only we can reset and fill with 0's + if (byteBuffer.isReadOnly() == false) { + byteBuffer.clear(); // reset + for (int i = 0; i < byteBuffer.limit(); i++) { + byteBuffer.put((byte) 0); + } + } + } + return bytes; + } + + /** + * Decodes the provided byte[] to a UTF-8 char[]. This is done while avoiding + * conversions to String. The provided byte[] is not modified by this method, so + * the caller needs to take care of clearing the value if it is sensitive. 
+ */ + public static char[] utf8BytesToChars(byte[] utf8Bytes) { + final ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes); + final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); + final char[] chars; + if (charBuffer.hasArray()) { + // there is no guarantee that the char buffers backing array is the right size + // so we need to make a copy + chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit()); + Arrays.fill(charBuffer.array(), (char) 0); // clear sensitive data + } else { + final int length = charBuffer.limit() - charBuffer.position(); + chars = new char[length]; + charBuffer.get(chars); + // if the buffer is not read only we can reset and fill with 0's + if (charBuffer.isReadOnly() == false) { + charBuffer.clear(); // reset + for (int i = 0; i < charBuffer.limit(); i++) { + charBuffer.put((char) 0); + } + } + } + return chars; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java new file mode 100644 index 0000000000000..b5f2f73e56f51 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.action.admin.cluster.node.reload;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Objects;
+
+/**
+ * Builder for the reload secure settings nodes request
+ */
+public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder<NodesReloadSecureSettingsRequest,
+        NodesReloadSecureSettingsResponse, NodesReloadSecureSettingsRequestBuilder> {
+
+    public static final String SECURE_SETTINGS_PASSWORD_FIELD_NAME = "secure_settings_password";
+
+    public NodesReloadSecureSettingsRequestBuilder(ElasticsearchClient client, NodesReloadSecureSettingsAction action) {
+        super(client, action, new NodesReloadSecureSettingsRequest());
+    }
+
+    public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(SecureString secureStorePassword) {
+        request.secureStorePassword(secureStorePassword);
+        return this;
+    }
+
+    public NodesReloadSecureSettingsRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException {
+        Objects.requireNonNull(xContentType);
+        // EMPTY is ok here because we never call namedObject
+        try (InputStream stream = source.streamInput();
+             XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY,
+                     LoggingDeprecationHandler.INSTANCE, stream)) {
+            XContentParser.Token token;
+            token = parser.nextToken();
+            if (token != XContentParser.Token.START_OBJECT) {
+                throw new ElasticsearchParseException("expected an object, but found token [{}]", token);
+            }
+            token = parser.nextToken();
+            if (token != XContentParser.Token.FIELD_NAME || false == SECURE_SETTINGS_PASSWORD_FIELD_NAME.equals(parser.currentName())) {
+                throw new ElasticsearchParseException("expected a field named [{}], but found [{}]", SECURE_SETTINGS_PASSWORD_FIELD_NAME,
+                        token);
+            }
+            token = parser.nextToken();
+            if (token != XContentParser.Token.VALUE_STRING) {
+                throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead",
+                        SECURE_SETTINGS_PASSWORD_FIELD_NAME, token);
+            }
+            final String password = parser.text();
+            setSecureStorePassword(new SecureString(password.toCharArray()));
+            token = parser.nextToken();
+            if (token != XContentParser.Token.END_OBJECT) {
+                throw new ElasticsearchParseException("expected end of object, but found token [{}]", token);
+            }
+        }
+        return this;
+    }
+
+}
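The parser accepts exactly one `secure_settings_password` field. A hedged usage sketch, with an invented password value and an assumed `ElasticsearchClient` handle:

["source","java"]
----
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction;
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;

final class ReloadRequestSketch {

    static NodesReloadSecureSettingsResponse reloadAllNodes(ElasticsearchClient client) throws IOException {
        // Field name taken from the builder above; the password value is illustrative.
        final BytesArray body = new BytesArray("{\"secure_settings_password\": \"keystore-password\"}");
        return new NodesReloadSecureSettingsRequestBuilder(client, NodesReloadSecureSettingsAction.INSTANCE)
                .source(body, XContentType.JSON) // parses the single expected field into a SecureString
                .get();                          // executes and waits for the per-node responses
    }
}
----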
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java
new file mode 100644
index 0000000000000..394b1f10dc2d9
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.reload;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.support.nodes.BaseNodeResponse;
+import org.elasticsearch.action.support.nodes.BaseNodesResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * The response for the reload secure settings action
+ */
+public class NodesReloadSecureSettingsResponse extends BaseNodesResponse<NodesReloadSecureSettingsResponse.NodeResponse>
+        implements ToXContentFragment {
+
+    public NodesReloadSecureSettingsResponse() {
+    }
+
+    public NodesReloadSecureSettingsResponse(ClusterName clusterName, List<NodeResponse> nodes, List<FailedNodeException> failures) {
+        super(clusterName, nodes, failures);
+    }
+
+    @Override
+    protected List<NodesReloadSecureSettingsResponse.NodeResponse> readNodesFrom(StreamInput in) throws IOException {
+        return in.readList(NodeResponse::readNodeResponse);
+    }
+
+    @Override
+    protected void writeNodesTo(StreamOutput out, List<NodesReloadSecureSettingsResponse.NodeResponse> nodes) throws IOException {
+        out.writeStreamableList(nodes);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject("nodes");
+        for (final NodesReloadSecureSettingsResponse.NodeResponse node : getNodes()) {
+            builder.startObject(node.getNode().getId());
+            builder.field("name", node.getNode().getName());
+            final Exception e = node.reloadException();
+            if (e != null) {
+                builder.startObject("reload_exception");
+                ElasticsearchException.generateThrowableXContent(builder, params, e);
+                builder.endObject();
+            }
+            builder.endObject();
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public String toString() {
+        try {
+            final XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+            builder.startObject();
+            toXContent(builder, EMPTY_PARAMS);
+            builder.endObject();
+            return Strings.toString(builder);
+        } catch (final IOException e) {
+            return "{ \"error\" : \"" + e.getMessage() + "\"}";
+        }
+    }
+
+    public static class NodeResponse extends BaseNodeResponse {
+
+        private Exception reloadException = null;
+
+        public NodeResponse() {
+        }
+
+        public NodeResponse(DiscoveryNode node, Exception reloadException) {
+            super(node);
+            this.reloadException = reloadException;
+        }
+
+        public Exception reloadException() {
+            return this.reloadException;
+        }
+
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            super.readFrom(in);
+            if (in.readBoolean()) {
+                reloadException = in.readException();
+            }
+        }
+
+        @Override
+        public void writeTo(StreamOutput
out) throws IOException { + super.writeTo(out); + if (reloadException != null) { + out.writeBoolean(true); + out.writeException(reloadException); + } else { + out.writeBoolean(false); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final NodesReloadSecureSettingsResponse.NodeResponse that = (NodesReloadSecureSettingsResponse.NodeResponse) o; + return reloadException != null ? reloadException.equals(that.reloadException) : that.reloadException == null; + } + + @Override + public int hashCode() { + return reloadException != null ? reloadException.hashCode() : 0; + } + + public static NodeResponse readNodeResponse(StreamInput in) throws IOException { + final NodeResponse node = new NodeResponse(); + node.readFrom(in); + return node; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java new file mode 100644 index 0000000000000..cb870e58d3187 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.action.admin.cluster.node.reload;
+
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.logging.log4j.util.Supplier;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.nodes.BaseNodeRequest;
+import org.elasticsearch.action.support.nodes.TransportNodesAction;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.KeyStoreWrapper;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.plugins.ReloadablePlugin;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class TransportNodesReloadSecureSettingsAction extends TransportNodesAction<NodesReloadSecureSettingsRequest,
+        NodesReloadSecureSettingsResponse,
+        TransportNodesReloadSecureSettingsAction.NodeRequest,
+        NodesReloadSecureSettingsResponse.NodeResponse> {
+
+    private final Environment environment;
+    private final PluginsService pluginsService;
+
+    @Inject
+    public TransportNodesReloadSecureSettingsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+                                                    TransportService transportService, ActionFilters actionFilters,
+                                                    IndexNameExpressionResolver indexNameExpressionResolver, Environment environment,
+                                                    PluginsService pluginService) {
+        super(settings, NodesReloadSecureSettingsAction.NAME, threadPool, clusterService, transportService, actionFilters,
+              indexNameExpressionResolver, NodesReloadSecureSettingsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC,
+              NodesReloadSecureSettingsResponse.NodeResponse.class);
+        this.environment = environment;
+        this.pluginsService = pluginService;
+    }
+
+    @Override
+    protected NodesReloadSecureSettingsResponse newResponse(NodesReloadSecureSettingsRequest request,
+                                                            List<NodesReloadSecureSettingsResponse.NodeResponse> responses,
+                                                            List<FailedNodeException> failures) {
+        return new NodesReloadSecureSettingsResponse(clusterService.getClusterName(), responses, failures);
+    }
+
+    @Override
+    protected NodeRequest newNodeRequest(String nodeId, NodesReloadSecureSettingsRequest request) {
+        return new NodeRequest(nodeId, request);
+    }
+
+    @Override
+    protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse() {
+        return new NodesReloadSecureSettingsResponse.NodeResponse();
+    }
+
+    @Override
+    protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest) {
+        final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request;
+        final SecureString secureSettingsPassword = request.secureSettingsPassword();
+        try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) {
+            // reread keystore from config file
+            if (keystore == null) {
+                return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(),
+                        new IllegalStateException("Keystore is missing"));
+            }
+            // decrypt the keystore using the password from the request
+            keystore.decrypt(secureSettingsPassword.getChars());
+            // add the keystore to the original node settings object
+            final Settings settingsWithKeystore = Settings.builder()
+                .put(environment.settings(), false)
+                .setSecureSettings(keystore)
+                .build();
+            final List<Exception> exceptions = new ArrayList<>();
+            // broadcast the new settings object (with the open embedded keystore) to all reloadable plugins
+            pluginsService.filterPlugins(ReloadablePlugin.class).stream().forEach(p -> {
+                try {
+                    p.reload(settingsWithKeystore);
+                } catch (final Exception e) {
+                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("Reload failed for plugin [{}]",
+                            p.getClass().getSimpleName()), e);
+                    exceptions.add(e);
+                }
+            });
+            ExceptionsHelper.rethrowAndSuppress(exceptions);
+            return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), null);
+        } catch (final Exception e) {
+            return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), e);
+        }
+    }
+
+    public static class NodeRequest extends BaseNodeRequest {
+
+        NodesReloadSecureSettingsRequest request;
+
+        public NodeRequest() {
+        }
+
+        NodeRequest(String nodeId, NodesReloadSecureSettingsRequest request) {
+            super(nodeId);
+            this.request = request;
+        }
+
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            super.readFrom(in);
+            request = new NodesReloadSecureSettingsRequest();
+            request.readFrom(in);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            request.writeTo(out);
+        }
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
index 5b21036b8cd4f..949b0110fff20 100644
--- a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
+++ b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
@@ -33,6 +33,7 @@
 import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
 import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder;
 import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
@@ -185,6 +186,11 @@ public interface ClusterAdminClient extends ElasticsearchClient {
      */
     ClusterUpdateSettingsRequestBuilder prepareUpdateSettings();

+    /**
+     * Re-initialize each cluster node and pass it the secure store password.
+     */
+    NodesReloadSecureSettingsRequestBuilder prepareReloadSecureSettings();
+
     /**
      * Reroutes allocation of shards. Advance API.
*/ diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index c0da35a307981..09bc1446a3592 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -41,6 +41,8 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; @@ -783,6 +785,11 @@ public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() { return new ClusterUpdateSettingsRequestBuilder(this, ClusterUpdateSettingsAction.INSTANCE); } + @Override + public NodesReloadSecureSettingsRequestBuilder prepareReloadSecureSettings() { + return new NodesReloadSecureSettingsRequestBuilder(this, NodesReloadSecureSettingsAction.INSTANCE); + } + @Override public ActionFuture nodesInfo(final NodesInfoRequest request) { return execute(NodesInfoAction.INSTANCE, request); diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index f47760491f8d5..3a8a06949b29c 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -308,7 +308,9 @@ public void decrypt(char[] password) throws GeneralSecurityException, IOExceptio } if (formatVersion <= 2) { decryptLegacyEntries(); - assert password.length == 0; + if (password.length != 0) { + throw new IllegalArgumentException("Keystore format does not accept non-empty passwords"); + } return; } diff --git a/server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java b/server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java new file mode 100644 index 0000000000000..ad3a3bcc299d0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/LazyInitializable.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.common.util;
+
+import org.elasticsearch.common.CheckedSupplier;
+
+import java.util.Objects;
+import java.util.function.Consumer;
+
+/**
+ * Encapsulates a {@link CheckedSupplier} which is lazily invoked once on the
+ * first call to {@code #getOrCompute()}. The value which the
+ * supplier returns is memoized and will be served until
+ * {@code #reset()} is called. Each value returned by {@code #getOrCompute()},
+ * newly minted or cached, will be passed to the onGet
+ * {@link Consumer}. On {@code #reset()} the value will be passed to the
+ * onReset {@code Consumer} and the next {@code #getOrCompute()}
+ * will regenerate the value.
+ */
+public final class LazyInitializable<T, E extends Exception> {
+
+    private final CheckedSupplier<T, E> supplier;
+    private final Consumer<T> onGet;
+    private final Consumer<T> onReset;
+    private volatile T value;
+
+    /**
+     * Creates the simple LazyInitializable instance.
+     *
+     * @param supplier
+     *            The {@code CheckedSupplier} to generate values which will be
+     *            served on {@code #getOrCompute()} invocations.
+     */
+    public LazyInitializable(CheckedSupplier<T, E> supplier) {
+        this(supplier, v -> {}, v -> {});
+    }
+
+    /**
+     * Creates the complete LazyInitializable instance.
+     *
+     * @param supplier
+     *            The {@code CheckedSupplier} to generate values which will be
+     *            served on {@code #getOrCompute()} invocations.
+     * @param onGet
+     *            A {@code Consumer} which is called on each value, newly forged or
+     *            stale, that is returned by {@code #getOrCompute()}
+     * @param onReset
+     *            A {@code Consumer} which is invoked on the value that will be
+     *            erased when calling {@code #reset()}
+     */
+    public LazyInitializable(CheckedSupplier<T, E> supplier, Consumer<T> onGet, Consumer<T> onReset) {
+        this.supplier = supplier;
+        this.onGet = onGet;
+        this.onReset = onReset;
+    }
+
+    /**
+     * Returns a value that was created by supplier. The value might
+     * have been previously created; if not, it will be created now, in a
+     * thread-safe manner.
+     */
+    public T getOrCompute() throws E {
+        final T readOnce = value; // Read volatile just once...
+        final T result = readOnce == null ? maybeCompute(supplier) : readOnce;
+        onGet.accept(result);
+        return result;
+    }
+
+    /**
+     * Clears the value, if it has been previously created by calling
+     * {@code #getOrCompute()}. The onReset will be called on this
+     * value. The next call to {@code #getOrCompute()} will recreate the value.
+     */
+    public synchronized void reset() {
+        if (value != null) {
+            onReset.accept(value);
+            value = null;
+        }
+    }
+
+    /**
+     * Creates a new value thread safely.
+     */
+    private synchronized T maybeCompute(CheckedSupplier<T, E> supplier) throws E {
+        if (value == null) {
+            value = Objects.requireNonNull(supplier.get());
+        }
+        return value;
+    }
+
+}
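A usage sketch may help: the pattern the cloud plugins adopt is to cache an expensive client until the next secure-settings reload, at which point `reset()` disposes of it. `ExpensiveClient` and its construction are invented stand-ins:

["source","java"]
----
import org.elasticsearch.common.util.LazyInitializable;

import java.io.IOException;

final class ClientHolder {

    // Hypothetical client type; anything costly to build and needing cleanup fits here.
    private final LazyInitializable<ExpensiveClient, IOException> lazyClient =
            new LazyInitializable<>(
                    ClientHolder::buildClient,   // runs at most once per reset() cycle
                    client -> {},                // onGet: nothing extra per access
                    ExpensiveClient::close);     // onReset: dispose of the stale instance

    ExpensiveClient client() throws IOException {
        return lazyClient.getOrCompute();
    }

    void settingsReloaded() {
        lazyClient.reset(); // the next client() call rebuilds with the fresh settings
    }

    private static ExpensiveClient buildClient() throws IOException {
        return new ExpensiveClient(); // assumed construction from current settings
    }

    // Minimal stand-in so the sketch is self-contained.
    static final class ExpensiveClient implements java.io.Closeable {
        @Override
        public void close() {}
    }
}
----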
diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java
index 0ef703448b799..65d47682a95c0 100644
--- a/server/src/main/java/org/elasticsearch/plugins/Plugin.java
+++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java
@@ -74,6 +74,7 @@
  * <li>{@link RepositoryPlugin}
  * <li>{@link ScriptPlugin}
  * <li>{@link SearchPlugin}
+ * <li>{@link ReloadablePlugin}
  * </ul>
  * <p>

 * In addition to extension points this class also declares some {@code @Deprecated} {@code public final void onModule}
 * methods. These methods should cause any extensions of {@linkplain Plugin} that used the pre-5.x style extension syntax
 * to fail to build and point the
diff --git a/server/src/main/java/org/elasticsearch/plugins/ReloadablePlugin.java b/server/src/main/java/org/elasticsearch/plugins/ReloadablePlugin.java
new file mode 100644
index 0000000000000..86d7759185e69
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/plugins/ReloadablePlugin.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * An extension point for {@link Plugin}s that can be reloaded. There is no
+ * clear definition about what reloading a plugin actually means. When a plugin
+ * is reloaded it might rebuild any internal members. Plugins usually implement
+ * this interface in order to reread the values of {@code SecureSetting}s and
+ * then rebuild any dependent internal members.
+ */
+public interface ReloadablePlugin {
+    /**
+     * Called to trigger the rebuild of the plugin's internal members. The reload
+     * operation is required to have been completed when the method returns.
+     * Strictly speaking, the settings argument should not be accessed
+     * outside of this method's call stack, as any values stored in the node's
+     * keystore (see {@code SecureSetting}) will not otherwise be retrievable. The
+     * setting values do not follow dynamic updates, i.e. the values are identical
+     * to the ones during the initial plugin loading, barring changes to the
+     * keystore file on disk. Any failure during the operation should be signaled by
+     * raising an exception, but the plugin should otherwise continue to function
+     * unperturbed.
+     *
+     * @param settings
+     *            Settings used while reloading the plugin. All values are
+     *            retrievable, including the values stored in the node's keystore.
+     *            The setting values are the initial ones, from when the node has been
+     *            started, i.e. they don't follow dynamic updates.
+     * @throws Exception
+     *             if the operation failed. The plugin should continue to operate as
+     *             if the offending call didn't happen.
+     */
+    void reload(Settings settings) throws Exception;
+}
\ No newline at end of file
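For contrast with the formal contract above, a minimal hypothetical implementation; the setting name, the cached member, and the plugin class are all invented for the sketch:

["source","java"]
----
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ReloadablePlugin;

public class ExampleReloadablePlugin extends Plugin implements ReloadablePlugin {

    // Hypothetical secure setting; a real plugin would also expose it via getSettings().
    static final Setting<SecureString> API_KEY = SecureSetting.secureString("example.api_key", null);

    private volatile String apiKey;

    @Override
    public void reload(Settings settingsWithKeystore) {
        // The keystore backing these settings is only open for the duration of this call,
        // so copy out whatever the plugin needs and rebuild dependent members here.
        try (SecureString key = API_KEY.get(settingsWithKeystore)) {
            this.apiKey = key.toString();
        }
    }
}
----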
+ */ + void reload(Settings settings) throws Exception; +} \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java new file mode 100644 index 0000000000000..0697871ea5d1c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestActions; +import org.elasticsearch.rest.action.RestBuilderListener; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public final class RestReloadSecureSettingsAction extends BaseRestHandler { + + public RestReloadSecureSettingsAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, "/_nodes/reload_secure_settings", this); + controller.registerHandler(POST, "/_nodes/{nodeId}/reload_secure_settings", this); + } + + @Override + public String getName() { + return "nodes_reload_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); + final NodesReloadSecureSettingsRequestBuilder nodesRequestBuilder = client.admin() + .cluster() + .prepareReloadSecureSettings() + .setTimeout(request.param("timeout")) + .source(request.requiredContent(), request.getXContentType()) + .setNodesIds(nodesIds); + final NodesReloadSecureSettingsRequest nodesRequest = nodesRequestBuilder.request(); + return channel -> nodesRequestBuilder + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder) + throws Exception { + 
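+ // assemble the standard nodes-level envelope: _nodes header, cluster name, then the per-node reload results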
builder.startObject(); + RestActions.buildNodesHeader(builder, channel.request(), response); + builder.field("cluster_name", response.getClusterName().value()); + response.toXContent(builder, channel.request()); + builder.endObject(); + // clear password for the original request + nodesRequest.secureSettingsPassword().close(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + + @Override + public boolean canTripCircuitBreaker() { + return false; + } + +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java new file mode 100644 index 0000000000000..2061349e3301d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -0,0 +1,422 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.ReloadablePlugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.StandardCopyOption; +import java.security.AccessControlException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.containsString; + +public class ReloadSecureSettingsIT extends ESIntegTestCase { + + public void testMissingKeystoreFile() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final Environment environment = internalCluster().getInstance(Environment.class); + final AtomicReference reloadSettingsError = new AtomicReference<>(); + // keystore file should be 
missing for this test case + Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile())); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException(), instanceOf(IllegalStateException.class)); + assertThat(nodeResponse.reloadException().getMessage(), containsString("Keystore is missing")); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // in the missing keystore case no reload should be triggered + assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); + } + + public void testNullKeystorePassword() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + reloadSettingsError.set(new AssertionError("Null keystore password should fail")); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + try { + assertThat(e, instanceOf(ActionRequestValidationException.class)); + assertThat(e.getMessage(), containsString("secure settings password cannot be null")); + } catch (final AssertionError ae) { + reloadSettingsError.set(ae); + } finally { + latch.countDown(); + } + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // in the null password case no reload should be triggered + assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); + } + + public void testInvalidKeystoreFile() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final Environment environment = internalCluster().getInstance(Environment.class); + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + // invalid "keystore" file should be present in the config dir + try (InputStream keystore = 
ReloadSecureSettingsIT.class.getResourceAsStream("invalid.txt.keystore")) { + if (Files.exists(environment.configFile()) == false) { + Files.createDirectory(environment.configFile()); + } + Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING); + } + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // in the invalid keystore format case no reload should be triggered + assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); + } + + public void testWrongKeystorePassword() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final Environment environment = internalCluster().getInstance(Environment.class); + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + // "some" keystore should be present in this case + writeEmptyKeystore(environment, new char[0]); + final CountDownLatch latch = new CountDownLatch(1); + client().admin() + .cluster() + .prepareReloadSecureSettings() + .setSecureStorePassword(new SecureString(new char[] { 'W', 'r', 'o', 'n', 'g' })) + .execute(new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException(), instanceOf(IOException.class)); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // in the wrong password case no reload should be triggered + assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); + } + + public void testMisbehavingPlugin() throws Exception { + final Environment environment = internalCluster().getInstance(Environment.class); + final PluginsService pluginsService 
= internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + // make plugins throw on reload + for (final String nodeName : internalCluster().getNodeNames()) { + internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(MisbehavingReloadablePlugin.class) + .stream().findFirst().get().setShouldThrow(true); + } + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + // "some" keystore should be present + final SecureSettings secureSettings = writeEmptyKeystore(environment, new char[0]); + // read seed setting value from the test case (not from the node) + final String seedValue = KeyStoreWrapper.SEED_SETTING + .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) + .toString(); + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException().getMessage(), containsString("If shouldThrow I throw")); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + // even if one plugin fails to reload (throws Exception), others should be + // unperturbed + assertThat(mockReloadablePlugin.getReloadCount() - initialReloadCount, equalTo(1)); + // mock plugin should have been reloaded successfully + assertThat(mockReloadablePlugin.getSeedValue(), equalTo(seedValue)); + } + + public void testReloadWhileKeystoreChanged() throws Exception { + final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); + final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) + .stream().findFirst().get(); + final Environment environment = internalCluster().getInstance(Environment.class); + final int initialReloadCount = mockReloadablePlugin.getReloadCount(); + for (int i = 0; i < randomIntBetween(4, 8); i++) { + // write keystore + final SecureSettings secureSettings = writeEmptyKeystore(environment, new char[0]); + // read seed setting value from the test case (not from the node) + final String seedValue = KeyStoreWrapper.SEED_SETTING + .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) + .toString(); + // reload call + successfulReloadCall(); + assertThat(mockReloadablePlugin.getSeedValue(), equalTo(seedValue)); + assertThat(mockReloadablePlugin.getReloadCount() - initialReloadCount, equalTo(i + 1)); + } + } + + @Override + protected Collection> nodePlugins() { + final List> plugins = 
Arrays.asList(MockReloadablePlugin.class, MisbehavingReloadablePlugin.class); + // shuffle as reload is called in order + Collections.shuffle(plugins, random()); + return plugins; + } + + private void successfulReloadCall() throws InterruptedException { + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + new ActionListener() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), nullValue()); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + } + + private SecureSettings writeEmptyKeystore(Environment environment, char[] password) throws Exception { + final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); + try { + keyStoreWrapper.save(environment.configFile(), password); + } catch (final AccessControlException e) { + if (e.getPermission() instanceof RuntimePermission && e.getPermission().getName().equals("accessUserInformation")) { + // this is expected: the save method is extra diligent and wants to make sure + // the keystore is readable, not relying on umask and whatnot. It's ok, we don't + // care about this in tests. 
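+ // deliberately swallowed, nothing to do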
+ } else { + throw e; + } + } + return keyStoreWrapper; + } + + public static class CountingReloadablePlugin extends Plugin implements ReloadablePlugin { + + private volatile int reloadCount; + + public CountingReloadablePlugin() { + } + + @Override + public void reload(Settings settings) throws Exception { + reloadCount++; + } + + public int getReloadCount() { + return reloadCount; + } + + } + + public static class MockReloadablePlugin extends CountingReloadablePlugin { + + private volatile String seedValue; + + public MockReloadablePlugin() { + } + + @Override + public void reload(Settings settings) throws Exception { + super.reload(settings); + this.seedValue = KeyStoreWrapper.SEED_SETTING.get(settings).toString(); + } + + public String getSeedValue() { + return seedValue; + } + + } + + public static class MisbehavingReloadablePlugin extends CountingReloadablePlugin { + + private boolean shouldThrow = false; + + public MisbehavingReloadablePlugin() { + } + + @Override + public synchronized void reload(Settings settings) throws Exception { + super.reload(settings); + if (shouldThrow) { + shouldThrow = false; + throw new Exception("If shouldThrow I throw"); + } + } + + public synchronized void setShouldThrow(boolean shouldThrow) { + this.shouldThrow = shouldThrow; + } + } + +} diff --git a/server/src/test/resources/org/elasticsearch/action/admin/invalid.txt.keystore b/server/src/test/resources/org/elasticsearch/action/admin/invalid.txt.keystore new file mode 100644 index 0000000000000..04613ffab7f36 --- /dev/null +++ b/server/src/test/resources/org/elasticsearch/action/admin/invalid.txt.keystore @@ -0,0 +1,3 @@ +admin admin +dragon 12345 + From 71b3ac29727a5104142f79cff251bd019c188e25 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Wed, 20 Jun 2018 16:42:50 +0200 Subject: [PATCH 03/31] Add Delete Snapshot High Level REST API With this commit we add the delete snapshot API to the Java high level REST client. 
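For illustration, a minimal synchronous usage sketch (the client setup and the
repository/snapshot names here are hypothetical, not part of this patch):

    DeleteSnapshotRequest request = new DeleteSnapshotRequest("my_repository", "my_snapshot");
    DeleteSnapshotResponse response = client.snapshot().delete(request, RequestOptions.DEFAULT);
    boolean acknowledged = response.isAcknowledged(); // true once the deletion has been accepted
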
Relates #27205 Relates #31393 --- .../client/RequestConverters.java | 13 ++++ .../elasticsearch/client/SnapshotClient.java | 32 ++++++++ .../client/RequestConvertersTests.java | 20 +++++ .../org/elasticsearch/client/SnapshotIT.java | 27 +++++++ .../SnapshotClientDocumentationIT.java | 73 +++++++++++++++++++ .../snapshot/delete_snapshot.asciidoc | 73 +++++++++++++++++++ .../high-level/supported-apis.asciidoc | 2 + .../delete/DeleteSnapshotResponse.java | 6 ++ .../delete/DeleteSnapshotResponseTests.java | 41 +++++++++++ 9 files changed, 287 insertions(+) create mode 100644 docs/java-rest/high-level/snapshot/delete_snapshot.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 9d2216b0b5b86..67a8825afb1a7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -39,6 +39,7 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; @@ -855,6 +856,18 @@ static Request verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest) return request; } + static Request deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) { + String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot") + .addPathPart(deleteSnapshotRequest.repository()) + .addPathPart(deleteSnapshotRequest.snapshot()) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + + Params parameters = new Params(request); + parameters.withMasterTimeout(deleteSnapshotRequest.masterNodeTimeout()); + return request; + } + static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index b7cd2d52732cc..36b4f473ce82f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; import java.io.IOException; @@ -161,4 +163,34 @@ 
public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryReques restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, options, VerifyRepositoryResponse::fromXContent, listener, emptySet()); } + + /** + * Deletes a snapshot. + * See Snapshot and Restore + * API on elastic.co + * + * @param deleteSnapshotRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public DeleteSnapshotResponse delete(DeleteSnapshotRequest deleteSnapshotRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(deleteSnapshotRequest, RequestConverters::deleteSnapshot, options, + DeleteSnapshotResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously deletes a snapshot. + * See Snapshot and Restore + * API on elastic.co + * + * @param deleteSnapshotRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void deleteAsync(DeleteSnapshotRequest deleteSnapshotRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(deleteSnapshotRequest, RequestConverters::deleteSnapshot, options, + DeleteSnapshotResponse::fromXContent, listener, emptySet()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 6c0ae7c20b856..32c378dddaf91 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -1890,6 +1891,25 @@ public void testVerifyRepository() { assertThat(expectedParams, equalTo(request.getParameters())); } + public void testDeleteSnapshot() { + Map expectedParams = new HashMap<>(); + String repository = randomIndicesNames(1, 1)[0]; + String snapshot = "snapshot-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + + String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/%s", repository, snapshot); + + DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest(); + deleteSnapshotRequest.repository(repository); + deleteSnapshotRequest.snapshot(snapshot); + setRandomMasterTimeout(deleteSnapshotRequest, expectedParams); + + Request request = RequestConverters.deleteSnapshot(deleteSnapshotRequest); + assertThat(endpoint, equalTo(request.getEndpoint())); + assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod())); + 
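+ // the endpoint, HTTP method and query parameters fully describe the request; no body is expected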
assertThat(expectedParams, equalTo(request.getParameters())); + assertNull(request.getEntity()); + } + public void testPutTemplateRequest() throws Exception { Map names = new HashMap<>(); names.put("log", "log"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index 3b27c2631f4d3..f4d325e158bc5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -28,11 +28,14 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.Locale; import static org.hamcrest.Matchers.equalTo; @@ -46,6 +49,13 @@ private PutRepositoryResponse createTestRepository(String repository, String typ highLevelClient().snapshot()::createRepositoryAsync); } + private Response createTestSnapshot(String repository, String snapshot) throws IOException { + Request createSnapshot = new Request("put", String.format(Locale.ROOT, "_snapshot/%s/%s", repository, snapshot)); + createSnapshot.addParameter("wait_for_completion", "true"); + return highLevelClient().getLowLevelClient().performRequest(createSnapshot); + } + + public void testCreateRepository() throws IOException { PutRepositoryResponse response = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}"); assertTrue(response.isAcknowledged()); @@ -108,4 +118,21 @@ public void testVerifyRepository() throws IOException { highLevelClient().snapshot()::verifyRepositoryAsync); assertThat(response.getNodes().size(), equalTo(1)); } + + public void testDeleteSnapshot() throws IOException { + String repository = "test_repository"; + String snapshot = "test_snapshot"; + + PutRepositoryResponse putRepositoryResponse = createTestRepository(repository, FsRepository.TYPE, "{\"location\": \".\"}"); + assertTrue(putRepositoryResponse.isAcknowledged()); + + Response putSnapshotResponse = createTestSnapshot(repository, snapshot); + // check that the request went ok without parsing JSON here. When using the high level client, check acknowledgement instead. 
+ assertEquals(200, putSnapshotResponse.getStatusLine().getStatusCode()); + + DeleteSnapshotRequest request = new DeleteSnapshotRequest(repository, snapshot); + DeleteSnapshotResponse response = execute(request, highLevelClient().snapshot()::delete, highLevelClient().snapshot()::deleteAsync); + + assertTrue(response.isAcknowledged()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index 8c158a91a5111..965f9641e48ad 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -29,8 +29,12 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; @@ -41,6 +45,7 @@ import java.io.IOException; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -69,6 +74,8 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase private static final String repositoryName = "test_repository"; + private static final String snapshotName = "test_snapshot"; + public void testSnapshotCreateRepository() throws IOException { RestHighLevelClient client = highLevelClient(); @@ -360,10 +367,76 @@ public void onFailure(Exception e) { } } + public void testSnapshotDeleteSnapshot() throws IOException { + RestHighLevelClient client = highLevelClient(); + + createTestRepositories(); + createTestSnapshots(); + + // tag::delete-snapshot-request + DeleteSnapshotRequest request = new DeleteSnapshotRequest(repositoryName); + request.snapshot(snapshotName); + // end::delete-snapshot-request + + // tag::delete-snapshot-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::delete-snapshot-request-masterTimeout + + // tag::delete-snapshot-execute + DeleteSnapshotResponse response = client.snapshot().delete(request, RequestOptions.DEFAULT); + // end::delete-snapshot-execute + + // tag::delete-snapshot-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::delete-snapshot-response + assertTrue(acknowledged); + } + + public void testSnapshotDeleteSnapshotAsync() throws InterruptedException { + RestHighLevelClient client = highLevelClient(); + { + DeleteSnapshotRequest request = new DeleteSnapshotRequest(); + + // tag::delete-snapshot-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(DeleteSnapshotResponse 
deleteSnapshotResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::delete-snapshot-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::delete-snapshot-execute-async + client.snapshot().deleteAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::delete-snapshot-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + private void createTestRepositories() throws IOException { PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); request.type(FsRepository.TYPE); request.settings("{\"location\": \".\"}", XContentType.JSON); assertTrue(highLevelClient().snapshot().createRepository(request, RequestOptions.DEFAULT).isAcknowledged()); } + + private void createTestSnapshots() throws IOException { + Request createSnapshot = new Request("put", String.format(Locale.ROOT, "_snapshot/%s/%s", repositoryName, snapshotName)); + createSnapshot.addParameter("wait_for_completion", "true"); + Response response = highLevelClient().getLowLevelClient().performRequest(createSnapshot); + // check that the request went ok without parsing JSON here. When using the high level client, check acknowledgement instead. + assertEquals(200, response.getStatusLine().getStatusCode()); + } } diff --git a/docs/java-rest/high-level/snapshot/delete_snapshot.asciidoc b/docs/java-rest/high-level/snapshot/delete_snapshot.asciidoc new file mode 100644 index 0000000000000..a594db5b60259 --- /dev/null +++ b/docs/java-rest/high-level/snapshot/delete_snapshot.asciidoc @@ -0,0 +1,73 @@ +[[java-rest-high-snapshot-delete-snapshot]] +=== Delete Snapshot API + +The Delete Snapshot API allows to delete a snapshot. 
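+
+At a glance, a complete synchronous round trip is a request, an execute call
+and an acknowledgement check. The sketch below uses placeholder repository and
+snapshot names; the tagged snippets in the following sections are the
+authoritative examples:
+
+["source","java"]
+--------------------------------------------------
+DeleteSnapshotRequest request = new DeleteSnapshotRequest("test_repository");
+request.snapshot("test_snapshot");
+DeleteSnapshotResponse response = client.snapshot().delete(request, RequestOptions.DEFAULT);
+--------------------------------------------------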
+ +[[java-rest-high-snapshot-delete-snapshot-request]] +==== Delete Snapshot Request + +A `DeleteSnapshotRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-request] +-------------------------------------------------- + +==== Optional Arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-snapshot-delete-snapshot-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-execute] +-------------------------------------------------- + +[[java-rest-high-snapshot-delete-snapshot-async]] +==== Asynchronous Execution + +The asynchronous execution of a delete snapshot request requires both the +`DeleteSnapshotRequest` instance and an `ActionListener` instance to be +passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-execute-async] +-------------------------------------------------- +<1> The `DeleteSnapshotRequest` to execute and the `ActionListener` +to use when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `DeleteSnapshotResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure. 
The raised exception is provided as an argument + +[[java-rest-high-cluster-delete-snapshot-response]] +==== Delete Snapshot Response + +The returned `DeleteSnapshotResponse` allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-snapshot-response] +-------------------------------------------------- +<1> Indicates the node has acknowledged the request diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 17acc8f13c04d..727088aa5737f 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -136,11 +136,13 @@ The Java High Level REST Client supports the following Snapshot APIs: * <> * <> * <> +* <> include::snapshot/get_repository.asciidoc[] include::snapshot/create_repository.asciidoc[] include::snapshot/delete_repository.asciidoc[] include::snapshot/verify_repository.asciidoc[] +include::snapshot/delete_snapshot.asciidoc[] == Tasks APIs diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponse.java index d8de78c3e5b76..ac6e74d39702c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponse.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.delete; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.xcontent.XContentParser; /** * Delete snapshot response @@ -32,4 +33,9 @@ public class DeleteSnapshotResponse extends AcknowledgedResponse { DeleteSnapshotResponse(boolean acknowledged) { super(acknowledged); } + + public static DeleteSnapshotResponse fromXContent(XContentParser parser) { + return new DeleteSnapshotResponse(parseAcknowledged(parser)); + } + } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponseTests.java new file mode 100644 index 0000000000000..d77dadfb81edb --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotResponseTests.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.snapshots.delete; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +public class DeleteSnapshotResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected DeleteSnapshotResponse doParseInstance(XContentParser parser) { + return DeleteSnapshotResponse.fromXContent(parser); + } + + @Override + protected DeleteSnapshotResponse createBlankInstance() { + return new DeleteSnapshotResponse(); + } + + @Override + protected DeleteSnapshotResponse createTestInstance() { + return new DeleteSnapshotResponse(randomBoolean()); + } +} From 2c6437bfbc96085367ce78c96e2f98e7808ebcf1 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 21 Jun 2018 16:00:26 +0200 Subject: [PATCH 04/31] Return transport addresses from UnicastHostsProvider (#31426) With #20695 we removed local transport and there is just TransportAddress now. The UnicastHostsProvider currently returns DiscoveryNode instances, where, during pinging, we're actually only making use of the TransportAddress to establish a first connection to the possible new node. To simplify the interface, we can just return a list of transport addresses instead, which means that it's not necessary anymore to create fake node objects in each plugin just to return the address information. --- .../classic/AzureUnicastHostsProvider.java | 26 +++---- .../ec2/AwsEc2UnicastHostsProvider.java | 33 ++++---- .../discovery/ec2/Ec2DiscoveryTests.java | 75 +++++++++---------- .../file/FileBasedUnicastHostsProvider.java | 14 ++-- .../FileBasedUnicastHostsProviderTests.java | 43 +++++------ .../gce/GceUnicastHostsProvider.java | 25 +++---- .../discovery/gce/GceDiscoveryTests.java | 53 +++++++------ .../discovery/zen/UnicastHostsProvider.java | 4 +- .../discovery/zen/UnicastZenPing.java | 75 ++++++++----------- .../single/SingleNodeDiscoveryIT.java | 2 +- .../discovery/zen/UnicastZenPingTests.java | 41 +++++----- .../discovery/MockUncasedHostProvider.java | 4 +- 12 files changed, 175 insertions(+), 220 deletions(-) diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java index 2bc6cc4b130cd..482dafb008fc5 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java @@ -24,12 +24,10 @@ import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import com.microsoft.windowsazure.management.compute.models.InstanceEndpoint; import com.microsoft.windowsazure.management.compute.models.RoleInstance; -import org.elasticsearch.Version; import org.elasticsearch.cloud.azure.classic.AzureServiceDisableException; import org.elasticsearch.cloud.azure.classic.AzureServiceRemoteException; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Discovery; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.network.InetAddresses; @@ -47,9 +45,6 @@ import java.util.ArrayList; import 
java.util.List; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; - public class AzureUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { public enum HostType { @@ -104,7 +99,7 @@ public static Deployment fromString(String string) { private final TimeValue refreshInterval; private long lastRefresh; - private List cachedDiscoNodes; + private List dynamicHosts; private final HostType hostType; private final String publicEndpointName; private final String deploymentName; @@ -137,30 +132,30 @@ public AzureUnicastHostsProvider(Settings settings, AzureComputeService azureCom * Setting `cloud.azure.refresh_interval` to `0` will disable caching (default). */ @Override - public List buildDynamicNodes() { + public List buildDynamicHosts() { if (refreshInterval.millis() != 0) { - if (cachedDiscoNodes != null && + if (dynamicHosts != null && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) { logger.trace("using cache to retrieve node list"); - return cachedDiscoNodes; + return dynamicHosts; } lastRefresh = System.currentTimeMillis(); } logger.debug("start building nodes list using Azure API"); - cachedDiscoNodes = new ArrayList<>(); + dynamicHosts = new ArrayList<>(); HostedServiceGetDetailedResponse detailed; try { detailed = azureComputeService.getServiceDetails(); } catch (AzureServiceDisableException e) { logger.debug("Azure discovery service has been disabled. Returning empty list of nodes."); - return cachedDiscoNodes; + return dynamicHosts; } catch (AzureServiceRemoteException e) { // We got a remote exception logger.warn("can not get list of azure nodes: [{}]. Returning empty list of nodes.", e.getMessage()); logger.trace("AzureServiceRemoteException caught", e); - return cachedDiscoNodes; + return dynamicHosts; } InetAddress ipAddress = null; @@ -212,8 +207,7 @@ public List buildDynamicNodes() { TransportAddress[] addresses = transportService.addressesFromString(networkAddress, 1); for (TransportAddress address : addresses) { logger.trace("adding {}, transport_address {}", networkAddress, address); - cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + instance.getInstanceName(), address, emptyMap(), - emptySet(), Version.CURRENT.minimumCompatibilityVersion())); + dynamicHosts.add(address); } } catch (Exception e) { logger.warn("can not convert [{}] to transport address. skipping. 
[{}]", networkAddress, e.getMessage()); @@ -221,9 +215,9 @@ public List buildDynamicNodes() { } } - logger.debug("{} node(s) added", cachedDiscoNodes.size()); + logger.debug("{} addresses added", dynamicHosts.size()); - return cachedDiscoNodes; + return dynamicHosts; } protected String resolveInstanceAddress(final HostType hostType, final RoleInstance instance) { diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java index 2c536981b04c5..396e9f707d404 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java @@ -29,8 +29,6 @@ import com.amazonaws.services.ec2.model.Tag; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -46,8 +44,6 @@ import java.util.Set; import static java.util.Collections.disjoint; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; import static org.elasticsearch.discovery.ec2.AwsEc2Service.HostType.TAG_PREFIX; import static org.elasticsearch.discovery.ec2.AwsEc2Service.HostType.PRIVATE_DNS; import static org.elasticsearch.discovery.ec2.AwsEc2Service.HostType.PRIVATE_IP; @@ -70,7 +66,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos private final String hostType; - private final DiscoNodesCache discoNodes; + private final TransportAddressesCache dynamicHosts; AwsEc2UnicastHostsProvider(Settings settings, TransportService transportService, AwsEc2Service awsEc2Service) { super(settings); @@ -78,7 +74,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos this.awsEc2Service = awsEc2Service; this.hostType = AwsEc2Service.HOST_TYPE_SETTING.get(settings); - this.discoNodes = new DiscoNodesCache(AwsEc2Service.NODE_CACHE_TIME_SETTING.get(settings)); + this.dynamicHosts = new TransportAddressesCache(AwsEc2Service.NODE_CACHE_TIME_SETTING.get(settings)); this.bindAnyGroup = AwsEc2Service.ANY_GROUP_SETTING.get(settings); this.groups = new HashSet<>(); @@ -96,13 +92,13 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos } @Override - public List buildDynamicNodes() { - return discoNodes.getOrRefresh(); + public List buildDynamicHosts() { + return dynamicHosts.getOrRefresh(); } - protected List fetchDynamicNodes() { + protected List fetchDynamicNodes() { - final List discoNodes = new ArrayList<>(); + final List dynamicHosts = new ArrayList<>(); final DescribeInstancesResult descInstances; try (AmazonEc2Reference clientReference = awsEc2Service.client()) { @@ -115,7 +111,7 @@ protected List fetchDynamicNodes() { } catch (final AmazonClientException e) { logger.info("Exception while retrieving instance list from AWS API: {}", e.getMessage()); logger.debug("Full exception:", e); - return discoNodes; + return dynamicHosts; } logger.trace("building dynamic unicast discovery nodes..."); @@ -179,8 +175,7 @@ && disjoint(securityGroupIds, groups)) { final TransportAddress[] addresses = 
transportService.addressesFromString(address, 1); for (int i = 0; i < addresses.length; i++) { logger.trace("adding {}, address {}, transport_address {}", instance.getInstanceId(), address, addresses[i]); - discoNodes.add(new DiscoveryNode(instance.getInstanceId(), "#cloud-" + instance.getInstanceId() + "-" + i, - addresses[i], emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion())); + dynamicHosts.add(addresses[i]); } } catch (final Exception e) { final String finalAddress = address; @@ -194,9 +189,9 @@ && disjoint(securityGroupIds, groups)) { } } - logger.debug("using dynamic discovery nodes {}", discoNodes); + logger.debug("using dynamic transport addresses {}", dynamicHosts); - return discoNodes; + return dynamicHosts; } private DescribeInstancesRequest buildDescribeInstancesRequest() { @@ -222,11 +217,11 @@ private DescribeInstancesRequest buildDescribeInstancesRequest() { return describeInstancesRequest; } - private final class DiscoNodesCache extends SingleObjectCache> { + private final class TransportAddressesCache extends SingleObjectCache> { private boolean empty = true; - protected DiscoNodesCache(TimeValue refreshInterval) { + protected TransportAddressesCache(TimeValue refreshInterval) { super(refreshInterval, new ArrayList<>()); } @@ -236,8 +231,8 @@ protected boolean needsRefresh() { } @Override - protected List refresh() { - final List nodes = fetchDynamicNodes(); + protected List refresh() { + final List nodes = fetchDynamicNodes(); empty = nodes.isEmpty(); return nodes; } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index 43cc924fadb10..9dc2e02edc1b5 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -21,7 +21,6 @@ import com.amazonaws.services.ec2.model.Tag; import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -87,16 +86,16 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi null); } - protected List buildDynamicNodes(Settings nodeSettings, int nodes) { - return buildDynamicNodes(nodeSettings, nodes, null); + protected List buildDynamicHosts(Settings nodeSettings, int nodes) { + return buildDynamicHosts(nodeSettings, nodes, null); } - protected List buildDynamicNodes(Settings nodeSettings, int nodes, List> tagsList) { + protected List buildDynamicHosts(Settings nodeSettings, int nodes, List> tagsList) { try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY, nodes, tagsList)) { AwsEc2UnicastHostsProvider provider = new AwsEc2UnicastHostsProvider(nodeSettings, transportService, plugin.ec2Service); - List discoveryNodes = provider.buildDynamicNodes(); - logger.debug("--> nodes found: {}", discoveryNodes); - return discoveryNodes; + List dynamicHosts = provider.buildDynamicHosts(); + logger.debug("--> addresses found: {}", dynamicHosts); + return dynamicHosts; } catch (IOException e) { fail("Unexpected IOException"); return null; @@ -107,7 +106,7 @@ public void testDefaultSettings() throws InterruptedException { int nodes = randomInt(10); Settings nodeSettings = Settings.builder() .build(); - List 
discoveryNodes = buildDynamicNodes(nodeSettings, nodes); + List discoveryNodes = buildDynamicHosts(nodeSettings, nodes); assertThat(discoveryNodes, hasSize(nodes)); } @@ -119,12 +118,11 @@ public void testPrivateIp() throws InterruptedException { Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_ip") .build(); - List discoveryNodes = buildDynamicNodes(nodeSettings, nodes); - assertThat(discoveryNodes, hasSize(nodes)); + List transportAddresses = buildDynamicHosts(nodeSettings, nodes); + assertThat(transportAddresses, hasSize(nodes)); // We check that we are using here expected address int node = 1; - for (DiscoveryNode discoveryNode : discoveryNodes) { - TransportAddress address = discoveryNode.getAddress(); + for (TransportAddress address : transportAddresses) { TransportAddress expected = poorMansDNS.get(AmazonEC2Mock.PREFIX_PRIVATE_IP + node++); assertEquals(address, expected); } @@ -138,12 +136,11 @@ public void testPublicIp() throws InterruptedException { Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_ip") .build(); - List discoveryNodes = buildDynamicNodes(nodeSettings, nodes); - assertThat(discoveryNodes, hasSize(nodes)); + List dynamicHosts = buildDynamicHosts(nodeSettings, nodes); + assertThat(dynamicHosts, hasSize(nodes)); // We check that we are using here expected address int node = 1; - for (DiscoveryNode discoveryNode : discoveryNodes) { - TransportAddress address = discoveryNode.getAddress(); + for (TransportAddress address : dynamicHosts) { TransportAddress expected = poorMansDNS.get(AmazonEC2Mock.PREFIX_PUBLIC_IP + node++); assertEquals(address, expected); } @@ -159,13 +156,12 @@ public void testPrivateDns() throws InterruptedException { Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "private_dns") .build(); - List discoveryNodes = buildDynamicNodes(nodeSettings, nodes); - assertThat(discoveryNodes, hasSize(nodes)); + List dynamicHosts = buildDynamicHosts(nodeSettings, nodes); + assertThat(dynamicHosts, hasSize(nodes)); // We check that we are using here expected address int node = 1; - for (DiscoveryNode discoveryNode : discoveryNodes) { + for (TransportAddress address : dynamicHosts) { String instanceId = "node" + node++; - TransportAddress address = discoveryNode.getAddress(); TransportAddress expected = poorMansDNS.get( AmazonEC2Mock.PREFIX_PRIVATE_DNS + instanceId + AmazonEC2Mock.SUFFIX_PRIVATE_DNS); assertEquals(address, expected); @@ -182,13 +178,12 @@ public void testPublicDns() throws InterruptedException { Settings nodeSettings = Settings.builder() .put(AwsEc2Service.HOST_TYPE_SETTING.getKey(), "public_dns") .build(); - List discoveryNodes = buildDynamicNodes(nodeSettings, nodes); - assertThat(discoveryNodes, hasSize(nodes)); + List dynamicHosts = buildDynamicHosts(nodeSettings, nodes); + assertThat(dynamicHosts, hasSize(nodes)); // We check that we are using here expected address int node = 1; - for (DiscoveryNode discoveryNode : discoveryNodes) { + for (TransportAddress address : dynamicHosts) { String instanceId = "node" + node++; - TransportAddress address = discoveryNode.getAddress(); TransportAddress expected = poorMansDNS.get( AmazonEC2Mock.PREFIX_PUBLIC_DNS + instanceId + AmazonEC2Mock.SUFFIX_PUBLIC_DNS); assertEquals(address, expected); @@ -201,7 +196,7 @@ public void testInvalidHostType() throws InterruptedException { .build(); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () 
-> { - buildDynamicNodes(nodeSettings, 1); + buildDynamicHosts(nodeSettings, 1); }); assertThat(exception.getMessage(), containsString("does_not_exist is unknown for discovery.ec2.host_type")); } @@ -227,8 +222,8 @@ public void testFilterByTags() throws InterruptedException { } logger.info("started [{}] instances with [{}] stage=prod tag", nodes, prodInstances); - List discoveryNodes = buildDynamicNodes(nodeSettings, nodes, tagsList); - assertThat(discoveryNodes, hasSize(prodInstances)); + List dynamicHosts = buildDynamicHosts(nodeSettings, nodes, tagsList); + assertThat(dynamicHosts, hasSize(prodInstances)); } public void testFilterByMultipleTags() throws InterruptedException { @@ -258,8 +253,8 @@ public void testFilterByMultipleTags() throws InterruptedException { } logger.info("started [{}] instances with [{}] stage=prod tag", nodes, prodInstances); - List discoveryNodes = buildDynamicNodes(nodeSettings, nodes, tagsList); - assertThat(discoveryNodes, hasSize(prodInstances)); + List dynamicHosts = buildDynamicHosts(nodeSettings, nodes, tagsList); + assertThat(dynamicHosts, hasSize(prodInstances)); } public void testReadHostFromTag() throws InterruptedException, UnknownHostException { @@ -285,11 +280,11 @@ public void testReadHostFromTag() throws InterruptedException, UnknownHostExcept } logger.info("started [{}] instances", nodes); - List discoveryNodes = buildDynamicNodes(nodeSettings, nodes, tagsList); - assertThat(discoveryNodes, hasSize(nodes)); - for (DiscoveryNode discoveryNode : discoveryNodes) { - TransportAddress address = discoveryNode.getAddress(); - TransportAddress expected = poorMansDNS.get(discoveryNode.getName()); + List dynamicHosts = buildDynamicHosts(nodeSettings, nodes, tagsList); + assertThat(dynamicHosts, hasSize(nodes)); + int node = 1; + for (TransportAddress address : dynamicHosts) { + TransportAddress expected = poorMansDNS.get("node" + node++); assertEquals(address, expected); } } @@ -306,13 +301,13 @@ public void testGetNodeListEmptyCache() throws Exception { AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(Settings.EMPTY, 1, null); DummyEc2HostProvider provider = new DummyEc2HostProvider(Settings.EMPTY, transportService, awsEc2Service) { @Override - protected List fetchDynamicNodes() { + protected List fetchDynamicNodes() { fetchCount++; return new ArrayList<>(); } }; for (int i=0; i<3; i++) { - provider.buildDynamicNodes(); + provider.buildDynamicHosts(); } assertThat(provider.fetchCount, is(3)); } @@ -323,18 +318,18 @@ public void testGetNodeListCached() throws Exception { try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY)) { DummyEc2HostProvider provider = new DummyEc2HostProvider(builder.build(), transportService, plugin.ec2Service) { @Override - protected List fetchDynamicNodes() { + protected List fetchDynamicNodes() { fetchCount++; - return Ec2DiscoveryTests.this.buildDynamicNodes(Settings.EMPTY, 1); + return Ec2DiscoveryTests.this.buildDynamicHosts(Settings.EMPTY, 1); } }; for (int i=0; i<3; i++) { - provider.buildDynamicNodes(); + provider.buildDynamicHosts(); } assertThat(provider.fetchCount, is(1)); Thread.sleep(1_000L); // wait for cache to expire for (int i=0; i<3; i++) { - provider.buildDynamicNodes(); + provider.buildDynamicHosts(); } assertThat(provider.fetchCount, is(2)); } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java index 
5a3b26e76f722..4395045ea90f8 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java @@ -21,8 +21,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.env.Environment; @@ -59,7 +59,6 @@ class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; - static final String UNICAST_HOST_PREFIX = "#zen_file_unicast_host_"; private final TransportService transportService; private final ExecutorService executorService; @@ -79,7 +78,7 @@ class FileBasedUnicastHostsProvider extends AbstractComponent implements Unicast } @Override - public List buildDynamicNodes() { + public List buildDynamicHosts() { List hostsList; try (Stream lines = Files.lines(unicastHostsFilePath)) { hostsList = lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments @@ -94,23 +93,22 @@ public List buildDynamicNodes() { hostsList = Collections.emptyList(); } - final List discoNodes = new ArrayList<>(); + final List dynamicHosts = new ArrayList<>(); try { - discoNodes.addAll(resolveHostsLists( + dynamicHosts.addAll(resolveHostsLists( executorService, logger, hostsList, 1, transportService, - UNICAST_HOST_PREFIX, resolveTimeout)); } catch (InterruptedException e) { throw new RuntimeException(e); } - logger.debug("[discovery-file] Using dynamic discovery nodes {}", discoNodes); + logger.debug("[discovery-file] Using dynamic discovery nodes {}", dynamicHosts); - return discoNodes; + return dynamicHosts; } } diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java index 3ddd15a7b4cf3..860d3537635d5 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.file; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -50,7 +49,6 @@ import java.util.concurrent.Executors; import static org.elasticsearch.discovery.file.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE; -import static org.elasticsearch.discovery.file.FileBasedUnicastHostsProvider.UNICAST_HOST_PREFIX; /** * Tests for {@link FileBasedUnicastHostsProvider}. 
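For illustration of what the reworked file-based provider now does, here is a rough, self-contained sketch with plain JDK types standing in for Elasticsearch's `TransportAddress`; the class name, constant, and parsing helper are invented for this example and are not the actual implementation. Each non-comment line of `unicast_hosts.txt` becomes a bare address, with no synthetic node id or prefix involved anymore:

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Illustrative stand-in for the file-based hosts provider: comment lines are
// dropped and each remaining entry becomes a bare address.
final class UnicastHostsFileSketch {
    private static final int DEFAULT_PORT = 9300; // assumed default transport port

    static List<InetSocketAddress> parse(final Path hostsFile) throws IOException {
        try (Stream<String> lines = Files.lines(hostsFile)) {
            return lines.map(String::trim)
                .filter(line -> line.isEmpty() == false)
                .filter(line -> line.startsWith("#") == false) // `#` lines are comments
                .map(UnicastHostsFileSketch::toAddress)
                .collect(Collectors.toList());
        }
    }

    // Naive `host[:port]` split; the real provider delegates resolution to the
    // transport service and rejects malformed entries such as `host:9300:9300`.
    private static InetSocketAddress toAddress(final String entry) {
        final int colon = entry.lastIndexOf(':');
        if (colon < 0) {
            return InetSocketAddress.createUnresolved(entry, DEFAULT_PORT);
        }
        return InetSocketAddress.createUnresolved(entry.substring(0, colon),
            Integer.parseInt(entry.substring(colon + 1)));
    }
}
--------------------------------------------------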
@@ -104,23 +102,20 @@ public BoundTransportAddress boundAddress() { public void testBuildDynamicNodes() throws Exception { final List hostEntries = Arrays.asList("#comment, should be ignored", "192.168.0.1", "192.168.0.2:9305", "255.255.23.15"); - final List nodes = setupAndRunHostProvider(hostEntries); + final List nodes = setupAndRunHostProvider(hostEntries); assertEquals(hostEntries.size() - 1, nodes.size()); // minus 1 because we are ignoring the first line that's a comment - assertEquals("192.168.0.1", nodes.get(0).getAddress().getAddress()); - assertEquals(9300, nodes.get(0).getAddress().getPort()); - assertEquals(UNICAST_HOST_PREFIX + "192.168.0.1_0#", nodes.get(0).getId()); - assertEquals("192.168.0.2", nodes.get(1).getAddress().getAddress()); - assertEquals(9305, nodes.get(1).getAddress().getPort()); - assertEquals(UNICAST_HOST_PREFIX + "192.168.0.2:9305_0#", nodes.get(1).getId()); - assertEquals("255.255.23.15", nodes.get(2).getAddress().getAddress()); - assertEquals(9300, nodes.get(2).getAddress().getPort()); - assertEquals(UNICAST_HOST_PREFIX + "255.255.23.15_0#", nodes.get(2).getId()); + assertEquals("192.168.0.1", nodes.get(0).getAddress()); + assertEquals(9300, nodes.get(0).getPort()); + assertEquals("192.168.0.2", nodes.get(1).getAddress()); + assertEquals(9305, nodes.get(1).getPort()); + assertEquals("255.255.23.15", nodes.get(2).getAddress()); + assertEquals(9300, nodes.get(2).getPort()); } public void testEmptyUnicastHostsFile() throws Exception { final List hostEntries = Collections.emptyList(); - final List nodes = setupAndRunHostProvider(hostEntries); - assertEquals(0, nodes.size()); + final List addresses = setupAndRunHostProvider(hostEntries); + assertEquals(0, addresses.size()); } public void testUnicastHostsDoesNotExist() throws Exception { @@ -129,27 +124,27 @@ public void testUnicastHostsDoesNotExist() throws Exception { .build(); final Environment environment = TestEnvironment.newEnvironment(settings); final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment, transportService, executorService); - final List nodes = provider.buildDynamicNodes(); - assertEquals(0, nodes.size()); + final List addresses = provider.buildDynamicHosts(); + assertEquals(0, addresses.size()); } public void testInvalidHostEntries() throws Exception { List hostEntries = Arrays.asList("192.168.0.1:9300:9300"); - List nodes = setupAndRunHostProvider(hostEntries); - assertEquals(0, nodes.size()); + List addresses = setupAndRunHostProvider(hostEntries); + assertEquals(0, addresses.size()); } public void testSomeInvalidHostEntries() throws Exception { List hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301"); - List nodes = setupAndRunHostProvider(hostEntries); - assertEquals(1, nodes.size()); // only one of the two is valid and will be used - assertEquals("192.168.0.1", nodes.get(0).getAddress().getAddress()); - assertEquals(9301, nodes.get(0).getAddress().getPort()); + List addresses = setupAndRunHostProvider(hostEntries); + assertEquals(1, addresses.size()); // only one of the two is valid and will be used + assertEquals("192.168.0.1", addresses.get(0).getAddress()); + assertEquals(9301, addresses.get(0).getPort()); } // sets up the config dir, writes to the unicast hosts file in the config dir, // and then runs the file-based unicast host provider to get the list of discovery nodes - private List setupAndRunHostProvider(final List hostEntries) throws IOException { + private List setupAndRunHostProvider(final List hostEntries) throws 
IOException { final Path homeDir = createTempDir(); final Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), homeDir) @@ -168,6 +163,6 @@ private List setupAndRunHostProvider(final List hostEntri } return new FileBasedUnicastHostsProvider( - new Environment(settings, configPath), transportService, executorService).buildDynamicNodes(); + new Environment(settings, configPath), transportService, executorService).buildDynamicHosts(); } } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index de290245895d2..790d70a8b99b0 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -31,9 +31,7 @@ import com.google.api.services.compute.model.NetworkInterface; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.Version; import org.elasticsearch.cloud.gce.GceInstancesService; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.network.NetworkAddress; @@ -47,8 +45,6 @@ import org.elasticsearch.transport.TransportService; import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; public class GceUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { @@ -72,7 +68,7 @@ static final class Status { private final TimeValue refreshInterval; private long lastRefresh; - private List cachedDiscoNodes; + private List cachedDynamicHosts; public GceUnicastHostsProvider(Settings settings, GceInstancesService gceInstancesService, TransportService transportService, @@ -97,7 +93,7 @@ public GceUnicastHostsProvider(Settings settings, GceInstancesService gceInstanc * Information can be cached using `cloud.gce.refresh_interval` property if needed. */ @Override - public List buildDynamicNodes() { + public List buildDynamicHosts() { // We check that needed properties have been set if (this.project == null || this.project.isEmpty() || this.zones == null || this.zones.isEmpty()) { throw new IllegalArgumentException("one or more gce discovery settings are missing. 
" + @@ -106,16 +102,16 @@ public List buildDynamicNodes() { } if (refreshInterval.millis() != 0) { - if (cachedDiscoNodes != null && + if (cachedDynamicHosts != null && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) { if (logger.isTraceEnabled()) logger.trace("using cache to retrieve node list"); - return cachedDiscoNodes; + return cachedDynamicHosts; } lastRefresh = System.currentTimeMillis(); } logger.debug("start building nodes list using GCE API"); - cachedDiscoNodes = new ArrayList<>(); + cachedDynamicHosts = new ArrayList<>(); String ipAddress = null; try { InetAddress inetAddress = networkService.resolvePublishHostAddresses( @@ -133,7 +129,7 @@ public List buildDynamicNodes() { if (instances == null) { logger.trace("no instance found for project [{}], zones [{}].", this.project, this.zones); - return cachedDiscoNodes; + return cachedDynamicHosts; } for (Instance instance : instances) { @@ -238,8 +234,7 @@ public List buildDynamicNodes() { for (TransportAddress transportAddress : addresses) { logger.trace("adding {}, type {}, address {}, transport_address {}, status {}", name, type, ip_private, transportAddress, status); - cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + name + "-" + 0, transportAddress, - emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion())); + cachedDynamicHosts.add(transportAddress); } } } catch (Exception e) { @@ -252,9 +247,9 @@ public List buildDynamicNodes() { logger.warn("exception caught during discovery", e); } - logger.debug("{} node(s) added", cachedDiscoNodes.size()); - logger.debug("using dynamic discovery nodes {}", cachedDiscoNodes); + logger.debug("{} addresses added", cachedDynamicHosts.size()); + logger.debug("using transport addresses {}", cachedDynamicHosts); - return cachedDiscoNodes; + return cachedDynamicHosts; } } diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java index 31ea9bdb1c21e..a1944a15d8036 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java @@ -21,9 +21,9 @@ import org.elasticsearch.Version; import org.elasticsearch.cloud.gce.GceInstancesServiceImpl; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; @@ -40,7 +40,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; /** * This test class uses a GCE HTTP Mock system which allows to simulate JSON Responses. 
@@ -105,13 +104,13 @@ public void stopGceComputeService() throws IOException { } } - protected List buildDynamicNodes(GceInstancesServiceImpl gceInstancesService, Settings nodeSettings) { + protected List buildDynamicNodes(GceInstancesServiceImpl gceInstancesService, Settings nodeSettings) { GceUnicastHostsProvider provider = new GceUnicastHostsProvider(nodeSettings, gceInstancesService, transportService, new NetworkService(Collections.emptyList())); - List discoveryNodes = provider.buildDynamicNodes(); - logger.info("--> nodes found: {}", discoveryNodes); - return discoveryNodes; + List dynamicHosts = provider.buildDynamicHosts(); + logger.info("--> addresses found: {}", dynamicHosts); + return dynamicHosts; } public void testNodesWithDifferentTagsAndNoTagSet() { @@ -120,8 +119,8 @@ public void testNodesWithDifferentTagsAndNoTagSet() { .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") .build(); mock = new GceInstancesServiceMock(nodeSettings); - List discoveryNodes = buildDynamicNodes(mock, nodeSettings); - assertThat(discoveryNodes, hasSize(2)); + List dynamicHosts = buildDynamicNodes(mock, nodeSettings); + assertThat(dynamicHosts, hasSize(2)); } public void testNodesWithDifferentTagsAndOneTagSet() { @@ -131,9 +130,8 @@ public void testNodesWithDifferentTagsAndOneTagSet() { .putList(GceUnicastHostsProvider.TAGS_SETTING.getKey(), "elasticsearch") .build(); mock = new GceInstancesServiceMock(nodeSettings); - List discoveryNodes = buildDynamicNodes(mock, nodeSettings); - assertThat(discoveryNodes, hasSize(1)); - assertThat(discoveryNodes.get(0).getId(), is("#cloud-test2-0")); + List dynamicHosts = buildDynamicNodes(mock, nodeSettings); + assertThat(dynamicHosts, hasSize(1)); } public void testNodesWithDifferentTagsAndTwoTagSet() { @@ -143,9 +141,8 @@ public void testNodesWithDifferentTagsAndTwoTagSet() { .putList(GceUnicastHostsProvider.TAGS_SETTING.getKey(), "elasticsearch", "dev") .build(); mock = new GceInstancesServiceMock(nodeSettings); - List discoveryNodes = buildDynamicNodes(mock, nodeSettings); - assertThat(discoveryNodes, hasSize(1)); - assertThat(discoveryNodes.get(0).getId(), is("#cloud-test2-0")); + List dynamicHosts = buildDynamicNodes(mock, nodeSettings); + assertThat(dynamicHosts, hasSize(1)); } public void testNodesWithSameTagsAndNoTagSet() { @@ -154,8 +151,8 @@ public void testNodesWithSameTagsAndNoTagSet() { .put(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b") .build(); mock = new GceInstancesServiceMock(nodeSettings); - List discoveryNodes = buildDynamicNodes(mock, nodeSettings); - assertThat(discoveryNodes, hasSize(2)); + List dynamicHosts = buildDynamicNodes(mock, nodeSettings); + assertThat(dynamicHosts, hasSize(2)); } public void testNodesWithSameTagsAndOneTagSet() { @@ -165,8 +162,8 @@ public void testNodesWithSameTagsAndOneTagSet() { .putList(GceUnicastHostsProvider.TAGS_SETTING.getKey(), "elasticsearch") .build(); mock = new GceInstancesServiceMock(nodeSettings); - List discoveryNodes = buildDynamicNodes(mock, nodeSettings); - assertThat(discoveryNodes, hasSize(2)); + List dynamicHosts = buildDynamicNodes(mock, nodeSettings); + assertThat(dynamicHosts, hasSize(2)); } public void testNodesWithSameTagsAndTwoTagsSet() { @@ -176,8 +173,8 @@ public void testNodesWithSameTagsAndTwoTagsSet() { .putList(GceUnicastHostsProvider.TAGS_SETTING.getKey(), "elasticsearch", "dev") .build(); mock = new GceInstancesServiceMock(nodeSettings); - List discoveryNodes = buildDynamicNodes(mock, nodeSettings); - assertThat(discoveryNodes, hasSize(2)); 
+ List dynamicHosts = buildDynamicNodes(mock, nodeSettings); + assertThat(dynamicHosts, hasSize(2)); } public void testMultipleZonesAndTwoNodesInSameZone() { @@ -186,8 +183,8 @@ public void testMultipleZonesAndTwoNodesInSameZone() { .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "us-central1-a", "europe-west1-b") .build(); mock = new GceInstancesServiceMock(nodeSettings); - List discoveryNodes = buildDynamicNodes(mock, nodeSettings); - assertThat(discoveryNodes, hasSize(2)); + List dynamicHosts = buildDynamicNodes(mock, nodeSettings); + assertThat(dynamicHosts, hasSize(2)); } public void testMultipleZonesAndTwoNodesInDifferentZones() { @@ -196,8 +193,8 @@ public void testMultipleZonesAndTwoNodesInDifferentZones() { .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "us-central1-a", "europe-west1-b") .build(); mock = new GceInstancesServiceMock(nodeSettings); - List discoveryNodes = buildDynamicNodes(mock, nodeSettings); - assertThat(discoveryNodes, hasSize(2)); + List dynamicHosts = buildDynamicNodes(mock, nodeSettings); + assertThat(dynamicHosts, hasSize(2)); } /** @@ -209,8 +206,8 @@ public void testZeroNode43() { .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "us-central1-a", "us-central1-b") .build(); mock = new GceInstancesServiceMock(nodeSettings); - List discoveryNodes = buildDynamicNodes(mock, nodeSettings); - assertThat(discoveryNodes, hasSize(0)); + List dynamicHosts = buildDynamicNodes(mock, nodeSettings); + assertThat(dynamicHosts, hasSize(0)); } public void testIllegalSettingsMissingAllRequired() { @@ -261,7 +258,7 @@ public void testNoRegionReturnsEmptyList() { .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "europe-west1-b", "us-central1-a") .build(); mock = new GceInstancesServiceMock(nodeSettings); - List discoveryNodes = buildDynamicNodes(mock, nodeSettings); - assertThat(discoveryNodes, hasSize(1)); + List dynamicHosts = buildDynamicNodes(mock, nodeSettings); + assertThat(dynamicHosts, hasSize(1)); } } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java index 9ff3215cd6480..d719f9d123b8c 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java @@ -19,7 +19,7 @@ package org.elasticsearch.discovery.zen; -import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.transport.TransportAddress; import java.util.List; @@ -31,5 +31,5 @@ public interface UnicastHostsProvider { /** * Builds the dynamic list of unicast hosts to be used for unicast discovery. 
*/ - List<DiscoveryNode> buildDynamicNodes(); + List<TransportAddress> buildDynamicHosts(); } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index e9ac1deec0ab4..cbadbb4a1e09b 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -118,9 +118,6 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { private final AtomicInteger pingingRoundIdGenerator = new AtomicInteger(); - // used as a node id prefix for configured unicast host nodes/address - private static final String UNICAST_NODE_PREFIX = "#zen_unicast_"; - private final Map<Integer, PingingRound> activePingingRounds = newConcurrentMap(); // a list of temporal responses a node will return for a request (holds responses from other nodes) @@ -184,23 +181,20 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService * @param hosts the hosts to resolve * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport) * @param transportService the transport service - * @param nodeId_prefix a prefix to use for node ids * @param resolveTimeout the timeout before returning from hostname lookups - * @return a list of discovery nodes with resolved transport addresses + * @return a list of resolved transport addresses */ - public static List<DiscoveryNode> resolveHostsLists( + public static List<TransportAddress> resolveHostsLists( final ExecutorService executorService, final Logger logger, final List<String> hosts, final int limitPortCounts, final TransportService transportService, - final String nodeId_prefix, final TimeValue resolveTimeout) throws InterruptedException { Objects.requireNonNull(executorService); Objects.requireNonNull(logger); Objects.requireNonNull(hosts); Objects.requireNonNull(transportService); - Objects.requireNonNull(nodeId_prefix); Objects.requireNonNull(resolveTimeout); if (resolveTimeout.nanos() < 0) { throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]"); @@ -213,7 +207,7 @@ public static List<DiscoveryNode> resolveHostsLists( .collect(Collectors.toList()); final List<Future<TransportAddress[]>> futures = executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS); - final List<DiscoveryNode> discoveryNodes = new ArrayList<>(); + final List<TransportAddress> transportAddresses = new ArrayList<>(); final Set<TransportAddress> localAddresses = new HashSet<>(); localAddresses.add(transportService.boundAddress().publishAddress()); localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses())); @@ -231,13 +225,7 @@ public static List<DiscoveryNode> resolveHostsLists( final TransportAddress address = addresses[addressId]; // no point in pinging ourselves if (localAddresses.contains(address) == false) { - discoveryNodes.add( - new DiscoveryNode( - nodeId_prefix + hostname + "_" + addressId + "#", - address, - emptyMap(), - emptySet(), - Version.CURRENT.minimumCompatibilityVersion())); + transportAddresses.add(address); } } } catch (final ExecutionException e) { @@ -249,7 +237,7 @@ public static List<DiscoveryNode> resolveHostsLists( logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname); } } - return discoveryNodes; + return Collections.unmodifiableList(transportAddresses); } @Override @@ -292,29 +280,28 @@ public void ping(final Consumer<PingCollection> resultsConsumer, final TimeValue protected void ping(final Consumer<PingCollection> resultsConsumer, final TimeValue scheduleDuration, final TimeValue requestDuration) { - final List<DiscoveryNode> seedNodes; + 
final List seedAddresses = new ArrayList<>(); try { - seedNodes = resolveHostsLists( + seedAddresses.addAll(resolveHostsLists( unicastZenPingExecutorService, logger, configuredHosts, limitPortCounts, transportService, - UNICAST_NODE_PREFIX, - resolveTimeout); + resolveTimeout)); } catch (InterruptedException e) { throw new RuntimeException(e); } - seedNodes.addAll(hostsProvider.buildDynamicNodes()); + seedAddresses.addAll(hostsProvider.buildDynamicHosts()); final DiscoveryNodes nodes = contextProvider.clusterState().nodes(); // add all possible master nodes that were active in the last known cluster configuration for (ObjectCursor masterNode : nodes.getMasterNodes().values()) { - seedNodes.add(masterNode.value); + seedAddresses.add(masterNode.value.getAddress()); } final ConnectionProfile connectionProfile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, requestDuration, requestDuration); - final PingingRound pingingRound = new PingingRound(pingingRoundIdGenerator.incrementAndGet(), seedNodes, resultsConsumer, + final PingingRound pingingRound = new PingingRound(pingingRoundIdGenerator.incrementAndGet(), seedAddresses, resultsConsumer, nodes.getLocalNode(), connectionProfile); activePingingRounds.put(pingingRound.id(), pingingRound); final AbstractRunnable pingSender = new AbstractRunnable() { @@ -356,17 +343,17 @@ protected class PingingRound implements Releasable { private final Map tempConnections = new HashMap<>(); private final KeyedLock connectionLock = new KeyedLock<>(true); private final PingCollection pingCollection; - private final List seedNodes; + private final List seedAddresses; private final Consumer pingListener; private final DiscoveryNode localNode; private final ConnectionProfile connectionProfile; private AtomicBoolean closed = new AtomicBoolean(false); - PingingRound(int id, List seedNodes, Consumer resultsConsumer, DiscoveryNode localNode, + PingingRound(int id, List seedAddresses, Consumer resultsConsumer, DiscoveryNode localNode, ConnectionProfile connectionProfile) { this.id = id; - this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes)); + this.seedAddresses = Collections.unmodifiableList(seedAddresses.stream().distinct().collect(Collectors.toList())); this.pingListener = resultsConsumer; this.localNode = localNode; this.connectionProfile = connectionProfile; @@ -381,9 +368,9 @@ public boolean isClosed() { return this.closed.get(); } - public List getSeedNodes() { + public List getSeedAddresses() { ensureOpen(); - return seedNodes; + return seedAddresses; } public Connection getOrConnect(DiscoveryNode node) throws IOException { @@ -457,26 +444,28 @@ protected void sendPings(final TimeValue timeout, final PingingRound pingingRoun final ClusterState lastState = contextProvider.clusterState(); final UnicastPingRequest pingRequest = new UnicastPingRequest(pingingRound.id(), timeout, createPingResponse(lastState)); - Set nodesFromResponses = temporalResponses.stream().map(pingResponse -> { + List temporalAddresses = temporalResponses.stream().map(pingResponse -> { assert clusterName.equals(pingResponse.clusterName()) : "got a ping request from a different cluster. 
expected " + clusterName + " got " + pingResponse.clusterName(); - return pingResponse.node(); - }).collect(Collectors.toSet()); - - // dedup by address - final Map uniqueNodesByAddress = - Stream.concat(pingingRound.getSeedNodes().stream(), nodesFromResponses.stream()) - .collect(Collectors.toMap(DiscoveryNode::getAddress, Function.identity(), (n1, n2) -> n1)); + return pingResponse.node().getAddress(); + }).collect(Collectors.toList()); + final Stream uniqueAddresses = Stream.concat(pingingRound.getSeedAddresses().stream(), + temporalAddresses.stream()).distinct(); // resolve what we can via the latest cluster state - final Set nodesToPing = uniqueNodesByAddress.values().stream() - .map(node -> { - DiscoveryNode foundNode = lastState.nodes().findByAddress(node.getAddress()); - if (foundNode == null) { - return node; - } else { + final Set nodesToPing = uniqueAddresses + .map(address -> { + DiscoveryNode foundNode = lastState.nodes().findByAddress(address); + if (foundNode != null && transportService.nodeConnected(foundNode)) { return foundNode; + } else { + return new DiscoveryNode( + address.toString(), + address, + emptyMap(), + emptySet(), + Version.CURRENT.minimumCompatibilityVersion()); } }).collect(Collectors.toSet()); diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index 9527afed5fe03..51869068bb315 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -82,7 +82,7 @@ public void testDoesNotRespondToZenPings() throws Exception { internalCluster().getInstance(TransportService.class); // try to ping the single node directly final UnicastHostsProvider provider = - () -> Collections.singletonList(nodeTransport.getLocalNode()); + () -> Collections.singletonList(nodeTransport.getLocalNode().getAddress()); final CountDownLatch latch = new CountDownLatch(1); final DiscoveryNodes nodes = DiscoveryNodes.builder() .add(nodeTransport.getLocalNode()) diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index f71ffe28b50f6..4aa75077431e7 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -408,19 +408,18 @@ public BoundTransportAddress boundAddress() { Collections.emptySet()); closeables.push(transportService); final int limitPortCounts = randomIntBetween(1, 10); - final List discoveryNodes = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = TestUnicastZenPing.resolveHostsLists( executorService, logger, Collections.singletonList("127.0.0.1"), limitPortCounts, transportService, - "test_", TimeValue.timeValueSeconds(1)); - assertThat(discoveryNodes, hasSize(limitPortCounts)); + assertThat(transportAddresses, hasSize(limitPortCounts)); final Set ports = new HashSet<>(); - for (final DiscoveryNode discoveryNode : discoveryNodes) { - assertTrue(discoveryNode.getAddress().address().getAddress().isLoopbackAddress()); - ports.add(discoveryNode.getAddress().getPort()); + for (final TransportAddress address : transportAddresses) { + assertTrue(address.address().getAddress().isLoopbackAddress()); + ports.add(address.getPort()); } assertThat(ports, equalTo(IntStream.range(9300, 9300 + 
limitPortCounts).mapToObj(m -> m).collect(Collectors.toSet()))); } @@ -453,19 +452,18 @@ public BoundTransportAddress boundAddress() { new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); closeables.push(transportService); - final List discoveryNodes = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = TestUnicastZenPing.resolveHostsLists( executorService, logger, Collections.singletonList(NetworkAddress.format(loopbackAddress)), 10, transportService, - "test_", TimeValue.timeValueSeconds(1)); - assertThat(discoveryNodes, hasSize(7)); + assertThat(transportAddresses, hasSize(7)); final Set ports = new HashSet<>(); - for (final DiscoveryNode discoveryNode : discoveryNodes) { - assertTrue(discoveryNode.getAddress().address().getAddress().isLoopbackAddress()); - ports.add(discoveryNode.getAddress().getPort()); + for (final TransportAddress address : transportAddresses) { + assertTrue(address.address().getAddress().isLoopbackAddress()); + ports.add(address.getPort()); } assertThat(ports, equalTo(IntStream.range(9303, 9310).mapToObj(m -> m).collect(Collectors.toSet()))); } @@ -505,17 +503,16 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi Collections.emptySet()); closeables.push(transportService); - final List discoveryNodes = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = TestUnicastZenPing.resolveHostsLists( executorService, logger, Arrays.asList(hostname), 1, transportService, - "test_", TimeValue.timeValueSeconds(1) ); - assertThat(discoveryNodes, empty()); + assertThat(transportAddresses, empty()); verify(logger).warn("failed to resolve host [" + hostname + "]", unknownHostException); } @@ -565,16 +562,15 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi closeables.push(transportService); final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(1, 3)); try { - final List discoveryNodes = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = TestUnicastZenPing.resolveHostsLists( executorService, logger, Arrays.asList("hostname1", "hostname2"), 1, transportService, - "test+", resolveTimeout); - assertThat(discoveryNodes, hasSize(1)); + assertThat(transportAddresses, hasSize(1)); verify(logger).trace( "resolved host [{}] to {}", "hostname1", new TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)}); @@ -732,17 +728,16 @@ public BoundTransportAddress boundAddress() { new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); closeables.push(transportService); - final List discoveryNodes = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = TestUnicastZenPing.resolveHostsLists( executorService, logger, Arrays.asList("127.0.0.1:9300:9300", "127.0.0.1:9301"), 1, transportService, - "test_", TimeValue.timeValueSeconds(1)); - assertThat(discoveryNodes, hasSize(1)); // only one of the two is valid and will be used - assertThat(discoveryNodes.get(0).getAddress().getAddress(), equalTo("127.0.0.1")); - assertThat(discoveryNodes.get(0).getAddress().getPort(), equalTo(9301)); + assertThat(transportAddresses, hasSize(1)); // only one of the two is valid and will be used + assertThat(transportAddresses.get(0).getAddress(), equalTo("127.0.0.1")); + assertThat(transportAddresses.get(0).getPort(), equalTo(9301)); 
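As a loose, standalone approximation of the resolve-and-filter step exercised by these tests, the following sketch again uses JDK types in place of Elasticsearch's `TransportAddress` and `TransportService`; the class name and method shape are invented for illustration. Each host resolves on the executor under a shared timeout, cancelled (timed-out) and failed resolutions are skipped, and the local node's own addresses are dropped before an immutable list is returned:

[source,java]
--------------------------------------------------
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

// Illustrative reduction of the resolve step: bounded parallel DNS lookups,
// skipping failures and filtering out our own bound addresses.
final class SeedAddressResolverSketch {
    static List<InetSocketAddress> resolve(final ExecutorService executor,
                                           final List<String> hosts,
                                           final int port,
                                           final Set<InetSocketAddress> localAddresses,
                                           final long timeoutMillis) throws InterruptedException {
        final List<Callable<InetSocketAddress>> callables = new ArrayList<>();
        for (final String host : hosts) {
            callables.add(() -> new InetSocketAddress(InetAddress.getByName(host), port));
        }
        final List<Future<InetSocketAddress>> futures =
            executor.invokeAll(callables, timeoutMillis, TimeUnit.MILLISECONDS);
        final List<InetSocketAddress> resolved = new ArrayList<>();
        for (final Future<InetSocketAddress> future : futures) {
            if (future.isCancelled()) {
                continue; // resolution timed out for this host
            }
            try {
                final InetSocketAddress address = future.get();
                if (localAddresses.contains(address) == false) { // no point pinging ourselves
                    resolved.add(address);
                }
            } catch (final ExecutionException e) {
                // e.g. an UnknownHostException; skip the host, as the real code
                // logs a warning and moves on
            }
        }
        return Collections.unmodifiableList(resolved);
    }
}
--------------------------------------------------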
verify(logger).warn(eq("failed to resolve host [127.0.0.1:9300:9300]"), Matchers.any(ExecutionException.class)); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java index 46bbdcc7646c4..2e60a3c518dd3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.discovery.zen.UnicastHostsProvider; @@ -55,7 +56,7 @@ public MockUncasedHostProvider(Supplier<DiscoveryNode> localNodeSupplier, Cluste } @Override - public List<DiscoveryNode> buildDynamicNodes() { + public List<TransportAddress> buildDynamicHosts() { final DiscoveryNode localNode = getNode(); assert localNode != null; synchronized (activeNodesPerCluster) { @@ -64,6 +65,7 @@ public List<DiscoveryNode> buildDynamicNodes() { .map(MockUncasedHostProvider::getNode) .filter(Objects::nonNull) .filter(n -> localNode.equals(n) == false) + .map(DiscoveryNode::getAddress) .collect(Collectors.toList()); } } From 7af6bd1291627279acf3d07940e9c4b197731a56 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 21 Jun 2018 17:13:46 +0200 Subject: [PATCH 05/31] Add a known issue for upgrading from 5.x to 6.3.0 (#31501) This is due to https://github.com/elastic/elasticsearch/issues/31482 --- docs/reference/release-notes/6.3.asciidoc | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/reference/release-notes/6.3.asciidoc b/docs/reference/release-notes/6.3.asciidoc index e47f59118a2bc..8447fb87fb22f 100644 --- a/docs/reference/release-notes/6.3.asciidoc +++ b/docs/reference/release-notes/6.3.asciidoc @@ -1,6 +1,17 @@ [[release-notes-6.3.0]] == {es} version 6.3.0 +[float] +=== Known issues + +Upgrades from any 5.x version will fail for indices that were prepared using the <>, +or were automatically sync-flushed due to inactivity. If you are upgrading from one of those versions, please +wait for 6.3.1 to be released. + +Clusters with a Gold or Platinum license that are upgrading to 6.3 will need to explicitly set +`xpack.security.enabled: true` in `elasticsearch.yml` to upgrade successfully. +If this value is not set, the cluster will be unable to form after upgrade. + Also see <>. 
[[breaking-6.3.0]] From 54a20d0028cf8b13e503216d62596fcca20219cd Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 21 Jun 2018 08:19:23 -0700 Subject: [PATCH 06/31] [DOCS] Move migration APIs to docs (#31473) --- .../reference/migration/apis}/assistance.asciidoc | 1 + .../reference/migration/apis}/deprecation.asciidoc | 1 + .../reference/migration/apis}/upgrade.asciidoc | 1 + .../reference/migration}/migration.asciidoc | 7 ++++--- docs/reference/rest-api/index.asciidoc | 2 +- 5 files changed, 8 insertions(+), 4 deletions(-) rename {x-pack/docs/en/rest-api/migration => docs/reference/migration/apis}/assistance.asciidoc (99%) rename {x-pack/docs/en/rest-api/migration => docs/reference/migration/apis}/deprecation.asciidoc (99%) rename {x-pack/docs/en/rest-api/migration => docs/reference/migration/apis}/upgrade.asciidoc (99%) rename {x-pack/docs/en/rest-api => docs/reference/migration}/migration.asciidoc (64%) diff --git a/x-pack/docs/en/rest-api/migration/assistance.asciidoc b/docs/reference/migration/apis/assistance.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/migration/assistance.asciidoc rename to docs/reference/migration/apis/assistance.asciidoc index 1af625a97ecff..ae9972cc062bc 100644 --- a/x-pack/docs/en/rest-api/migration/assistance.asciidoc +++ b/docs/reference/migration/apis/assistance.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[migration-api-assistance]] === Migration Assistance API diff --git a/x-pack/docs/en/rest-api/migration/deprecation.asciidoc b/docs/reference/migration/apis/deprecation.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/migration/deprecation.asciidoc rename to docs/reference/migration/apis/deprecation.asciidoc index 54feee7903af8..a1f0517b82757 100644 --- a/x-pack/docs/en/rest-api/migration/deprecation.asciidoc +++ b/docs/reference/migration/apis/deprecation.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[migration-api-deprecation]] === Deprecation Info APIs diff --git a/x-pack/docs/en/rest-api/migration/upgrade.asciidoc b/docs/reference/migration/apis/upgrade.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/migration/upgrade.asciidoc rename to docs/reference/migration/apis/upgrade.asciidoc index 839a0057e82fe..39a5638cce111 100644 --- a/x-pack/docs/en/rest-api/migration/upgrade.asciidoc +++ b/docs/reference/migration/apis/upgrade.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[migration-api-upgrade]] === Migration Upgrade API diff --git a/x-pack/docs/en/rest-api/migration.asciidoc b/docs/reference/migration/migration.asciidoc similarity index 64% rename from x-pack/docs/en/rest-api/migration.asciidoc rename to docs/reference/migration/migration.asciidoc index 51f1e5fae0f65..a54da21ab1409 100644 --- a/x-pack/docs/en/rest-api/migration.asciidoc +++ b/docs/reference/migration/migration.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[migration-api]] == Migration APIs @@ -8,6 +9,6 @@ The migration APIs simplify upgrading {xpack} indices from one version to anothe * <> * <> -include::migration/assistance.asciidoc[] -include::migration/upgrade.asciidoc[] -include::migration/deprecation.asciidoc[] +include::apis/assistance.asciidoc[] +include::apis/upgrade.asciidoc[] +include::apis/deprecation.asciidoc[] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index e44eea9aa53f4..9ec57940dd299 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -21,7 +21,7 @@ directly to 
configure and access {xpack} features. include::info.asciidoc[] include::{xes-repo-dir}/rest-api/graph/explore.asciidoc[] include::{es-repo-dir}/licensing/index.asciidoc[] -include::{xes-repo-dir}/rest-api/migration.asciidoc[] +include::{es-repo-dir}/migration/migration.asciidoc[] include::{xes-repo-dir}/rest-api/ml-api.asciidoc[] include::{xes-repo-dir}/rest-api/rollup-api.asciidoc[] include::{xes-repo-dir}/rest-api/security.asciidoc[] From 0110a9065871785e432337f9c5e7398129c10739 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 21 Jun 2018 13:24:39 -0400 Subject: [PATCH 07/31] Test: Skip assertion on windows Windows doesn't provide consistent exception messages when it can't connect so skip the exception message assertion on windows. Closes #31457 --- .../client/RestClientMultipleHostsIntegTests.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index d09741ea25b6c..7f5915fe3529d 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -42,9 +42,7 @@ import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; -import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -216,8 +214,10 @@ public void testNodeSelector() throws IOException { restClient.performRequest(request); fail("expected to fail to connect"); } catch (ConnectException e) { - // This is different in windows and linux but this matches both. - assertThat(e.getMessage(), startsWith("Connection refused")); + // Windows isn't consistent here. Sometimes the message is even null! + if (false == System.getProperty("os.name").startsWith("Windows")) { + assertEquals("Connection refused", e.getMessage()); + } } } else { Response response = restClient.performRequest(request); From 7ee32884e754c29bc9c76b8f4513029254c547d4 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 21 Jun 2018 10:08:50 -0700 Subject: [PATCH 08/31] [DOCS] Creates field and document level security overview (#30937) --- ...field-and-document-access-control.asciidoc | 405 +----------------- .../authorization/role-templates.asciidoc | 71 +++ .../authorization/set-security-user.asciidoc | 61 +++ 3 files changed, 140 insertions(+), 397 deletions(-) create mode 100644 x-pack/docs/en/security/authorization/role-templates.asciidoc create mode 100644 x-pack/docs/en/security/authorization/set-security-user.asciidoc diff --git a/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc b/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc index cc19c0692457b..119a090232c2f 100644 --- a/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc +++ b/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc @@ -3,9 +3,11 @@ === Setting up field and document level security You can control access to data within an index by adding field and document level -security permissions to a role. 
Field level security permissions restrict access -to particular fields within a document. Document level security permissions -restrict access to particular documents within an index. +security permissions to a role. +<> restrict access to +particular fields within a document. +<> restrict access +to particular documents within an index. NOTE: Document and field level security is currently meant to operate with read-only privileged accounts. Users with document and field level @@ -23,400 +25,6 @@ grant wider access than intended. Each user has a single set of field level and document level permissions per index. See <>. ===================================================================== -[[field-level-security]] -==== Field level security - -To enable field level security, specify the fields that each role can access -as part of the indices permissions in a role definition. Field level security is -thus bound to a well-defined set of indices (and potentially a set of -<>). - -The following role definition grants read access only to the `category`, -`@timestamp`, and `message` fields in all the `events-*` indices. - -[source,js] --------------------------------------------------- -{ - "indices": [ - { - "names": [ "events-*" ], - "privileges": [ "read" ], - "field_security" : { - "grant" : [ "category", "@timestamp", "message" ] - } - } - ] -} --------------------------------------------------- - -To allow access to the `_all` meta field, you must explicitly list it as an -allowed field. Access to the following meta fields is always allowed: `_id`, -`_type`, `_parent`, `_routing`, `_timestamp`, `_ttl`, `_size` and `_index`. If -you specify an empty list of fields, only these meta fields are accessible. - -NOTE: Omitting the fields entry entirely disables field-level security. - -You can also specify field expressions. For example, the following -example grants read access to all fields that start with an `event_` prefix: - -[source,js] --------------------------------------------------- -{ - "indices" : [ - { - "names" : [ "*" ], - "privileges" : [ "read" ], - "field_security" : { - "grant" : [ "event_*" ] - } - } - ] -} --------------------------------------------------- - -Use the dot notations to refer to nested fields in more complex documents. For -example, assuming the following document: - -[source,js] --------------------------------------------------- -{ - "customer": { - "handle": "Jim", - "email": "jim@mycompany.com", - "phone": "555-555-5555" - } -} --------------------------------------------------- - -The following role definition enables only read access to the customer `handle` -field: - -[source,js] --------------------------------------------------- -{ - "indices" : [ - { - "names" : [ "*" ], - "privileges" : [ "read" ], - "field_security" : { - "grant" : [ "customer.handle" ] - } - } - ] -} --------------------------------------------------- - -This is where wildcard support shines. 
For example, use `customer.*` to enable -only read access to the `customer` data: - -[source,js] --------------------------------------------------- -{ - "indices" : [ - { - "names" : [ "*" ], - "privileges" : [ "read" ], - "field_security" : { - "grant" : [ "customer.*" ] - } - } - ] -} --------------------------------------------------- - -You can deny permission to access fields with the following syntax: - -[source,js] --------------------------------------------------- -{ - "indices" : [ - { - "names" : [ "*" ], - "privileges" : [ "read" ], - "field_security" : { - "grant" : [ "*"], - "except": [ "customer.handle" ] - } - } - ] -} --------------------------------------------------- - - -The following rules apply: - -* The absence of `field_security` in a role is equivalent to * access. -* If permission has been granted explicitly to some fields, you can specify -denied fields. The denied fields must be a subset of the fields to which -permissions were granted. -* Defining denied and granted fields implies access to all granted fields except -those which match the pattern in the denied fields. - -For example: - -[source,js] --------------------------------------------------- -{ - "indices" : [ - { - "names" : [ "*" ], - "privileges" : [ "read" ], - "field_security" : { - "except": [ "customer.handle" ], - "grant" : [ "customer.*" ] - } - } - ] -} --------------------------------------------------- - -In the above example, users can read all fields with the prefix "customer." -except for "customer.handle". - -An empty array for `grant` (for example, `"grant" : []`) means that access has -not been granted to any fields. - -===== Field Level Security and Roles - -When a user has several roles that specify field level permissions, the -resulting field level permissions per index are the union of the individual role -permissions. For example, if these two roles are merged: - -[source,js] --------------------------------------------------- -{ - // role 1 - ... - "indices" : [ - { - "names" : [ "*" ], - "privileges" : [ "read" ], - "field_security" : { - "grant": [ "a.*" ], - "except" : [ "a.b*" ] - } - } - ] -} - -{ - // role 2 - ... - "indices" : [ - { - "names" : [ "*" ], - "privileges" : [ "read" ], - "field_security" : { - "grant": [ "a.b*" ], - "except" : [ "a.b.c*" ] - } - } - ] -} --------------------------------------------------- - -The resulting permission is equal to: - -[source,js] --------------------------------------------------- -{ - // role 1 + role 2 - ... - "indices" : [ - { - "names" : [ "*" ], - "privileges" : [ "read" ], - "field_security" : { - "grant": [ "a.*" ], - "except" : [ "a.b.c*" ] - } - } - ] -} --------------------------------------------------- - - -[[document-level-security]] -==== Document level security - -Document level security restricts the documents that users have read access to. -To enable document level security, specify a query that matches all the -accessible documents as part of the indices permissions within a role definition. -Document level security is thus bound to a well defined set of indices. - -Enabling document level security restricts which documents can be accessed from -any document-based read API. To enable document level security, you use a query -to specify the documents that each role can access in the `roles.yml` file. -You specify the document query with the `query` option. The document query is -associated with a particular index or index pattern and operates in conjunction -with the privileges specified for the indices. 
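Conceptually, and this is only a toy model in plain Java rather than how {es} actually evaluates queries, the role query behaves like a mandatory filter that is intersected with whatever the user asks for; the class and method names below are invented for the example:

[source,java]
--------------------------------------------------
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;

// Toy model of document level security: a document is visible only where
// BOTH the role's query and the user's own query match.
final class DocumentLevelSecuritySketch {
    static <D> List<D> search(final List<D> index,
                              final Predicate<D> roleQuery,
                              final Predicate<D> userQuery) {
        return index.stream()
            .filter(roleQuery)  // visibility filter from the role definition
            .filter(userQuery)  // the query the user actually sent
            .collect(Collectors.toList());
    }
}
--------------------------------------------------

In terms of the `events-*` example above, `roleQuery` would play the part of the `{"match": {"category": "click"}}` filter, applied no matter what the user searches for.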
- -The following role definition grants read access only to documents that -belong to the `click` category within all the `events-*` indices: - -[source,js] --------------------------------------------------- -{ - "indices": [ - { - "names": [ "events-*" ], - "privileges": [ "read" ], - "query": "{\"match\": {\"category\": \"click\"}}" - } - ] -} --------------------------------------------------- - -NOTE: Omitting the `query` entry entirely disables document level security for - the respective indices permission entry. - -The specified `query` expects the same format as if it was defined in the -search request and supports the full {es} {ref}/query-dsl.html[Query DSL]. - -For example, the following role grants read access only to the documents whose -`department_id` equals `12`: - -[source,js] --------------------------------------------------- -{ - "indices" : [ - { - "names" : [ "*" ], - "privileges" : [ "read" ], - "query" : { - "term" : { "department_id" : 12 } - } - } - ] -} --------------------------------------------------- - -NOTE: `query` also accepts queries written as string values. - -[[templating-role-query]] -===== Templating a role query - -You can use Mustache templates in a role query to insert the username of the -current authenticated user into the role. Like other places in {es} that support -templating or scripting, you can specify inline, stored, or file-based templates -and define custom parameters. You access the details for the current -authenticated user through the `_user` parameter. - -For example, the following role query uses a template to insert the username -of the current authenticated user: - -[source,js] --------------------------------------------------- -{ - "indices" : [ - { - "names" : [ "my_index" ], - "privileges" : [ "read" ], - "query" : { - "template" : { - "source" : { - "term" : { "acl.username" : "{{_user.username}}" } - } - } - } - } - ] -} --------------------------------------------------- - -You can access the following information through the `_user` variable: - -[options="header"] -|====== -| Property | Description -| `_user.username` | The username of the current authenticated user. -| `_user.full_name` | If specified, the full name of the current authenticated user. -| `_user.email` | If specified, the email of the current authenticated user. -| `_user.roles` | If associated, a list of the role names of the current authenticated user. -| `_user.metadata` | If specified, a hash holding custom metadata of the current authenticated user. -|====== - -You can also access custom user metadata. For example, if you maintain a -`group_id` in your user metadata, you can apply document level security -based on the `group.id` field in your documents: - -[source,js] --------------------------------------------------- -{ - "indices" : [ - { - "names" : [ "my_index" ], - "privileges" : [ "read" ], - "query" : { - "template" : { - "source" : { - "term" : { "group.id" : "{{_user.metadata.group_id}}" } - } - } - } - } - ] -} --------------------------------------------------- - -[[set-security-user-processor]] -===== Set security user ingest processor - -If an index is shared by many small users it makes sense to put all these users -into the same index. Having a dedicated index or shard per user is wasteful. -To guarantee that a user reads only their own documents, it makes sense to set up -document level security. 
In this scenario, each document must have the username -or role name associated with it, so that this information can be used by the -role query for document level security. This is a situation where the -`set_security_user` ingest processor can help. - -NOTE: Document level security doesn't apply to write APIs. You must use unique -ids for each user that uses the same index, otherwise they might overwrite other -users' documents. The ingest processor just adds properties for the current -authenticated user to the documents that are being indexed. - -The `set_security_user` processor attaches user-related details (such as -`username`, `roles`, `email`, `full_name` and `metadata` ) from the current -authenticated user to the current document by pre-processing the ingest. When -you index data with an ingest pipeline, user details are automatically attached -to the document. For example: - -[source,js] --------------------------------------------------- -PUT shared-logs/log/1?pipeline=my_pipeline_id -{ - ... -} --------------------------------------------------- - -Read the {ref}/ingest.html[ingest docs] for more information -about setting up a pipeline and other processors. - -[[set-security-user-options]] -.Set Security User Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to store the user information into. -| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`. -|====== - -The following example adds all user details for the current authenticated user -to the `user` field for all documents that are processed by this pipeline: - -[source,js] --------------------------------------------------- -{ - "processors" : [ - { - "set_security_user": { - "field": "user" - } - } - ] -} --------------------------------------------------- - [[multiple-roles-dls-fls]] ==== Multiple roles with document and field level security @@ -448,3 +56,6 @@ fields. If you need to restrict access to both documents and fields, consider splitting documents by index instead. + +include::role-templates.asciidoc[] +include::set-security-user.asciidoc[] diff --git a/x-pack/docs/en/security/authorization/role-templates.asciidoc b/x-pack/docs/en/security/authorization/role-templates.asciidoc new file mode 100644 index 0000000000000..1bad73a5d1e94 --- /dev/null +++ b/x-pack/docs/en/security/authorization/role-templates.asciidoc @@ -0,0 +1,71 @@ +[[templating-role-query]] +==== Templating a role query + +When you create a role, you can specify a query that defines the +<>. You can +optionally use Mustache templates in the role query to insert the username of the +current authenticated user into the role. Like other places in {es} that support +templating or scripting, you can specify inline, stored, or file-based templates +and define custom parameters. You access the details for the current +authenticated user through the `_user` parameter. 
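The mechanics of the substitution are easiest to see in miniature. The sketch below is a toy illustration only: real role queries are rendered with Mustache (supporting inline, stored, and file-based templates), not naive string replacement, and the class name and the user `jacknich` are made up for the example (requires Java 9+ for `Map.of`):

[source,java]
--------------------------------------------------
import java.util.Map;

final class RoleQueryTemplateSketch {
    // Fills {{_user.*}} placeholders from the authenticated user's properties
    // before the query is applied as a document-level filter.
    static String render(final String template, final Map<String, String> userProperties) {
        String rendered = template;
        for (final Map.Entry<String, String> entry : userProperties.entrySet()) {
            rendered = rendered.replace("{{_user." + entry.getKey() + "}}", entry.getValue());
        }
        return rendered;
    }

    public static void main(final String[] args) {
        final String roleQuery = "{\"term\": {\"acl.username\": \"{{_user.username}}\"}}";
        // prints {"term": {"acl.username": "jacknich"}}
        System.out.println(render(roleQuery, Map.of("username", "jacknich")));
    }
}
--------------------------------------------------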
+ +For example, the following role query uses a template to insert the username +of the current authenticated user: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role/example1 +{ + "indices" : [ + { + "names" : [ "my_index" ], + "privileges" : [ "read" ], + "query" : { + "template" : { + "source" : { + "term" : { "acl.username" : "{{_user.username}}" } + } + } + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +You can access the following information through the `_user` variable: + +[options="header"] +|====== +| Property | Description +| `_user.username` | The username of the current authenticated user. +| `_user.full_name` | If specified, the full name of the current authenticated user. +| `_user.email` | If specified, the email of the current authenticated user. +| `_user.roles` | If associated, a list of the role names of the current authenticated user. +| `_user.metadata` | If specified, a hash holding custom metadata of the current authenticated user. +|====== + +You can also access custom user metadata. For example, if you maintain a +`group_id` in your user metadata, you can apply document level security +based on the `group.id` field in your documents: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role/example2 +{ + "indices" : [ + { + "names" : [ "my_index" ], + "privileges" : [ "read" ], + "query" : { + "template" : { + "source" : { + "term" : { "group.id" : "{{_user.metadata.group_id}}" } + } + } + } + } + ] +} +-------------------------------------------------- +// CONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/security/authorization/set-security-user.asciidoc b/x-pack/docs/en/security/authorization/set-security-user.asciidoc new file mode 100644 index 0000000000000..92b9ae275aec8 --- /dev/null +++ b/x-pack/docs/en/security/authorization/set-security-user.asciidoc @@ -0,0 +1,61 @@ +[[set-security-user-processor]] +==== Pre-processing documents to add security details + +// If an index is shared by many small users it makes sense to put all these users +// into the same index. Having a dedicated index or shard per user is wasteful. +// TBD: It's unclear why we're putting users in an index here. + +To guarantee that a user reads only their own documents, it makes sense to set up +document level security. In this scenario, each document must have the username +or role name associated with it, so that this information can be used by the +role query for document level security. This is a situation where the +`set_security_user` ingest processor can help. + +NOTE: Document level security doesn't apply to write APIs. You must use unique +ids for each user that uses the same index, otherwise they might overwrite other +users' documents. The ingest processor just adds properties for the current +authenticated user to the documents that are being indexed. + +The `set_security_user` processor attaches user-related details (such as +`username`, `roles`, `email`, `full_name` and `metadata` ) from the current +authenticated user to the current document by pre-processing the ingest. When +you index data with an ingest pipeline, user details are automatically attached +to the document. For example: + +[source,js] +-------------------------------------------------- +PUT shared-logs/log/1?pipeline=my_pipeline_id +{ + ... 
+} +-------------------------------------------------- +// NOTCONSOLE + +For more information about setting up a pipeline and other processors, see +{ref}/ingest.html[ingest node]. + +[[set-security-user-options]] +.Set Security User Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to store the user information into. +| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`. +|====== + +The following example adds all user details for the current authenticated user +to the `user` field for all documents that are processed by this pipeline: + +[source,js] +-------------------------------------------------- +{ + "processors" : [ + { + "set_security_user": { + "field": "user" + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE \ No newline at end of file From 4438be14de2a48a6730697554856eed676824b5a Mon Sep 17 00:00:00 2001 From: lcawl Date: Thu, 21 Jun 2018 11:13:19 -0700 Subject: [PATCH 09/31] [DOCS] Remove fixed file from build.gradle --- x-pack/docs/build.gradle | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index cc072609b7af3..d99494921887c 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -11,7 +11,6 @@ apply plugin: 'elasticsearch.docs-test' buildRestTests.expectedUnconvertedCandidates = [ 'en/rest-api/watcher/put-watch.asciidoc', 'en/security/authentication/user-cache.asciidoc', - 'en/security/authorization/field-and-document-access-control.asciidoc', 'en/security/authorization/run-as-privilege.asciidoc', 'en/security/tribe-clients-integrations/http.asciidoc', 'en/security/authorization/custom-roles-provider.asciidoc', From e0bc4d590d6c86a643a685778b58743849289bda Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 21 Jun 2018 11:32:11 -0700 Subject: [PATCH 10/31] [DOCS] Add code snippet testing in more ML APIs (#31339) --- x-pack/docs/build.gradle | 22 ++++++++++++++----- x-pack/docs/en/rest-api/ml/forecast.asciidoc | 8 +++---- .../en/rest-api/ml/preview-datafeed.asciidoc | 22 ++++++++++--------- 3 files changed, 32 insertions(+), 20 deletions(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index d99494921887c..06d4d2cbe8ed7 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -47,7 +47,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/watcher/trigger/schedule/yearly.asciidoc', 'en/watcher/troubleshooting.asciidoc', 'en/rest-api/ml/delete-snapshot.asciidoc', - 'en/rest-api/ml/forecast.asciidoc', 'en/rest-api/ml/get-bucket.asciidoc', 'en/rest-api/ml/get-job-stats.asciidoc', 'en/rest-api/ml/get-overall-buckets.asciidoc', @@ -56,7 +55,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/rest-api/ml/get-influencer.asciidoc', 'en/rest-api/ml/get-snapshot.asciidoc', 'en/rest-api/ml/post-data.asciidoc', - 'en/rest-api/ml/preview-datafeed.asciidoc', 'en/rest-api/ml/revert-snapshot.asciidoc', 'en/rest-api/ml/update-snapshot.asciidoc', 'en/rest-api/watcher/stats.asciidoc', @@ -297,7 +295,9 @@ setups['farequote_index'] = ''' responsetime: type: float airline: - type: keyword + type: keyword + doc_count: + type: integer ''' setups['farequote_data'] = setups['farequote_index'] + ''' - do: @@ -307,11 +307,11 @@ setups['farequote_data'] = setups['farequote_index'] + ''' refresh: true body: | {"index": {"_id":"1"}} - {"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000"} + 
{"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000", "doc_count": 5} {"index": {"_id":"2"}} - {"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000"} + {"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000", "doc_count": 23} {"index": {"_id":"3"}} - {"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000"} + {"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000", "doc_count": 42} ''' setups['farequote_job'] = setups['farequote_data'] + ''' - do: @@ -333,6 +333,16 @@ setups['farequote_job'] = setups['farequote_data'] + ''' } } ''' +setups['farequote_datafeed'] = setups['farequote_job'] + ''' + - do: + xpack.ml.put_datafeed: + datafeed_id: "datafeed-farequote" + body: > + { + "job_id":"farequote", + "indexes":"farequote" + } +''' setups['server_metrics_index'] = ''' - do: indices.create: diff --git a/x-pack/docs/en/rest-api/ml/forecast.asciidoc b/x-pack/docs/en/rest-api/ml/forecast.asciidoc index 169debef7b6cb..99647ecae1b25 100644 --- a/x-pack/docs/en/rest-api/ml/forecast.asciidoc +++ b/x-pack/docs/en/rest-api/ml/forecast.asciidoc @@ -5,7 +5,7 @@ Forecast Jobs ++++ -Predict the future behavior of a time series by using historical behavior. +Predicts the future behavior of a time series by using its historical behavior. ==== Request @@ -62,7 +62,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_forecast } -------------------------------------------------- // CONSOLE -// TEST[skip:todo] +// TEST[skip:requires delay] When the forecast is created, you receive the following results: [source,js] @@ -72,7 +72,7 @@ When the forecast is created, you receive the following results: "forecast_id": "wkCWa2IB2lF8nSE_TzZo" } ---- +// NOTCONSOLE You can subsequently see the forecast in the *Single Metric Viewer* in {kib}. -//and in the results that you retrieve by using {ml} APIs such as the -//<> and <>. + diff --git a/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc b/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc index e6b51f8ef069f..637b506cb9af7 100644 --- a/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc +++ b/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc @@ -31,7 +31,6 @@ structure of the data that will be passed to the anomaly detection engine. You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. ==== Security Integration @@ -54,27 +53,30 @@ The following example obtains a preview of the `datafeed-farequote` {dfeed}: GET _xpack/ml/datafeeds/datafeed-farequote/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:todo] +// TEST[setup:farequote_datafeed] The data that is returned for this example is as follows: [source,js] ---- [ { - "@timestamp": 1454803200000, - "airline": "AAL", - "responsetime": 132.20460510253906 - }, - { - "@timestamp": 1454803200000, + "time": 1454803200000, "airline": "JZA", + "doc_count": 5, "responsetime": 990.4628295898438 }, { - "@timestamp": 1454803200000, + "time": 1454803200000, "airline": "JBU", + "doc_count": 23, "responsetime": 877.5927124023438 }, - ... 
+  {
+    "time": 1454803200000,
+    "airline": "KLM",
+    "doc_count": 42,
+    "responsetime": 1355.481201171875
+  }
 ]
 ----
+// TESTRESPONSE

From 0883ebee155dd13728150ab75d502acca95e7a05 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Thu, 21 Jun 2018 13:50:46 -0400
Subject: [PATCH 11/31] Rename createNewTranslog to fileBasedRecovery (#31508)

We renamed `createNewTranslog` to `fileBasedRecovery` in the RecoveryTarget
but did not do this for RecoverySourceHandler. This commit makes sure that we
use a consistent parameter name in both recovery source and target.
---
 .../elasticsearch/indices/recovery/RecoverySourceHandler.java | 4 ++--
 .../indices/recovery/RecoverySourceHandlerTests.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index 72a6fcb6ba329..45500349865f7 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -449,13 +449,13 @@ public void phase1(final IndexCommit snapshot, final Supplier<Integer> translogO
         }
     }

-    void prepareTargetForTranslog(final boolean createNewTranslog, final int totalTranslogOps) throws IOException {
+    void prepareTargetForTranslog(final boolean fileBasedRecovery, final int totalTranslogOps) throws IOException {
         StopWatch stopWatch = new StopWatch().start();
         logger.trace("recovery [phase1]: prepare remote engine for translog");
         final long startEngineStart = stopWatch.totalTime().millis();
         // Send a request preparing the new shard's translog to receive operations. This ensures the shard engine is started and disables
         // garbage collection (not the JVM's GC!) of tombstone deletes.
-        cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(createNewTranslog, totalTranslogOps));
+        cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps));
         stopWatch.stop();
         response.startTime = stopWatch.totalTime().millis() - startEngineStart;

diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
index 01545b8a681cf..d3264451415d5 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
@@ -423,7 +423,7 @@ public void phase1(final IndexCommit snapshot, final Supplier<Integer> translogO
         }

         @Override
-        void prepareTargetForTranslog(final boolean createNewTranslog, final int totalTranslogOps) throws IOException {
+        void prepareTargetForTranslog(final boolean fileBasedRecovery, final int totalTranslogOps) throws IOException {
             prepareTargetForTranslogCalled.set(true);
         }

From 65ce504a85ed60bccba6a77d2553dd409f24a620 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Wed, 20 Jun 2018 10:34:08 -0400
Subject: [PATCH 12/31] Remove QueryCachingPolicy#ALWAYS_CACHE (#31451)

The QueryCachingPolicy#ALWAYS_CACHE was deprecated in Lucene-7.4 and
will be removed in Lucene-8.0. This change replaces it with an inline
QueryCachingPolicy implementation that always caches. It also makes
INDEX_QUERY_CACHE_EVERYTHING_SETTING available in tests only.
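For illustration only (not part of this change's diff): a minimal sketch of
the replacement pattern, equivalent to the inline policies and the
alwaysCachePolicy() helper introduced below. The class name AlwaysCachePolicy
is invented for this example; the Lucene types are real.

    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.QueryCachingPolicy;

    final class AlwaysCachePolicy implements QueryCachingPolicy {
        @Override
        public void onUse(Query query) {
            // no usage tracking is needed when every query is cached
        }

        @Override
        public boolean shouldCache(Query query) {
            // cache unconditionally, matching the removed ALWAYS_CACHE behavior
            return true;
        }
    }

Callers then pass an instance wherever QueryCachingPolicy.ALWAYS_CACHE was
used before, for example searcher.setQueryCachingPolicy(new AlwaysCachePolicy()).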
--- .../common/settings/IndexScopedSettings.java | 1 - .../elasticsearch/index/shard/IndexShard.java | 12 +++++++++- .../indices/IndicesQueryCacheTests.java | 23 +++++++++++++++---- .../scriptfilter/ScriptQuerySearchIT.java | 4 +++- .../elasticsearch/test/ESIntegTestCase.java | 4 ---- .../test/InternalSettingsPlugin.java | 5 +++- .../test/SecuritySettingsSource.java | 3 ++- 7 files changed, 38 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index eded62c36ebc7..bf33fdb66a02c 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -149,7 +149,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_STORE_PRE_LOAD_SETTING, IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, - IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING, FsDirectoryService.INDEX_LOCK_FACTOR_SETTING, EngineConfig.INDEX_CODEC_SETTING, EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS, diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index a03def2cd572b..dfe2f3e1b46b1 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -30,6 +30,7 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.Term; +import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.Sort; @@ -298,7 +299,16 @@ public IndexShard( // the query cache is a node-level thing, however we want the most popular filters // to be computed on a per-shard basis if (IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.get(settings)) { - cachingPolicy = QueryCachingPolicy.ALWAYS_CACHE; + cachingPolicy = new QueryCachingPolicy() { + @Override + public void onUse(Query query) { + + } + @Override + public boolean shouldCache(Query query) { + return true; + } + }; } else { cachingPolicy = new UsageTrackingQueryCachingPolicy(); } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java index 83bde66e3bd21..e155639f143c6 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java @@ -89,6 +89,19 @@ public boolean isCacheable(LeafReaderContext ctx) { } + private static QueryCachingPolicy alwaysCachePolicy() { + return new QueryCachingPolicy() { + @Override + public void onUse(Query query) { + + } + @Override + public boolean shouldCache(Query query) { + return true; + } + }; + } + public void testBasics() throws IOException { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); @@ -98,7 +111,7 @@ public void testBasics() throws IOException { ShardId shard = new ShardId("index", "_na_", 0); r = ElasticsearchDirectoryReader.wrap(r, shard); IndexSearcher s = new IndexSearcher(r); - s.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE); + s.setQueryCachingPolicy(alwaysCachePolicy()); Settings settings = 
Settings.builder() .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10) @@ -169,7 +182,7 @@ public void testTwoShards() throws IOException { ShardId shard1 = new ShardId("index", "_na_", 0); r1 = ElasticsearchDirectoryReader.wrap(r1, shard1); IndexSearcher s1 = new IndexSearcher(r1); - s1.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE); + s1.setQueryCachingPolicy(alwaysCachePolicy()); Directory dir2 = newDirectory(); IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig()); @@ -179,7 +192,7 @@ public void testTwoShards() throws IOException { ShardId shard2 = new ShardId("index", "_na_", 1); r2 = ElasticsearchDirectoryReader.wrap(r2, shard2); IndexSearcher s2 = new IndexSearcher(r2); - s2.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE); + s2.setQueryCachingPolicy(alwaysCachePolicy()); Settings settings = Settings.builder() .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10) @@ -295,7 +308,7 @@ public void testStatsOnEviction() throws IOException { ShardId shard1 = new ShardId("index", "_na_", 0); r1 = ElasticsearchDirectoryReader.wrap(r1, shard1); IndexSearcher s1 = new IndexSearcher(r1); - s1.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE); + s1.setQueryCachingPolicy(alwaysCachePolicy()); Directory dir2 = newDirectory(); IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig()); @@ -305,7 +318,7 @@ public void testStatsOnEviction() throws IOException { ShardId shard2 = new ShardId("index", "_na_", 1); r2 = ElasticsearchDirectoryReader.wrap(r2, shard2); IndexSearcher s2 = new IndexSearcher(r2); - s2.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE); + s2.setQueryCachingPolicy(alwaysCachePolicy()); Settings settings = Settings.builder() .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10) diff --git a/server/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java index 45e374b8697a2..16a9d99b78341 100644 --- a/server/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -31,8 +31,10 @@ import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; +import java.util.Arrays; import java.util.Base64; import java.util.Collection; import java.util.Collections; @@ -52,7 +54,7 @@ public class ScriptQuerySearchIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singleton(CustomScriptPlugin.class); + return Arrays.asList(CustomScriptPlugin.class, InternalSettingsPlugin.class); } public static class CustomScriptPlugin extends MockScriptPlugin { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index e63c93f9e10c0..4a5b67c2e6f16 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -434,10 +434,6 @@ public void randomIndexTemplate() throws IOException { if (randomBoolean()) { randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), randomBoolean()); } - - if (randomBoolean()) { - 
randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), randomBoolean());
-        }
         PutIndexTemplateRequestBuilder putTemplate = client().admin().indices()
             .preparePutTemplate("random_index_template")
             .setPatterns(Collections.singletonList("*"))
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
index e1c555b811064..be8c824f0f790 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.plugins.Plugin;

@@ -51,6 +52,8 @@ public List<Setting<?>> getSettings() {
             INDEX_CREATION_DATE_SETTING,
             PROVIDED_NAME_SETTING,
             TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING,
-            IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING);
+            IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING,
+            IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING
+            );
     }
 }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java
index 1690ab652c067..8ad1c61029a97 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java
@@ -169,7 +169,8 @@ protected void addDefaultSecurityTransportType(Settings.Builder builder, Setting

     @Override
     public Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(LocalStateSecurity.class, Netty4Plugin.class, ReindexPlugin.class, CommonAnalysisPlugin.class);
+        return Arrays.asList(LocalStateSecurity.class, Netty4Plugin.class, ReindexPlugin.class, CommonAnalysisPlugin.class,
+            InternalSettingsPlugin.class);
     }

     @Override

From 9100a702124342ef34c6791c1581e65c0f95fbb5 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Thu, 21 Jun 2018 23:28:58 -0400
Subject: [PATCH 13/31] Fix missing historyUUID in peer recovery when rolling upgrade 5.x to 6.3 (#31506)

Today we ensure that a 5.x index commit has all required commit tags in the
RecoveryTarget#cleanFiles method. We do the check there because it is only
needed in a file-based recovery, and we assume that #cleanFiles is called
only during a file-based recovery. However, this assumption does not hold if
the index is sealed (i.e. synced-flushed). This incorrect assumption would
prevent users from performing a rolling upgrade from 5.x to 6.3 if their
index were sealed.
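For context, "sealed" here means synced-flushed. As a sketch (the index name
is illustrative, and the response is abbreviated to the _shards summary that
the new tests assert on), sealing an index is a plain REST call:

    POST /my-index/_flush/synced

    {
      "_shards": {
        "total": 2,
        "successful": 2,
        "failed": 0
      }
    }

All copies are sealed only when successful equals total; the tests below spin
this request because the global checkpoint sync fired for the last write
operation acquires a shard permit and can make a synced flush report a
partial result.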
Closes #31482
---
 .../upgrades/FullClusterRestartIT.java        | 18 +++++++++++--
 .../elasticsearch/upgrades/RecoveryIT.java    | 26 +++++++++++++++++++
 .../indices/recovery/RecoveryTarget.java      |  6 ++---
 3 files changed, 45 insertions(+), 5 deletions(-)

diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
index 54e2d88dde769..a205d21f33775 100644
--- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
+++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
@@ -24,6 +24,7 @@
 import org.apache.http.entity.StringEntity;
 import org.apache.http.util.EntityUtils;
 import org.elasticsearch.Version;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Booleans;
@@ -713,8 +714,21 @@ public void testRecovery() throws Exception {
         // make sure all recoveries are done
         ensureGreen(index);

-        // Explicitly flush so we're sure to have a bunch of documents in the Lucene index
-        client().performRequest("POST", "/_flush");
+        // Recovering a synced-flush index from 5.x to 6.x might be subtle as a 5.x index commit does not have all 6.x commit tags.
+        if (randomBoolean()) {
+            // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation.
+            // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit.
+            assertBusy(() -> {
+                Response resp = client().performRequest(new Request("POST", index + "/_flush/synced"));
+                assertOK(resp);
+                Map<String, Object> result = ObjectPath.createFromResponse(resp).evaluate("_shards");
+                assertThat(result.get("successful"), equalTo(result.get("total")));
+                assertThat(result.get("failed"), equalTo(0));
+            });
+        } else {
+            // Explicitly flush so we're sure to have a bunch of documents in the Lucene index
+            assertOK(client().performRequest(new Request("POST", "/_flush")));
+        }
         if (shouldHaveTranslog) {
             // Update a few documents so we are sure to have a translog
             indexRandomDocuments(count / 10, false /* Flushing here would invalidate the whole thing....*/, false,
diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java
index f7bbe4847b959..eea8b915fb40e 100644
--- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java
+++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java
@@ -22,6 +22,7 @@
 import org.apache.http.entity.StringEntity;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Settings;
@@ -285,4 +286,29 @@ public void testSearchGeoPoints() throws Exception {
         }
     }

+    public void testRecoverSyncedFlushIndex() throws Exception {
+        final String index = "recover_synced_flush_index";
+        if (CLUSTER_TYPE == ClusterType.OLD) {
+            Settings.Builder settings = Settings.builder()
+                .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
+                .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+                // if the node with the replica is the first to be restarted, while a replica is
still recovering
+                // then delayed allocation will kick in. When the node comes back, the master will search for a copy
+                // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
+                // before timing out
+                .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
+                .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster
+            createIndex(index, settings.build());
+            indexDocs(index, 0, randomInt(5));
+            // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation.
+            // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit.
+            assertBusy(() -> {
+                Response resp = client().performRequest(new Request("POST", index + "/_flush/synced"));
+                assertOK(resp);
+                Map<String, Object> result = ObjectPath.createFromResponse(resp).evaluate("_shards");
+                assertThat(result.get("successful"), equalTo(2));
+            });
+        }
+        ensureGreen(index);
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
index e2004eda17fc1..7ef8534f93d02 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
@@ -362,6 +362,9 @@ private void ensureRefCount() {

     @Override
     public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException {
+        if (fileBasedRecovery && indexShard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0)) {
+            store.ensureIndexHas6xCommitTags();
+        }
         state().getTranslog().totalOperations(totalTranslogOps);
         indexShard().openEngineAndSkipTranslogRecovery();
     }
@@ -438,9 +441,6 @@ public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaDa
         store.incRef();
         try {
             store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
-            if (indexShard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1)) {
-                store.ensureIndexHas6xCommitTags();
-            }
             // TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2
             final String translogUUID = Translog.createEmptyTranslog(
                 indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, indexShard.getPrimaryTerm());

From e9789ce43c3e2db2fff124b78746a7ea8a676071 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Fri, 22 Jun 2018 08:37:32 -0400
Subject: [PATCH 14/31] AwaitsFix FullClusterRestartIT#testRecovery

Relates #31530
---
 .../java/org/elasticsearch/upgrades/FullClusterRestartIT.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
index a205d21f33775..021423707b750 100644
--- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
+++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
@@ -700,6 +700,7 @@ public void testEmptyShard() throws IOException {
      * Tests recovery of an index with or without a translog and the
      * statistics we gather about that.
*/
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31530")
     public void testRecovery() throws Exception {
         int count;
         boolean shouldHaveTranslog;

From f9de42982de3706c03a262f961a3ab8fcd9911ad Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Fri, 22 Jun 2018 15:13:31 +0100
Subject: [PATCH 15/31] [ML] Add ML filter update API (#31437)

This adds an API to allow updating a filter:

POST _xpack/ml/filters/{filter_id}/_update

The request body may have any of:

- description: a new description for the filter
- add_items: a list of the items to add
- remove_items: a list of the items to remove

This commit also changes the PUT filter API to return an error when the
filter_id is already in use. Now that there is an API for updating filters,
the PUT API should only be used to create new ones. Also, updating a filter
results in a notification message auditing the change for every job that
uses that filter.
---
 .../xpack/core/XPackClientPlugin.java         |   2 +
 .../core/ml/action/UpdateFilterAction.java    | 192 ++++++++++++++++++
 .../xpack/core/ml/job/config/MlFilter.java    |  13 +-
 .../xpack/core/ml/job/messages/Messages.java  |   2 +
 .../autodetect/state/ModelSnapshot.java       |   4 +-
 .../xpack/core/ml/utils/ExceptionsHelper.java |   4 +
 .../UpdateFilterActionRequestTests.java       |  58 ++++++
 .../core/ml/job/config/MlFilterTests.java     |   9 +-
 .../xpack/ml/MachineLearning.java             |   5 +
 .../ml/action/TransportGetFiltersAction.java  |   9 +-
 .../ml/action/TransportPutFilterAction.java   |  37 ++--
 .../action/TransportUpdateFilterAction.java   | 175 ++++++++++++++++
 .../xpack/ml/job/JobManager.java              |  53 +++--
 .../persistence/BatchedBucketsIterator.java   |   4 +-
 .../BatchedInfluencersIterator.java           |   4 +-
 .../persistence/BatchedRecordsIterator.java   |   4 +-
 .../xpack/ml/job/persistence/JobProvider.java |  19 +-
 .../rest/filter/RestUpdateFilterAction.java   |  41 ++++
 .../xpack/ml/job/JobManagerTests.java         |  90 +++++++-
 .../api/xpack.ml.update_filter.json           |  20 ++
 .../test/ml/custom_all_field.yml              |   2 +
 .../test/ml/delete_model_snapshot.yml         |   2 +
 .../rest-api-spec/test/ml/filter_crud.yml     |  68 ++++++-
 .../test/ml/get_model_snapshots.yml           |   3 +
 .../rest-api-spec/test/ml/index_layout.yml    |   2 +
 .../rest-api-spec/test/ml/jobs_crud.yml       |   4 +
 .../test/ml/jobs_get_result_buckets.yml       |   3 +
 .../test/ml/jobs_get_result_categories.yml    |   3 +
 .../test/ml/jobs_get_result_influencers.yml   |   3 +
 .../ml/jobs_get_result_overall_buckets.yml    |   9 +
 .../test/ml/jobs_get_result_records.yml       |   2 +
 .../rest-api-spec/test/ml/jobs_get_stats.yml  |   2 +
 .../test/ml/ml_anomalies_default_mappings.yml |   1 +
 .../test/ml/revert_model_snapshot.yml         |   9 +
 .../test/ml/update_model_snapshot.yml         |   2 +
 .../ml/integration/DetectionRulesIT.java      |  12 +-
 .../smoke-test-ml-with-security/build.gradle  |   1 +
 37 files changed, 801 insertions(+), 72 deletions(-)
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java
 create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java
 create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java
 create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestUpdateFilterAction.java
 create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_filter.json

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
index 049089e62cf26..94d81613ee8be 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -84,6 +84,7 @@ import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction; import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.UpdateProcessAction; @@ -220,6 +221,7 @@ public List getClientActions() { OpenJobAction.INSTANCE, GetFiltersAction.INSTANCE, PutFilterAction.INSTANCE, + UpdateFilterAction.INSTANCE, DeleteFilterAction.INSTANCE, KillProcessAction.INSTANCE, GetBucketsAction.INSTANCE, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java new file mode 100644 index 0000000000000..cccaec1cd3093 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Objects; +import java.util.SortedSet; +import java.util.TreeSet; + + +public class UpdateFilterAction extends Action { + + public static final UpdateFilterAction INSTANCE = new UpdateFilterAction(); + public static final String NAME = "cluster:admin/xpack/ml/filters/update"; + + private UpdateFilterAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public PutFilterAction.Response newResponse() { + return new PutFilterAction.Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static final ParseField ADD_ITEMS = new ParseField("add_items"); + public static final ParseField REMOVE_ITEMS = new ParseField("remove_items"); + + private static final 
ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, filterId) -> request.filterId = filterId, MlFilter.ID); + PARSER.declareStringOrNull(Request::setDescription, MlFilter.DESCRIPTION); + PARSER.declareStringArray(Request::setAddItems, ADD_ITEMS); + PARSER.declareStringArray(Request::setRemoveItems, REMOVE_ITEMS); + } + + public static Request parseRequest(String filterId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (request.filterId == null) { + request.filterId = filterId; + } else if (!Strings.isNullOrEmpty(filterId) && !filterId.equals(request.filterId)) { + // If we have both URI and body filter ID, they must be identical + throw new IllegalArgumentException(Messages.getMessage(Messages.INCONSISTENT_ID, MlFilter.ID.getPreferredName(), + request.filterId, filterId)); + } + return request; + } + + private String filterId; + @Nullable + private String description; + private SortedSet addItems = Collections.emptySortedSet(); + private SortedSet removeItems = Collections.emptySortedSet(); + + public Request() { + } + + public Request(String filterId) { + this.filterId = ExceptionsHelper.requireNonNull(filterId, MlFilter.ID.getPreferredName()); + } + + public String getFilterId() { + return filterId; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public SortedSet getAddItems() { + return addItems; + } + + public void setAddItems(Collection addItems) { + this.addItems = new TreeSet<>(ExceptionsHelper.requireNonNull(addItems, ADD_ITEMS.getPreferredName())); + } + + public SortedSet getRemoveItems() { + return removeItems; + } + + public void setRemoveItems(Collection removeItems) { + this.removeItems = new TreeSet<>(ExceptionsHelper.requireNonNull(removeItems, REMOVE_ITEMS.getPreferredName())); + } + + public boolean isNoop() { + return description == null && addItems.isEmpty() && removeItems.isEmpty(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + filterId = in.readString(); + description = in.readOptionalString(); + addItems = new TreeSet<>(Arrays.asList(in.readStringArray())); + removeItems = new TreeSet<>(Arrays.asList(in.readStringArray())); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(filterId); + out.writeOptionalString(description); + out.writeStringArray(addItems.toArray(new String[addItems.size()])); + out.writeStringArray(removeItems.toArray(new String[removeItems.size()])); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MlFilter.ID.getPreferredName(), filterId); + if (description != null) { + builder.field(MlFilter.DESCRIPTION.getPreferredName(), description); + } + if (addItems.isEmpty() == false) { + builder.field(ADD_ITEMS.getPreferredName(), addItems); + } + if (removeItems.isEmpty() == false) { + builder.field(REMOVE_ITEMS.getPreferredName(), removeItems); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(filterId, description, addItems, removeItems); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return 
false; + } + Request other = (Request) obj; + return Objects.equals(filterId, other.filterId) + && Objects.equals(description, other.description) + && Objects.equals(addItems, other.addItems) + && Objects.equals(removeItems, other.removeItems); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java index b11dfd476515c..b45ce73f124fd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -56,7 +56,7 @@ private static ObjectParser createParser(boolean ignoreUnknownFie private final String description; private final SortedSet items; - public MlFilter(String id, String description, SortedSet items) { + private MlFilter(String id, String description, SortedSet items) { this.id = Objects.requireNonNull(id, ID.getPreferredName() + " must not be null"); this.description = description; this.items = Objects.requireNonNull(items, ITEMS.getPreferredName() + " must not be null"); @@ -69,8 +69,7 @@ public MlFilter(StreamInput in) throws IOException { } else { description = null; } - items = new TreeSet<>(); - items.addAll(Arrays.asList(in.readStringArray())); + items = new TreeSet<>(Arrays.asList(in.readStringArray())); } @Override @@ -163,9 +162,13 @@ public Builder setDescription(String description) { return this; } + public Builder setItems(SortedSet items) { + this.items = items; + return this; + } + public Builder setItems(List items) { - this.items = new TreeSet<>(); - this.items.addAll(items); + this.items = new TreeSet<>(items); return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 79d8f068d91f8..f0329051fed95 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -42,6 +42,8 @@ public final class Messages { public static final String DATAFEED_FREQUENCY_MUST_BE_MULTIPLE_OF_AGGREGATIONS_INTERVAL = "Datafeed frequency [{0}] must be a multiple of the aggregation interval [{1}]"; + public static final String FILTER_NOT_FOUND = "No filter with id [{0}] exists"; + public static final String INCONSISTENT_ID = "Inconsistent {0}; ''{1}'' specified in the body differs from ''{2}'' specified as a URL argument"; public static final String INVALID_ID = "Invalid {0}; ''{1}'' can contain lowercase alphanumeric (a-z and 0-9), hyphens or " + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java index 6091da5b8c2d6..f6387602fa34a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java @@ -18,9 +18,9 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; @@ -318,7 +318,7 @@ public static String v54DocumentId(String jobId, String snapshotId) { public static ModelSnapshot fromJson(BytesReference bytesReference) { try (InputStream stream = bytesReference.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(bytesReference)) + XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { return LENIENT_PARSER.apply(parser, null).build(); } catch (IOException e) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java index 150c539b1ae3b..d5b83d25ce315 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java @@ -38,6 +38,10 @@ public static ElasticsearchException serverError(String msg, Throwable cause) { return new ElasticsearchException(msg, cause); } + public static ElasticsearchStatusException conflictStatusException(String msg, Throwable cause, Object... args) { + return new ElasticsearchStatusException(msg, RestStatus.CONFLICT, cause, args); + } + public static ElasticsearchStatusException conflictStatusException(String msg, Object... args) { return new ElasticsearchStatusException(msg, RestStatus.CONFLICT, args); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java new file mode 100644 index 0000000000000..f07eba7e90ebb --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterActionRequestTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction.Request; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +public class UpdateFilterActionRequestTests extends AbstractStreamableXContentTestCase { + + private String filterId = randomAlphaOfLength(20); + + @Override + protected Request createTestInstance() { + UpdateFilterAction.Request request = new UpdateFilterAction.Request(filterId); + if (randomBoolean()) { + request.setDescription(randomAlphaOfLength(20)); + } + if (randomBoolean()) { + request.setAddItems(generateRandomStrings()); + } + if (randomBoolean()) { + request.setRemoveItems(generateRandomStrings()); + } + return request; + } + + private static Collection generateRandomStrings() { + int size = randomIntBetween(0, 10); + List strings = new ArrayList<>(size); + for (int i = 0; i < size; ++i) { + strings.add(randomAlphaOfLength(20)); + } + return strings; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(filterId, parser); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java index 9ac6683f004c5..c8d8527dc0158 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; +import java.util.SortedSet; import java.util.TreeSet; import static org.hamcrest.Matchers.contains; @@ -43,7 +44,7 @@ public static MlFilter createRandom(String filterId) { for (int i = 0; i < size; i++) { items.add(randomAlphaOfLengthBetween(1, 20)); } - return new MlFilter(filterId, description, items); + return MlFilter.builder(filterId).setDescription(description).setItems(items).build(); } @Override @@ -57,13 +58,13 @@ protected MlFilter doParseInstance(XContentParser parser) { } public void testNullId() { - NullPointerException ex = expectThrows(NullPointerException.class, () -> new MlFilter(null, "", new TreeSet<>())); + NullPointerException ex = expectThrows(NullPointerException.class, () -> MlFilter.builder(null).build()); assertEquals(MlFilter.ID.getPreferredName() + " must not be null", ex.getMessage()); } public void testNullItems() { - NullPointerException ex = - expectThrows(NullPointerException.class, () -> new MlFilter(randomAlphaOfLengthBetween(1, 20), "", null)); + NullPointerException ex = expectThrows(NullPointerException.class, + () -> MlFilter.builder(randomAlphaOfLength(20)).setItems((SortedSet) null).build()); assertEquals(MlFilter.ITEMS.getPreferredName() + " must not be null", ex.getMessage()); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 3cb5dc97b0e4c..827e8a4ae0117 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -98,6 +98,7 @@ import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction; import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.UpdateProcessAction; @@ -149,6 +150,7 @@ import org.elasticsearch.xpack.ml.action.TransportStopDatafeedAction; import org.elasticsearch.xpack.ml.action.TransportUpdateCalendarJobAction; import org.elasticsearch.xpack.ml.action.TransportUpdateDatafeedAction; +import org.elasticsearch.xpack.ml.action.TransportUpdateFilterAction; import org.elasticsearch.xpack.ml.action.TransportUpdateJobAction; import org.elasticsearch.xpack.ml.action.TransportUpdateModelSnapshotAction; import org.elasticsearch.xpack.ml.action.TransportUpdateProcessAction; @@ -197,6 +199,7 @@ import org.elasticsearch.xpack.ml.rest.filter.RestDeleteFilterAction; import org.elasticsearch.xpack.ml.rest.filter.RestGetFiltersAction; import org.elasticsearch.xpack.ml.rest.filter.RestPutFilterAction; +import org.elasticsearch.xpack.ml.rest.filter.RestUpdateFilterAction; import org.elasticsearch.xpack.ml.rest.job.RestCloseJobAction; import org.elasticsearch.xpack.ml.rest.job.RestDeleteJobAction; import org.elasticsearch.xpack.ml.rest.job.RestFlushJobAction; @@ -465,6 +468,7 @@ public List getRestHandlers(Settings settings, RestController restC new RestOpenJobAction(settings, restController), new RestGetFiltersAction(settings, restController), new RestPutFilterAction(settings, restController), + new RestUpdateFilterAction(settings, restController), new RestDeleteFilterAction(settings, restController), new RestGetInfluencersAction(settings, restController), new RestGetRecordsAction(settings, restController), @@ -516,6 +520,7 @@ public List getRestHandlers(Settings settings, RestController restC new ActionHandler<>(OpenJobAction.INSTANCE, TransportOpenJobAction.class), new ActionHandler<>(GetFiltersAction.INSTANCE, TransportGetFiltersAction.class), new ActionHandler<>(PutFilterAction.INSTANCE, TransportPutFilterAction.class), + new ActionHandler<>(UpdateFilterAction.INSTANCE, TransportUpdateFilterAction.class), new ActionHandler<>(DeleteFilterAction.INSTANCE, TransportDeleteFilterAction.class), new ActionHandler<>(KillProcessAction.INSTANCE, TransportKillProcessAction.class), new ActionHandler<>(GetBucketsAction.INSTANCE, TransportGetBucketsAction.class), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java index 4264fa2fc2f57..f20e64a3d93b1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java @@ -22,8 +22,8 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import 
org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -84,9 +84,8 @@ public void onResponse(GetResponse getDocResponse) { if (getDocResponse.isExists()) { BytesReference docSource = getDocResponse.getSourceAsBytesRef(); try (InputStream stream = docSource.streamInput(); - XContentParser parser = - XContentFactory.xContent(getDocResponse.getSourceAsBytes()) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { MlFilter filter = MlFilter.LENIENT_PARSER.apply(parser, null).build(); responseBody = new QueryPage<>(Collections.singletonList(filter), 1, MlFilter.RESULTS_FIELD); @@ -126,7 +125,7 @@ public void onResponse(SearchResponse response) { for (SearchHit hit : response.getHits().getHits()) { BytesReference docSource = hit.getSourceRef(); try (InputStream stream = docSource.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(docSource)).createParser( + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser( NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { docs.add(MlFilter.LENIENT_PARSER.apply(parser, null).build()); } catch (IOException e) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java index fc14ef085dd33..9da02cb8f414f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java @@ -5,11 +5,12 @@ */ package org.elasticsearch.xpack.ml.action; +import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.bulk.BulkAction; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.WriteRequest; @@ -20,11 +21,11 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.MlMetaIndex; -import org.elasticsearch.xpack.ml.job.JobManager; +import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -37,44 +38,46 @@ public class TransportPutFilterAction extends HandledTransportAction { private final Client client; - private final JobManager jobManager; @Inject public TransportPutFilterAction(Settings settings, ThreadPool 
threadPool, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - Client client, JobManager jobManager) { + IndexNameExpressionResolver indexNameExpressionResolver, Client client) { super(settings, PutFilterAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, PutFilterAction.Request::new); this.client = client; - this.jobManager = jobManager; } @Override protected void doExecute(PutFilterAction.Request request, ActionListener listener) { MlFilter filter = request.getFilter(); IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, filter.documentId()); + indexRequest.opType(DocWriteRequest.OpType.CREATE); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true")); indexRequest.source(filter.toXContent(builder, params)); } catch (IOException e) { throw new IllegalStateException("Failed to serialise filter with id [" + filter.getId() + "]", e); } - BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); - bulkRequestBuilder.add(indexRequest); - bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkRequestBuilder.request(), - new ActionListener() { + executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, + new ActionListener() { @Override - public void onResponse(BulkResponse indexResponse) { - jobManager.updateProcessOnFilterChanged(filter); + public void onResponse(IndexResponse indexResponse) { listener.onResponse(new PutFilterAction.Response(filter)); } @Override public void onFailure(Exception e) { - listener.onFailure(ExceptionsHelper.serverError("Error putting filter with id [" + filter.getId() + "]", e)); + Exception reportedException; + if (e instanceof VersionConflictEngineException) { + reportedException = new ResourceAlreadyExistsException("A filter with id [" + filter.getId() + + "] already exists"); + } else { + reportedException = ExceptionsHelper.serverError("Error putting filter with id [" + filter.getId() + "]", e); + } + listener.onFailure(reportedException); } }); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java new file mode 100644 index 0000000000000..110d813c643d9 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.ml.action;
+
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.GetAction;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexAction;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.ml.MlMetaIndex;
+import org.elasticsearch.xpack.core.ml.action.PutFilterAction;
+import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction;
+import org.elasticsearch.xpack.core.ml.job.config.MlFilter;
+import org.elasticsearch.xpack.core.ml.job.messages.Messages;
+import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
+import org.elasticsearch.xpack.ml.job.JobManager;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
+import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
+
+public class TransportUpdateFilterAction extends HandledTransportAction<UpdateFilterAction.Request, PutFilterAction.Response> {
+
+    private final Client client;
+    private final JobManager jobManager;
+
+    @Inject
+    public TransportUpdateFilterAction(Settings settings, ThreadPool threadPool, TransportService transportService,
+                                       ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client,
+                                       JobManager jobManager) {
+        super(settings, UpdateFilterAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
+                UpdateFilterAction.Request::new);
+        this.client = client;
+        this.jobManager = jobManager;
+    }
+
+    @Override
+    protected void doExecute(UpdateFilterAction.Request request, ActionListener<PutFilterAction.Response> listener) {
+        ActionListener<FilterWithVersion> filterListener = ActionListener.wrap(filterWithVersion -> {
+            updateFilter(filterWithVersion, request, listener);
+        }, listener::onFailure);
+
+        getFilterWithVersion(request.getFilterId(), filterListener);
+    }
+
+    private void updateFilter(FilterWithVersion filterWithVersion, UpdateFilterAction.Request request,
+                              ActionListener<PutFilterAction.Response> listener) {
+        MlFilter filter = filterWithVersion.filter;
+
+        if (request.isNoop()) {
+            listener.onResponse(new PutFilterAction.Response(filter));
+            return;
+        }
+
+        String description = request.getDescription() == null ? filter.getDescription() : request.getDescription();
+        SortedSet<String> items = new TreeSet<>(filter.getItems());
+        items.addAll(request.getAddItems());
+
+        // Check if removed items are present to avoid typos
+        for (String toRemove : request.getRemoveItems()) {
+            boolean wasPresent = items.remove(toRemove);
+            if (wasPresent == false) {
+                listener.onFailure(ExceptionsHelper.badRequestException("Cannot remove item [" + toRemove
+                        + "] as it is not present in filter [" + filter.getId() + "]"));
+                return;
+            }
+        }
+
+        MlFilter updatedFilter = MlFilter.builder(filter.getId()).setDescription(description).setItems(items).build();
+        indexUpdatedFilter(updatedFilter, filterWithVersion.version, request, listener);
+    }
+
+    private void indexUpdatedFilter(MlFilter filter, long version, UpdateFilterAction.Request request,
+                                    ActionListener<PutFilterAction.Response> listener) {
+        IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, filter.documentId());
+        indexRequest.version(version);
+        indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+
+        try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
+            ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true"));
+            indexRequest.source(filter.toXContent(builder, params));
+        } catch (IOException e) {
+            throw new IllegalStateException("Failed to serialise filter with id [" + filter.getId() + "]", e);
+        }
+
+        executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, new ActionListener<IndexResponse>() {
+            @Override
+            public void onResponse(IndexResponse indexResponse) {
+                jobManager.notifyFilterChanged(filter, request.getAddItems(), request.getRemoveItems());
+                listener.onResponse(new PutFilterAction.Response(filter));
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                Exception reportedException;
+                if (e instanceof VersionConflictEngineException) {
+                    reportedException = ExceptionsHelper.conflictStatusException("Error updating filter with id [" + filter.getId()
+                            + "] because it was modified while the update was in progress", e);
+                } else {
+                    reportedException = ExceptionsHelper.serverError("Error updating filter with id [" + filter.getId() + "]", e);
+                }
+                listener.onFailure(reportedException);
+            }
+        });
+    }
+
+    private void getFilterWithVersion(String filterId, ActionListener<FilterWithVersion> listener) {
+        GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, MlFilter.documentId(filterId));
+        executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener<GetResponse>() {
+            @Override
+            public void onResponse(GetResponse getDocResponse) {
+                try {
+                    if (getDocResponse.isExists()) {
+                        BytesReference docSource = getDocResponse.getSourceAsBytesRef();
+                        try (InputStream stream = docSource.streamInput();
+                             XContentParser parser = XContentFactory.xContent(XContentType.JSON)
+                                     .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) {
+                            MlFilter filter = MlFilter.LENIENT_PARSER.apply(parser, null).build();
+                            listener.onResponse(new FilterWithVersion(filter, getDocResponse.getVersion()));
+                        }
+                    } else {
+                        this.onFailure(new ResourceNotFoundException(Messages.getMessage(Messages.FILTER_NOT_FOUND, filterId)));
+                    }
+                } catch (Exception e) {
+                    this.onFailure(e);
+                }
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                listener.onFailure(e);
+            }
+        });
+    }
+
+    private static class FilterWithVersion {
+
+        private final MlFilter filter;
+        private final long
version) { + this.filter = filter; + this.version = version; + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 391357076cbbe..a812b7b464c69 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -403,26 +403,55 @@ private ClusterState updateClusterState(Job job, boolean overwrite, ClusterState return buildNewClusterState(currentState, builder); } - public void updateProcessOnFilterChanged(MlFilter filter) { + public void notifyFilterChanged(MlFilter filter, Set addedItems, Set removedItems) { + if (addedItems.isEmpty() && removedItems.isEmpty()) { + return; + } + ClusterState clusterState = clusterService.state(); QueryPage jobs = expandJobs("*", true, clusterService.state()); for (Job job : jobs.results()) { - if (isJobOpen(clusterState, job.getId())) { - Set jobFilters = job.getAnalysisConfig().extractReferencedFilters(); - if (jobFilters.contains(filter.getId())) { - updateJobProcessNotifier.submitJobUpdate(UpdateParams.filterUpdate(job.getId(), filter), ActionListener.wrap( - isUpdated -> { - if (isUpdated) { - auditor.info(job.getId(), - Messages.getMessage(Messages.JOB_AUDIT_FILTER_UPDATED_ON_PROCESS, filter.getId())); - } - }, e -> {} - )); + Set jobFilters = job.getAnalysisConfig().extractReferencedFilters(); + if (jobFilters.contains(filter.getId())) { + if (isJobOpen(clusterState, job.getId())) { + updateJobProcessNotifier.submitJobUpdate(UpdateParams.filterUpdate(job.getId(), filter), + ActionListener.wrap(isUpdated -> { + auditFilterChanges(job.getId(), filter.getId(), addedItems, removedItems); + }, e -> {})); + } else { + auditFilterChanges(job.getId(), filter.getId(), addedItems, removedItems); } } } } + private void auditFilterChanges(String jobId, String filterId, Set addedItems, Set removedItems) { + StringBuilder auditMsg = new StringBuilder("Filter ["); + auditMsg.append(filterId); + auditMsg.append("] has been modified; "); + + if (addedItems.isEmpty() == false) { + auditMsg.append("added items: "); + appendCommaSeparatedSet(addedItems, auditMsg); + if (removedItems.isEmpty() == false) { + auditMsg.append(", "); + } + } + + if (removedItems.isEmpty() == false) { + auditMsg.append("removed items: "); + appendCommaSeparatedSet(removedItems, auditMsg); + } + + auditor.info(jobId, auditMsg.toString()); + } + + private static void appendCommaSeparatedSet(Set items, StringBuilder sb) { + sb.append("["); + Strings.collectionToDelimitedString(items, ", ", "'", "'", sb); + sb.append("]"); + } + public void updateProcessOnCalendarChanged(List calendarJobIds) { ClusterState clusterState = clusterService.state(); final MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java index 17b4b8edadfa2..53526e2a4753d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import 
org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.Result; @@ -30,7 +30,7 @@ class BatchedBucketsIterator extends BatchedResultsIterator { protected Result map(SearchHit hit) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)).createParser(NamedXContentRegistry.EMPTY, + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { Bucket bucket = Bucket.LENIENT_PARSER.apply(parser, null); return new Result<>(hit.getIndex(), bucket); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java index d084325350fc5..fe8bd3aaa3af7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.Result; @@ -29,7 +29,7 @@ class BatchedInfluencersIterator extends BatchedResultsIterator { protected Result map(SearchHit hit) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)).createParser(NamedXContentRegistry.EMPTY, + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { Influencer influencer = Influencer.LENIENT_PARSER.apply(parser, null); return new Result<>(hit.getIndex(), influencer); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java index c0940dfd5aad1..22c107f771ba5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.SearchHit; import 
org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; import org.elasticsearch.xpack.core.ml.job.results.Result; @@ -30,7 +30,7 @@ class BatchedRecordsIterator extends BatchedResultsIterator { protected Result map(SearchHit hit) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)).createParser(NamedXContentRegistry.EMPTY, + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)){ AnomalyRecord record = AnomalyRecord.LENIENT_PARSER.apply(parser, null); return new Result<>(hit.getIndex(), record); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java index 9db1877df1850..578ddd1efc78a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java @@ -50,7 +50,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; @@ -477,7 +476,7 @@ private T parseSearchHit(SearchHit hit, BiFunction Consumer errorHandler) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) + XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { return objectParser.apply(parser, null); } catch (IOException e) { @@ -528,7 +527,7 @@ public void buckets(String jobId, BucketsQueryBuilder query, Consumer modelPlot(String jobId, int from, int size) { for (SearchHit hit : searchResponse.getHits().getHits()) { BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) + XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { ModelPlot modelPlot = ModelPlot.LENIENT_PARSER.apply(parser, null); results.add(modelPlot); @@ -1232,10 +1231,8 @@ public void onResponse(GetResponse getDocResponse) { BytesReference docSource = getDocResponse.getSourceAsBytesRef(); try (InputStream stream = docSource.streamInput(); - XContentParser parser = - XContentFactory.xContent(XContentHelper.xContentType(docSource)) - .createParser(NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, stream)) { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { Calendar calendar = Calendar.LENIENT_PARSER.apply(parser, null).build(); listener.onResponse(calendar); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestUpdateFilterAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestUpdateFilterAction.java
new file mode 100644
index 0000000000000..80acf3d7e4e35
--- /dev/null
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestUpdateFilterAction.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ml.rest.filter;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestToXContentListener;
+import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction;
+import org.elasticsearch.xpack.core.ml.job.config.MlFilter;
+import org.elasticsearch.xpack.ml.MachineLearning;
+
+import java.io.IOException;
+
+public class RestUpdateFilterAction extends BaseRestHandler {
+
+    public RestUpdateFilterAction(Settings settings, RestController controller) {
+        super(settings);
+        controller.registerHandler(RestRequest.Method.POST,
+                MachineLearning.BASE_PATH + "filters/{" + MlFilter.ID.getPreferredName() + "}/_update", this);
+    }
+
+    @Override
+    public String getName() {
+        return "xpack_ml_update_filter_action";
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
+        String filterId = restRequest.param(MlFilter.ID.getPreferredName());
+        XContentParser parser = restRequest.contentOrSourceParamParser();
+        UpdateFilterAction.Request putFilterRequest = UpdateFilterAction.Request.parseRequest(filterId, parser);
+        return channel -> client.execute(UpdateFilterAction.INSTANCE, putFilterRequest, new RestToXContentListener<>(channel));
+    }
+}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java
index 42b0a56f49a82..cf925963c198a 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java
@@ -41,12 +41,14 @@
 import org.junit.Before;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Matchers;
+import org.mockito.Mockito;
 
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Date;
 import java.util.List;
+import java.util.TreeSet;
 
 import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder;
 import static org.elasticsearch.xpack.ml.action.TransportOpenJobActionTests.addJobTask;
@@ -174,7 +176,16 @@ public void onFailure(Exception e) {
         });
     }
 
-    public void testUpdateProcessOnFilterChanged() {
+    public void testNotifyFilterChangedGivenNoop() {
+        MlFilter filter = MlFilter.builder("my_filter").build();
+        JobManager jobManager = createJobManager();
+
+        jobManager.notifyFilterChanged(filter, Collections.emptySet(), Collections.emptySet());
+
+        Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier);
+    }
+
+    public void testNotifyFilterChanged() {
         Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null);
         detectorReferencingFilter.setByFieldName("foo");
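// Editor's aside: once the handler above is registered, the endpoint accepts
// requests of the following shape (illustrative values; the field names follow
// the update_filter rest-api-spec and the UpdateFilterAction.Request parser
// shown elsewhere in this patch):
//
//     POST _xpack/ml/filters/safe_ips/_update
//     {
//       "description": "Safe internal addresses",
//       "add_items": ["192.168.1.1"],
//       "remove_items": ["10.0.0.1"]
//     }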
DetectionRule filterRule = new DetectionRule.Builder(RuleScope.builder().exclude("foo", "foo_filter")).build(); @@ -208,11 +219,18 @@ public void testUpdateProcessOnFilterChanged() { .build(); when(clusterService.state()).thenReturn(clusterState); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(true); + return null; + }).when(updateJobProcessNotifier).submitJobUpdate(any(), any()); + JobManager jobManager = createJobManager(); MlFilter filter = MlFilter.builder("foo_filter").setItems("a", "b").build(); - jobManager.updateProcessOnFilterChanged(filter); + jobManager.notifyFilterChanged(filter, new TreeSet<>(Arrays.asList("item 1", "item 2")), + new TreeSet<>(Collections.singletonList("item 3"))); ArgumentCaptor updateParamsCaptor = ArgumentCaptor.forClass(UpdateParams.class); verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture(), any(ActionListener.class)); @@ -223,6 +241,74 @@ public void testUpdateProcessOnFilterChanged() { assertThat(capturedUpdateParams.get(0).getFilter(), equalTo(filter)); assertThat(capturedUpdateParams.get(1).getJobId(), equalTo(jobReferencingFilter2.getId())); assertThat(capturedUpdateParams.get(1).getFilter(), equalTo(filter)); + + verify(auditor).info(jobReferencingFilter1.getId(), "Filter [foo_filter] has been modified; added items: " + + "['item 1', 'item 2'], removed items: ['item 3']"); + verify(auditor).info(jobReferencingFilter2.getId(), "Filter [foo_filter] has been modified; added items: " + + "['item 1', 'item 2'], removed items: ['item 3']"); + verify(auditor).info(jobReferencingFilter3.getId(), "Filter [foo_filter] has been modified; added items: " + + "['item 1', 'item 2'], removed items: ['item 3']"); + Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); + } + + public void testNotifyFilterChangedGivenOnlyAddedItems() { + Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null); + detectorReferencingFilter.setByFieldName("foo"); + DetectionRule filterRule = new DetectionRule.Builder(RuleScope.builder().exclude("foo", "foo_filter")).build(); + detectorReferencingFilter.setRules(Collections.singletonList(filterRule)); + AnalysisConfig.Builder filterAnalysisConfig = new AnalysisConfig.Builder(Collections.singletonList( + detectorReferencingFilter.build())); + + Job.Builder jobReferencingFilter = buildJobBuilder("job-referencing-filter"); + jobReferencingFilter.setAnalysisConfig(filterAnalysisConfig); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(jobReferencingFilter.build(), false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MLMetadataField.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + JobManager jobManager = createJobManager(); + + MlFilter filter = MlFilter.builder("foo_filter").build(); + + jobManager.notifyFilterChanged(filter, new TreeSet<>(Arrays.asList("a", "b")), Collections.emptySet()); + + verify(auditor).info(jobReferencingFilter.getId(), "Filter [foo_filter] has been modified; added items: ['a', 'b']"); + Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); + } + + public void testNotifyFilterChangedGivenOnlyRemovedItems() { + Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null); + detectorReferencingFilter.setByFieldName("foo"); + DetectionRule filterRule = new 
DetectionRule.Builder(RuleScope.builder().exclude("foo", "foo_filter")).build(); + detectorReferencingFilter.setRules(Collections.singletonList(filterRule)); + AnalysisConfig.Builder filterAnalysisConfig = new AnalysisConfig.Builder(Collections.singletonList( + detectorReferencingFilter.build())); + + Job.Builder jobReferencingFilter = buildJobBuilder("job-referencing-filter"); + jobReferencingFilter.setAnalysisConfig(filterAnalysisConfig); + + MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); + mlMetadata.putJob(jobReferencingFilter.build(), false); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) + .metaData(MetaData.builder() + .putCustom(MLMetadataField.TYPE, mlMetadata.build())) + .build(); + when(clusterService.state()).thenReturn(clusterState); + + JobManager jobManager = createJobManager(); + + MlFilter filter = MlFilter.builder("foo_filter").build(); + + jobManager.notifyFilterChanged(filter, Collections.emptySet(), new TreeSet<>(Arrays.asList("a", "b"))); + + verify(auditor).info(jobReferencingFilter.getId(), "Filter [foo_filter] has been modified; removed items: ['a', 'b']"); + Mockito.verifyNoMoreInteractions(auditor, updateJobProcessNotifier); } public void testUpdateProcessOnCalendarChanged() { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_filter.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_filter.json new file mode 100644 index 0000000000000..06aceea4c1240 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_filter.json @@ -0,0 +1,20 @@ +{ + "xpack.ml.update_filter": { + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/filters/{filter_id}/_update", + "paths": [ "/_xpack/ml/filters/{filter_id}/_update" ], + "parts": { + "filter_id": { + "type": "string", + "required": true, + "description": "The ID of the filter to update" + } + } + }, + "body": { + "description" : "The filter update", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml index ffbbf4d95bdda..c206a08e6ca91 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml @@ -30,6 +30,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-custom-all-test-1 type: doc @@ -56,6 +57,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-custom-all-test-2 type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml index 1a587c47fd573..c13b2473cc785 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml @@ -34,6 +34,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-delete-model-snapshot type: doc @@ -76,6 +77,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-delete-model-snapshot type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml index 4c184d34c995e..d787e07b8c28c 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml @@ -4,6 +4,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-meta type: doc @@ -112,25 +113,25 @@ setup: "Test create filter api": - do: xpack.ml.put_filter: - filter_id: filter-foo2 + filter_id: new-filter body: > { "description": "A newly created filter", "items": ["abc", "xyz"] } - - match: { filter_id: filter-foo2 } + - match: { filter_id: new-filter } - match: { description: "A newly created filter" } - match: { items: ["abc", "xyz"]} - do: xpack.ml.get_filters: - filter_id: "filter-foo2" + filter_id: "new-filter" - match: { count: 1 } - match: filters.0: - filter_id: "filter-foo2" + filter_id: "new-filter" description: "A newly created filter" items: ["abc", "xyz"] @@ -146,6 +147,65 @@ setup: "items": ["abc", "xyz"] } +--- +"Test update filter given no filter matches filter_id": + - do: + catch: missing + xpack.ml.update_filter: + filter_id: "missing_filter" + body: > + { + } + +--- +"Test update filter": + - do: + xpack.ml.put_filter: + filter_id: "test_update_filter" + body: > + { + "description": "old description", + "items": ["a", "b"] + } + - match: { filter_id: test_update_filter } + + - do: + xpack.ml.update_filter: + filter_id: "test_update_filter" + body: > + { + "description": "new description", + "add_items": ["c", "d"], + "remove_items": ["a"] + } + - match: { filter_id: test_update_filter } + - match: { description: "new description" } + - match: { items: ["b", "c", "d"] } + + - do: + xpack.ml.get_filters: + filter_id: "test_update_filter" + - match: + filters.0: + filter_id: "test_update_filter" + description: "new description" + items: ["b", "c", "d"] + + - do: + xpack.ml.delete_filter: + filter_id: test_update_filter + +--- +"Test update filter given remove item is not present": + - do: + catch: /Cannot remove item \[not present item\] as it is not present in filter \[filter-foo\]/ + xpack.ml.update_filter: + filter_id: "filter-foo" + body: > + { + "remove_items": ["not present item"] + } + --- "Test delete in-use filter": - do: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml index 57cc80ae2fb73..e411251363b71 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-get-model-snapshots type: doc @@ -33,6 +34,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-state type: doc @@ -44,6 +46,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-get-model-snapshots type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml index c13ae86e06f50..6a60bbb96da6f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml @@ -556,6 +556,8 @@ - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json + index: index: .ml-anomalies-shared type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml index df505176ae739..3b08753e20913 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -419,6 +419,8 @@ - match: { job_id: "jobs-crud-model-memory-limit-decrease" } - do: + headers: + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -929,6 +931,8 @@ "Test cannot create job with existing result document": - do: + headers: + Content-Type: application/json index: index: .ml-anomalies-shared type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml index 2a7a7970e5db2..125f8cbf7f8d2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-buckets type: doc @@ -34,6 +35,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-buckets type: doc @@ -50,6 +52,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-buckets type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml index 565f1612f89a2..307a1d0a80d7e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-categories type: doc @@ -26,6 +27,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-categories type: doc @@ -34,6 +36,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-unrelated type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml index 50f0cfc6816bc..9b875fb1afd86 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-get-influencers-test type: doc @@ -36,6 +37,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-get-influencers-test type: doc @@ -55,6 +57,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-get-influencers-test type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml index 75f35f311177c..249ff7c72d7ad 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml @@ -59,6 +59,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -75,6 +76,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -91,6 +93,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -123,6 +126,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -139,6 +143,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -155,6 +160,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -171,6 +177,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -187,6 +194,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -203,6 +211,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml index b5dae2045f440..513e1fb875774 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-records type: doc @@ -34,6 +35,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-records type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml index 61bcf63e39869..b841c8c23069f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml @@ -226,6 +226,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc @@ -250,6 +251,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml index 42fca7b81a036..0f01613203704 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml @@ -19,6 +19,7 @@ - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-shared type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml index a66c0da12d0a9..ce638fdceaa19 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml @@ -34,6 +34,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -61,6 +62,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -88,6 +90,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -103,6 +106,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -118,6 +122,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -133,6 +138,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -148,6 +154,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -163,6 +170,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc @@ -180,6 +188,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot type: doc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml index 6a1d6e117e924..9966ae668c08f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml @@ -18,6 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-update-model-snapshot type: doc @@ -67,6 +68,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json index: index: .ml-anomalies-update-model-snapshot type: doc diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java index fbda8ad716b2c..7f018f967fbfd 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; @@ -34,6 +35,7 @@ import java.util.Map; import java.util.Set; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isOneOf; @@ -177,10 +179,12 @@ public void testScope() throws Exception { assertThat(records.get(0).getOverFieldValue(), equalTo("333.333.333.333")); // Now let's update the filter - MlFilter updatedFilter = MlFilter.builder(safeIps.getId()).setItems("333.333.333.333").build(); - assertThat(putMlFilter(updatedFilter).getFilter(), equalTo(updatedFilter)); + UpdateFilterAction.Request updateFilterRequest = new UpdateFilterAction.Request(safeIps.getId()); + updateFilterRequest.setRemoveItems(safeIps.getItems()); + updateFilterRequest.setAddItems(Collections.singletonList("333.333.333.333")); + client().execute(UpdateFilterAction.INSTANCE, updateFilterRequest).get(); - // Wait until the notification that the process was updated is indexed + // Wait until the notification that the filter was updated is indexed assertBusy(() -> { SearchResponse searchResponse = client().prepareSearch(".ml-notifications") .setSize(1) @@ -191,7 +195,7 @@ public void testScope() throws Exception { ).get(); SearchHit[] hits = searchResponse.getHits().getHits(); assertThat(hits.length, equalTo(1)); - 
assertThat(hits[0].getSourceAsMap().get("message"), equalTo("Updated filter [safe_ips] in running process"));
+            assertThat((String) hits[0].getSourceAsMap().get("message"), containsString("Filter [safe_ips] has been modified"));
         });
 
         long secondAnomalyTime = timestamp;
diff --git a/x-pack/qa/smoke-test-ml-with-security/build.gradle b/x-pack/qa/smoke-test-ml-with-security/build.gradle
index ebe55c2b7ef29..58e5eca3600f6 100644
--- a/x-pack/qa/smoke-test-ml-with-security/build.gradle
+++ b/x-pack/qa/smoke-test-ml-with-security/build.gradle
@@ -42,6 +42,7 @@ integTestRunner {
     'ml/filter_crud/Test get filter API with bad ID',
     'ml/filter_crud/Test invalid param combinations',
     'ml/filter_crud/Test non-existing filter',
+    'ml/filter_crud/Test update filter given remove item is not present',
     'ml/get_datafeed_stats/Test get datafeed stats given missing datafeed_id',
     'ml/get_datafeeds/Test get datafeed given missing datafeed_id',
     'ml/jobs_crud/Test cannot create job with existing categorizer state document',

From e15bedf7133b0a602e16446ddcbd3ade3e527d01 Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Fri, 22 Jun 2018 15:31:23 +0200
Subject: [PATCH 16/31] Allow multiple unicast host providers (#31509)

Introduces support for multiple host providers, which allows the
settings-based hosts resolver to be treated just like any other
UnicastHostsProvider. Also introduces the notion of a HostsResolver so
that plugins such as FileBasedDiscovery do not need to create their own
thread pool for resolving hosts, making it easier to add new plugins of
a similar kind.
---
 .../classic/AzureUnicastHostsProvider.java    |  2 +-
 .../ec2/AwsEc2UnicastHostsProvider.java       |  2 +-
 .../discovery/ec2/Ec2DiscoveryTests.java      |  8 +-
 .../file/FileBasedDiscoveryPlugin.java        | 50 +------------
 .../file/FileBasedUnicastHostsProvider.java   | 37 +--------
 .../FileBasedUnicastHostsProviderTests.java   | 12 ++-
 .../gce/GceUnicastHostsProvider.java          |  2 +-
 .../discovery/gce/GceDiscoveryTests.java      |  2 +-
 .../common/settings/ClusterSettings.java      |  3 +-
 .../discovery/DiscoveryModule.java            | 47 ++++++++----
 .../zen/SettingsBasedHostsProvider.java       | 75 +++++++++++++++++++
 .../discovery/zen/UnicastHostsProvider.java   | 12 ++-
 .../discovery/zen/UnicastZenPing.java         | 64 ++++++----------
 .../discovery/DiscoveryModuleTests.java       | 36 ++++++++-
 .../single/SingleNodeDiscoveryIT.java         |  2 +-
 .../discovery/zen/UnicastZenPingTests.java    | 45 ++++++-----
 .../discovery/zen/ZenDiscoveryUnitTests.java  |  2 +-
 .../discovery/MockUncasedHostProvider.java    |  2 +-
 .../test/discovery/TestZenDiscovery.java      |  4 +-
 .../license/TribeTransportTestCase.java       |  6 +-
 20 files changed, 227 insertions(+), 186 deletions(-)
 create mode 100644 server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java

diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java
index 482dafb008fc5..1a9265de2a72f 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java
@@ -132,7 +132,7 @@ public AzureUnicastHostsProvider(Settings settings, AzureComputeService azureCom
      * Setting `cloud.azure.refresh_interval` to `0` will disable caching (default).
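     *
     * Editor's aside: the provider contract every plugin migrates to in this
     * commit, sketched from the call sites in these hunks (the nesting and the
     * parameter names are reconstructed, not quoted from the commit):
     *
     *   public interface UnicastHostsProvider {
     *       interface HostsResolver {
     *           List<TransportAddress> resolveHosts(List<String> hosts, int limitPortCounts);
     *       }
     *       List<TransportAddress> buildDynamicHosts(HostsResolver hostsResolver);
     *   }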
*/ @Override - public List buildDynamicHosts() { + public List buildDynamicHosts(HostsResolver hostsResolver) { if (refreshInterval.millis() != 0) { if (dynamicHosts != null && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) { diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java index 396e9f707d404..8f5037042986b 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java @@ -92,7 +92,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos } @Override - public List buildDynamicHosts() { + public List buildDynamicHosts(HostsResolver hostsResolver) { return dynamicHosts.getOrRefresh(); } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index 9dc2e02edc1b5..295df0c818a91 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -93,7 +93,7 @@ protected List buildDynamicHosts(Settings nodeSettings, int no protected List buildDynamicHosts(Settings nodeSettings, int nodes, List> tagsList) { try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY, nodes, tagsList)) { AwsEc2UnicastHostsProvider provider = new AwsEc2UnicastHostsProvider(nodeSettings, transportService, plugin.ec2Service); - List dynamicHosts = provider.buildDynamicHosts(); + List dynamicHosts = provider.buildDynamicHosts(null); logger.debug("--> addresses found: {}", dynamicHosts); return dynamicHosts; } catch (IOException e) { @@ -307,7 +307,7 @@ protected List fetchDynamicNodes() { } }; for (int i=0; i<3; i++) { - provider.buildDynamicHosts(); + provider.buildDynamicHosts(null); } assertThat(provider.fetchCount, is(3)); } @@ -324,12 +324,12 @@ protected List fetchDynamicNodes() { } }; for (int i=0; i<3; i++) { - provider.buildDynamicHosts(); + provider.buildDynamicHosts(null); } assertThat(provider.fetchCount, is(1)); Thread.sleep(1_000L); // wait for cache to expire for (int i=0; i<3; i++) { - provider.buildDynamicHosts(); + provider.buildDynamicHosts(null); } assertThat(provider.fetchCount, is(2)); } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java index a8f3337d50da2..4d26447078597 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java @@ -19,36 +19,17 @@ package org.elasticsearch.discovery.file; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import 
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.node.Node; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.watcher.ResourceWatcherService; -import java.io.IOException; import java.nio.file.Path; -import java.util.Collection; import java.util.Collections; import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; import java.util.function.Supplier; /** @@ -58,48 +39,19 @@ */ public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin { - private static final Logger logger = Loggers.getLogger(FileBasedDiscoveryPlugin.class); - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); - private final Settings settings; private final Path configPath; - private ExecutorService fileBasedDiscoveryExecutorService; public FileBasedDiscoveryPlugin(Settings settings, Path configPath) { this.settings = settings; this.configPath = configPath; } - @Override - public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, - ResourceWatcherService resourceWatcherService, ScriptService scriptService, - NamedXContentRegistry xContentRegistry, Environment environment, - NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - final int concurrentConnects = UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings); - final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[file_based_discovery_resolve]"); - fileBasedDiscoveryExecutorService = EsExecutors.newScaling( - Node.NODE_NAME_SETTING.get(settings) + "/" + "file_based_discovery_resolve", - 0, - concurrentConnects, - 60, - TimeUnit.SECONDS, - threadFactory, - threadPool.getThreadContext()); - - return Collections.emptyList(); - } - - @Override - public void close() throws IOException { - ThreadPool.terminate(fileBasedDiscoveryExecutorService, 0, TimeUnit.SECONDS); - } - @Override public Map> getZenHostsProviders(TransportService transportService, NetworkService networkService) { return Collections.singletonMap( "file", - () -> new FileBasedUnicastHostsProvider( - new Environment(settings, configPath), transportService, fileBasedDiscoveryExecutorService)); + () -> new FileBasedUnicastHostsProvider(new Environment(settings, configPath))); } } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java index 4395045ea90f8..584ae4de5a2b5 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java @@ -23,27 +23,19 @@ import org.apache.logging.log4j.util.Supplier; import 
org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.env.Environment; -import org.elasticsearch.transport.TransportService; import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; -import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.elasticsearch.discovery.zen.UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT; -import static org.elasticsearch.discovery.zen.UnicastZenPing.resolveHostsLists; - /** * An implementation of {@link UnicastHostsProvider} that reads hosts/ports * from {@link #UNICAST_HOSTS_FILE}. @@ -60,25 +52,15 @@ class FileBasedUnicastHostsProvider extends AbstractComponent implements Unicast static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; - private final TransportService transportService; - private final ExecutorService executorService; - private final Path unicastHostsFilePath; - private final AtomicLong nodeIdGenerator = new AtomicLong(); // generates unique ids for the node - - private final TimeValue resolveTimeout; - - FileBasedUnicastHostsProvider(Environment environment, TransportService transportService, ExecutorService executorService) { + FileBasedUnicastHostsProvider(Environment environment) { super(environment.settings()); - this.transportService = transportService; - this.executorService = executorService; this.unicastHostsFilePath = environment.configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); - this.resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings); } @Override - public List buildDynamicHosts() { + public List buildDynamicHosts(HostsResolver hostsResolver) { List hostsList; try (Stream lines = Files.lines(unicastHostsFilePath)) { hostsList = lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments @@ -93,21 +75,8 @@ public List buildDynamicHosts() { hostsList = Collections.emptyList(); } - final List dynamicHosts = new ArrayList<>(); - try { - dynamicHosts.addAll(resolveHostsLists( - executorService, - logger, - hostsList, - 1, - transportService, - resolveTimeout)); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - + final List dynamicHosts = hostsResolver.resolveHosts(hostsList, 1); logger.debug("[discovery-file] Using dynamic discovery nodes {}", dynamicHosts); - return dynamicHosts; } diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java index 860d3537635d5..5837d3bcdfe3f 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java @@ -24,7 +24,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; 
import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -123,8 +125,10 @@ public void testUnicastHostsDoesNotExist() throws Exception { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .build(); final Environment environment = TestEnvironment.newEnvironment(settings); - final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment, transportService, executorService); - final List addresses = provider.buildDynamicHosts(); + final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment); + final List addresses = provider.buildDynamicHosts((hosts, limitPortCounts) -> + UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, + TimeValue.timeValueSeconds(10))); assertEquals(0, addresses.size()); } @@ -163,6 +167,8 @@ private List setupAndRunHostProvider(final List hostEn } return new FileBasedUnicastHostsProvider( - new Environment(settings, configPath), transportService, executorService).buildDynamicHosts(); + new Environment(settings, configPath)).buildDynamicHosts((hosts, limitPortCounts) -> + UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, + TimeValue.timeValueSeconds(10))); } } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index 790d70a8b99b0..778c38697c5ec 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -93,7 +93,7 @@ public GceUnicastHostsProvider(Settings settings, GceInstancesService gceInstanc * Information can be cached using `cloud.gce.refresh_interval` property if needed. */ @Override - public List buildDynamicHosts() { + public List buildDynamicHosts(HostsResolver hostsResolver) { // We check that needed properties have been set if (this.project == null || this.project.isEmpty() || this.zones == null || this.zones.isEmpty()) { throw new IllegalArgumentException("one or more gce discovery settings are missing. 
" + diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java index a1944a15d8036..816152186e761 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java @@ -108,7 +108,7 @@ protected List buildDynamicNodes(GceInstancesServiceImpl gceIn GceUnicastHostsProvider provider = new GceUnicastHostsProvider(nodeSettings, gceInstancesService, transportService, new NetworkService(Collections.emptyList())); - List dynamicHosts = provider.buildDynamicHosts(); + List dynamicHosts = provider.buildDynamicHosts(null); logger.info("--> addresses found: {}", dynamicHosts); return dynamicHosts; } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index d39440e66dfd0..0e67fd4855b3c 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -57,6 +57,7 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.FaultDetection; +import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider; import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.env.Environment; @@ -360,7 +361,7 @@ public void apply(Settings value, Settings current, Settings previous) { ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING, ZenDiscovery.MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING, ZenDiscovery.MAX_PENDING_CLUSTER_STATES_SETTING, - UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, + SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING, UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT, SearchService.DEFAULT_KEEPALIVE_SETTING, diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 179692cd516c8..e47fe7a7a70ed 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -31,7 +31,9 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.single.SingleNodeDiscovery; +import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.plugins.DiscoveryPlugin; @@ -42,13 +44,15 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; +import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Supplier; +import java.util.stream.Collectors; /** * A module for loading classes for node discovery. 
@@ -57,8 +61,8 @@ public class DiscoveryModule { public static final Setting DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type", "zen", Function.identity(), Property.NodeScope); - public static final Setting> DISCOVERY_HOSTS_PROVIDER_SETTING = - new Setting<>("discovery.zen.hosts_provider", (String)null, Optional::ofNullable, Property.NodeScope); + public static final Setting> DISCOVERY_HOSTS_PROVIDER_SETTING = + Setting.listSetting("discovery.zen.hosts_provider", Collections.emptyList(), Function.identity(), Property.NodeScope); private final Discovery discovery; @@ -66,9 +70,9 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, AllocationService allocationService) { - final UnicastHostsProvider hostsProvider; final Collection> joinValidators = new ArrayList<>(); - Map> hostProviders = new HashMap<>(); + final Map> hostProviders = new HashMap<>(); + hostProviders.put("settings", () -> new SettingsBasedHostsProvider(settings, transportService)); for (DiscoveryPlugin plugin : plugins) { plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> { if (hostProviders.put(entry.getKey(), entry.getValue()) != null) { @@ -80,17 +84,32 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic joinValidators.add(joinValidator); } } - Optional hostsProviderName = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings); - if (hostsProviderName.isPresent()) { - Supplier hostsProviderSupplier = hostProviders.get(hostsProviderName.get()); - if (hostsProviderSupplier == null) { - throw new IllegalArgumentException("Unknown zen hosts provider [" + hostsProviderName.get() + "]"); - } - hostsProvider = Objects.requireNonNull(hostsProviderSupplier.get()); - } else { - hostsProvider = Collections::emptyList; + List hostsProviderNames = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings); + // for bwc purposes, add settings provider even if not explicitly specified + if (hostsProviderNames.contains("settings") == false) { + List extendedHostsProviderNames = new ArrayList<>(); + extendedHostsProviderNames.add("settings"); + extendedHostsProviderNames.addAll(hostsProviderNames); + hostsProviderNames = extendedHostsProviderNames; + } + + final Set missingProviderNames = new HashSet<>(hostsProviderNames); + missingProviderNames.removeAll(hostProviders.keySet()); + if (missingProviderNames.isEmpty() == false) { + throw new IllegalArgumentException("Unknown zen hosts providers " + missingProviderNames); } + List filteredHostsProviders = hostsProviderNames.stream() + .map(hostProviders::get).map(Supplier::get).collect(Collectors.toList()); + + final UnicastHostsProvider hostsProvider = hostsResolver -> { + final List addresses = new ArrayList<>(); + for (UnicastHostsProvider provider : filteredHostsProviders) { + addresses.addAll(provider.buildDynamicHosts(hostsResolver)); + } + return Collections.unmodifiableList(addresses); + }; + Map> discoveryTypes = new HashMap<>(); discoveryTypes.put("zen", () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java new file mode 100644 index 0000000000000..6d6453c776e68 --- 
/dev/null +++ b/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.zen; + +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.transport.TransportService; + +import java.util.List; +import java.util.function.Function; + +import static java.util.Collections.emptyList; + +/** + * An implementation of {@link UnicastHostsProvider} that reads hosts/ports + * from the "discovery.zen.ping.unicast.hosts" node setting. If the port is + * left off an entry, a default port of 9300 is assumed. + * + * An example unicast hosts setting might look as follows: + * [67.81.244.10, 67.81.244.11:9305, 67.81.244.15:9400] + */ +public class SettingsBasedHostsProvider extends AbstractComponent implements UnicastHostsProvider { + + public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = + Setting.listSetting("discovery.zen.ping.unicast.hosts", emptyList(), Function.identity(), Setting.Property.NodeScope); + + // these limits are per-address + public static final int LIMIT_FOREIGN_PORTS_COUNT = 1; + public static final int LIMIT_LOCAL_PORTS_COUNT = 5; + + private final List configuredHosts; + + private final int limitPortCounts; + + public SettingsBasedHostsProvider(Settings settings, TransportService transportService) { + super(settings); + + if (DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.exists(settings)) { + configuredHosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings); + // we only limit to 1 address, makes no sense to ping 100 ports + limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT; + } else { + // if unicast hosts are not specified, fill with simple defaults on the local machine + configuredHosts = transportService.getLocalAddresses(); + limitPortCounts = LIMIT_LOCAL_PORTS_COUNT; + } + + logger.debug("using initial hosts {}", configuredHosts); + } + + @Override + public List buildDynamicHosts(HostsResolver hostsResolver) { + return hostsResolver.resolveHosts(configuredHosts, limitPortCounts); + } + +} diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java index d719f9d123b8c..86410005c92bf 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastHostsProvider.java @@ -31,5 +31,15 @@ public interface UnicastHostsProvider { /** * Builds the dynamic list of unicast hosts to be used for unicast discovery. 
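// Illustration only: hypothetical node settings exercising the now list-valued
// discovery.zen.hosts_provider setting together with the SettingsBasedHostsProvider
// defined above. Host values are made up, and "file" assumes the discovery-file
// plugin is installed.
import org.elasticsearch.common.settings.Settings;

class HostsProviderSettingsExample {
    static Settings nodeSettings() {
        return Settings.builder()
            // plugin-supplied providers to consult, in order; the "settings" provider
            // is prepended automatically for bwc even when it is not listed
            .putList("discovery.zen.hosts_provider", "file")
            // still consumed, now by the dedicated SettingsBasedHostsProvider
            .putList("discovery.zen.ping.unicast.hosts", "10.0.0.1", "10.0.0.2:9305")
            .build();
        // DiscoveryModule then concatenates the addresses returned by each configured
        // provider into a single unmodifiable list of seed hosts.
    }
}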
*/ - List buildDynamicHosts(); + List buildDynamicHosts(HostsResolver hostsResolver); + + /** + * Helper object that allows to resolve a list of hosts to a list of transport addresses. + * Each host is resolved into a transport address (or a collection of addresses if the + * number of ports is greater than one) + */ + interface HostsResolver { + List resolveHosts(List hosts, int limitPortCounts); + } + } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index cbadbb4a1e09b..9c86fa17e9b06 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -82,11 +82,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; -import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; -import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; @@ -94,26 +92,15 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { public static final String ACTION_NAME = "internal:discovery/zen/unicast"; - public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = - Setting.listSetting("discovery.zen.ping.unicast.hosts", emptyList(), Function.identity(), - Property.NodeScope); public static final Setting DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, Property.NodeScope); public static final Setting DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT = Setting.positiveTimeSetting("discovery.zen.ping.unicast.hosts.resolve_timeout", TimeValue.timeValueSeconds(5), Property.NodeScope); - // these limits are per-address - public static final int LIMIT_FOREIGN_PORTS_COUNT = 1; - public static final int LIMIT_LOCAL_PORTS_COUNT = 5; - private final ThreadPool threadPool; private final TransportService transportService; private final ClusterName clusterName; - private final List configuredHosts; - - private final int limitPortCounts; - private final PingContextProvider contextProvider; private final AtomicInteger pingingRoundIdGenerator = new AtomicInteger(); @@ -141,19 +128,10 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService this.contextProvider = contextProvider; final int concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings); - if (DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.exists(settings)) { - configuredHosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings); - // we only limit to 1 addresses, makes no sense to ping 100 ports - limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT; - } else { - // if unicast hosts are not specified, fill with simple defaults on the local machine - configuredHosts = transportService.getLocalAddresses(); - limitPortCounts = LIMIT_LOCAL_PORTS_COUNT; - } + resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings); logger.debug( - "using initial hosts {}, with concurrent_connects [{}], resolve_timeout [{}]", - configuredHosts, + "using concurrent_connects [{}], resolve_timeout [{}]", concurrentConnects, resolveTimeout); @@ -172,9 +150,9 @@ public UnicastZenPing(Settings 
settings, ThreadPool threadPool, TransportService } /** - * Resolves a list of hosts to a list of discovery nodes. Each host is resolved into a transport address (or a collection of addresses - * if the number of ports is greater than one) and the transport addresses are used to created discovery nodes. Host lookups are done - * in parallel using specified executor service up to the specified resolve timeout. + * Resolves a list of hosts to a list of transport addresses. Each host is resolved into a transport address (or a collection of + * addresses if the number of ports is greater than one). Host lookups are done in parallel using specified executor service up + * to the specified resolve timeout. * * @param executorService the executor service used to parallelize hostname lookups * @param logger logger used for logging messages regarding hostname lookups @@ -190,7 +168,7 @@ public static List resolveHostsLists( final List hosts, final int limitPortCounts, final TransportService transportService, - final TimeValue resolveTimeout) throws InterruptedException { + final TimeValue resolveTimeout) { Objects.requireNonNull(executorService); Objects.requireNonNull(logger); Objects.requireNonNull(hosts); @@ -205,8 +183,13 @@ public static List resolveHostsLists( .stream() .map(hn -> (Callable) () -> transportService.addressesFromString(hn, limitPortCounts)) .collect(Collectors.toList()); - final List> futures = - executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS); + final List> futures; + try { + futures = executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return Collections.emptyList(); + } final List transportAddresses = new ArrayList<>(); final Set localAddresses = new HashSet<>(); localAddresses.add(transportService.boundAddress().publishAddress()); @@ -232,6 +215,9 @@ public static List resolveHostsLists( assert e.getCause() != null; final String message = "failed to resolve host [" + hostname + "]"; logger.warn(message, e.getCause()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // ignore } } else { logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname); @@ -240,6 +226,11 @@ public static List resolveHostsLists( return Collections.unmodifiableList(transportAddresses); } + private UnicastHostsProvider.HostsResolver createHostsResolver() { + return (hosts, limitPortCounts) -> resolveHostsLists(unicastZenPingExecutorService, logger, hosts, + limitPortCounts, transportService, resolveTimeout); + } + @Override public void close() { ThreadPool.terminate(unicastZenPingExecutorService, 10, TimeUnit.SECONDS); @@ -281,18 +272,7 @@ protected void ping(final Consumer resultsConsumer, final TimeValue scheduleDuration, final TimeValue requestDuration) { final List seedAddresses = new ArrayList<>(); - try { - seedAddresses.addAll(resolveHostsLists( - unicastZenPingExecutorService, - logger, - configuredHosts, - limitPortCounts, - transportService, - resolveTimeout)); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - seedAddresses.addAll(hostsProvider.buildDynamicHosts()); + seedAddresses.addAll(hostsProvider.buildDynamicHosts(createHostsResolver())); final DiscoveryNodes nodes = contextProvider.clusterState().nodes(); // add all possible master nodes that were active in the last known cluster configuration for (ObjectCursor masterNode : nodes.getMasterNodes().values()) { diff 
--git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 18829d515973d..f2491b2db1f9a 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -137,11 +137,10 @@ public void testDuplicateDiscovery() { public void testHostsProvider() { Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "custom").build(); - final UnicastHostsProvider provider = Collections::emptyList; AtomicBoolean created = new AtomicBoolean(false); DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", () -> { created.set(true); - return Collections::emptyList; + return hostsResolver -> Collections.emptyList(); }); newModule(settings, Collections.singletonList(plugin)); assertTrue(created.get()); @@ -151,7 +150,7 @@ public void testUnknownHostsProvider() { Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "dne").build(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> newModule(settings, Collections.emptyList())); - assertEquals("Unknown zen hosts provider [dne]", e.getMessage()); + assertEquals("Unknown zen hosts providers [dne]", e.getMessage()); } public void testDuplicateHostsProvider() { @@ -162,6 +161,37 @@ public void testDuplicateHostsProvider() { assertEquals("Cannot register zen hosts provider [dup] twice", e.getMessage()); } + public void testSettingsHostsProvider() { + DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("settings", () -> null); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + newModule(Settings.EMPTY, Arrays.asList(plugin))); + assertEquals("Cannot register zen hosts provider [settings] twice", e.getMessage()); + } + + public void testMultiHostsProvider() { + AtomicBoolean created1 = new AtomicBoolean(false); + DummyHostsProviderPlugin plugin1 = () -> Collections.singletonMap("provider1", () -> { + created1.set(true); + return hostsResolver -> Collections.emptyList(); + }); + AtomicBoolean created2 = new AtomicBoolean(false); + DummyHostsProviderPlugin plugin2 = () -> Collections.singletonMap("provider2", () -> { + created2.set(true); + return hostsResolver -> Collections.emptyList(); + }); + AtomicBoolean created3 = new AtomicBoolean(false); + DummyHostsProviderPlugin plugin3 = () -> Collections.singletonMap("provider3", () -> { + created3.set(true); + return hostsResolver -> Collections.emptyList(); + }); + Settings settings = Settings.builder().putList(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), + "provider1", "provider3").build(); + newModule(settings, Arrays.asList(plugin1, plugin2, plugin3)); + assertTrue(created1.get()); + assertFalse(created2.get()); + assertTrue(created3.get()); + } + public void testLazyConstructionHostsProvider() { DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", () -> { diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index 51869068bb315..e3e7905f43150 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -82,7 +82,7 @@ public void 
testDoesNotRespondToZenPings() throws Exception { internalCluster().getInstance(TransportService.class); // try to ping the single node directly final UnicastHostsProvider provider = - () -> Collections.singletonList(nodeTransport.getLocalNode().getAddress()); + hostsResolver -> Collections.singletonList(nodeTransport.getLocalNode().getAddress()); final CountDownLatch latch = new CountDownLatch(1); final DiscoveryNodes nodes = DiscoveryNodes.builder() .add(nodeTransport.getLocalNode()) diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index 4aa75077431e7..eef926a1e1238 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -137,8 +137,6 @@ public void tearDown() throws Exception { } } - private static final UnicastHostsProvider EMPTY_HOSTS_PROVIDER = Collections::emptyList; - public void testSimplePings() throws IOException, InterruptedException, ExecutionException { // use ephemeral ports final Settings settings = Settings.builder().put("cluster.name", "test").put(TcpTransport.PORT.getKey(), 0).build(); @@ -182,7 +180,7 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build(); final ClusterState stateMismatch = ClusterState.builder(new ClusterName("mismatch")).version(randomNonNegativeLong()).build(); - Settings hostsSettings = Settings.builder() + final Settings hostsSettings = Settings.builder() .putList("discovery.zen.ping.unicast.hosts", NetworkAddress.format(new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort())), NetworkAddress.format(new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort())), @@ -196,22 +194,21 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) .nodes(DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A")) .build(); - TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER, () -> stateA); + TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); zenPingA.start(); closeables.push(zenPingA); ClusterState stateB = ClusterState.builder(state) .nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")) .build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER, () -> stateB); + TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); zenPingB.start(); closeables.push(zenPingB); ClusterState stateC = ClusterState.builder(stateMismatch) .nodes(DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C")) .build(); - TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleC, - EMPTY_HOSTS_PROVIDER, () -> stateC) { + TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleC, () -> stateC) { @Override protected Version getVersion() { return versionD; @@ -223,8 +220,7 @@ protected Version getVersion() { ClusterState stateD = ClusterState.builder(stateMismatch) 
.nodes(DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D")) .build(); - TestUnicastZenPing zenPingD = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleD, - EMPTY_HOSTS_PROVIDER, () -> stateD); + TestUnicastZenPing zenPingD = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleD, () -> stateD); zenPingD.start(); closeables.push(zenPingD); @@ -329,21 +325,21 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) .nodes(DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A")) .build(); - final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER, () -> stateA); + final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); zenPingA.start(); closeables.push(zenPingA); ClusterState stateB = ClusterState.builder(state) .nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")) .build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER, () -> stateB); + TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); zenPingB.start(); closeables.push(zenPingB); ClusterState stateC = ClusterState.builder(state) .nodes(DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C")) .build(); - TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettings, threadPool, handleC, EMPTY_HOSTS_PROVIDER, () -> stateC); + TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettings, threadPool, handleC, () -> stateC); zenPingC.start(); closeables.push(zenPingC); @@ -408,7 +404,7 @@ public BoundTransportAddress boundAddress() { Collections.emptySet()); closeables.push(transportService); final int limitPortCounts = randomIntBetween(1, 10); - final List transportAddresses = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = UnicastZenPing.resolveHostsLists( executorService, logger, Collections.singletonList("127.0.0.1"), @@ -452,7 +448,7 @@ public BoundTransportAddress boundAddress() { new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); closeables.push(transportService); - final List transportAddresses = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = UnicastZenPing.resolveHostsLists( executorService, logger, Collections.singletonList(NetworkAddress.format(loopbackAddress)), @@ -503,7 +499,7 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi Collections.emptySet()); closeables.push(transportService); - final List transportAddresses = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = UnicastZenPing.resolveHostsLists( executorService, logger, Arrays.asList(hostname), @@ -562,7 +558,7 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi closeables.push(transportService); final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(1, 3)); try { - final List transportAddresses = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = UnicastZenPing.resolveHostsLists( executorService, logger, Arrays.asList("hostname1", "hostname2"), @@ -610,6 +606,7 @@ public void testResolveReuseExistingNodeConnections() throws ExecutionException, 
hostsSettingsBuilder.put("discovery.zen.ping.unicast.hosts", (String) null); } final Settings hostsSettings = hostsSettingsBuilder.build(); + final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build(); // connection to reuse @@ -627,14 +624,14 @@ public void onConnectionOpened(Transport.Connection connection) { .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) .nodes(DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId("UZP_A")) .build(); - final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER, () -> stateA); + final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); zenPingA.start(); closeables.push(zenPingA); final ClusterState stateB = ClusterState.builder(state) .nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")) .build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER, () -> stateB); + TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); zenPingB.start(); closeables.push(zenPingB); @@ -669,19 +666,20 @@ public void testPingingTemporalPings() throws ExecutionException, InterruptedExc .put("cluster.name", "test") .put("discovery.zen.ping.unicast.hosts", (String) null) // use nodes for simplicity .build(); + final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build(); final ClusterState stateA = ClusterState.builder(state) .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) .nodes(DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId("UZP_A")).build(); - final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER, () -> stateA); + final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); zenPingA.start(); closeables.push(zenPingA); // Node B doesn't know about A! 
final ClusterState stateB = ClusterState.builder(state).nodes( DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")).build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER, () -> stateB); + TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); zenPingB.start(); closeables.push(zenPingB); @@ -728,7 +726,7 @@ public BoundTransportAddress boundAddress() { new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); closeables.push(transportService); - final List transportAddresses = TestUnicastZenPing.resolveHostsLists( + final List transportAddresses = UnicastZenPing.resolveHostsLists( executorService, logger, Arrays.asList("127.0.0.1:9300:9300", "127.0.0.1:9301"), @@ -828,9 +826,10 @@ private static class NetworkHandle { private static class TestUnicastZenPing extends UnicastZenPing { TestUnicastZenPing(Settings settings, ThreadPool threadPool, NetworkHandle networkHandle, - UnicastHostsProvider unicastHostsProvider, PingContextProvider contextProvider) { + PingContextProvider contextProvider) { super(Settings.builder().put("node.name", networkHandle.node.getName()).put(settings).build(), - threadPool, networkHandle.transportService, unicastHostsProvider, contextProvider); + threadPool, networkHandle.transportService, + new SettingsBasedHostsProvider(settings, networkHandle.transportService), contextProvider); } volatile CountDownLatch allTasksCompleted; diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index 9273ab1514372..a60a23bcd6d5c 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -317,7 +317,7 @@ public void onNewClusterState(String source, Supplier clusterState } }; ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), - masterService, clusterApplier, clusterSettings, Collections::emptyList, ESAllocationTestCase.createAllocationService(), + masterService, clusterApplier, clusterSettings, hostsResolver -> Collections.emptyList(), ESAllocationTestCase.createAllocationService(), Collections.emptyList()); zenDiscovery.start(); return zenDiscovery; diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java index 2e60a3c518dd3..dc9304637cdca 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockUncasedHostProvider.java @@ -56,7 +56,7 @@ public MockUncasedHostProvider(Supplier localNodeSupplier, Cluste } @Override - public List buildDynamicHosts() { + public List buildDynamicHosts(HostsResolver hostsResolver) { final DiscoveryNode localNode = getNode(); assert localNode != null; synchronized (activeNodesPerCluster) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java index 11f9e38e665ff..5387a659aa274 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java @@ -45,7 +45,7 @@ import java.util.Map; import java.util.function.Supplier; -import static org.elasticsearch.discovery.zen.UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; +import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; /** * An alternative zen discovery which allows using mocks for things like pings, as well as @@ -84,7 +84,7 @@ public Map> getZenHostsProviders(Transpor final Supplier supplier; if (USE_MOCK_PINGS.get(settings)) { // we have to return something in order for the unicast host provider setting to resolve to something. It will never be used - supplier = () -> () -> { + supplier = () -> hostsResolver -> { throw new UnsupportedOperationException(); }; } else { diff --git a/x-pack/qa/tribe-tests-with-license/src/test/java/org/elasticsearch/license/TribeTransportTestCase.java b/x-pack/qa/tribe-tests-with-license/src/test/java/org/elasticsearch/license/TribeTransportTestCase.java index f9b836d18cc44..a2ee9446e70a1 100644 --- a/x-pack/qa/tribe-tests-with-license/src/test/java/org/elasticsearch/license/TribeTransportTestCase.java +++ b/x-pack/qa/tribe-tests-with-license/src/test/java/org/elasticsearch/license/TribeTransportTestCase.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.discovery.zen.UnicastZenPing; +import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider; import org.elasticsearch.env.Environment; import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; @@ -191,9 +191,9 @@ public Collection> transportClientPlugins() { tribe1Defaults.normalizePrefix("tribe.t1."); tribe2Defaults.normalizePrefix("tribe.t2."); // give each tribe its unicast hosts to connect to - tribe1Defaults.putList("tribe.t1." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), + tribe1Defaults.putList("tribe.t1." + SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), getUnicastHosts(internalCluster().client())); - tribe1Defaults.putList("tribe.t2." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), + tribe1Defaults.putList("tribe.t2." + SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), getUnicastHosts(cluster2.client())); Settings merged = Settings.builder() From c72eea55e2af33e91aaf0669d86da81e920a107b Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 22 Jun 2018 10:27:54 -0400 Subject: [PATCH 17/31] Retry synced-flush in FullClusterRestartIT#testRecovery Today we examine the response of a synced-flush, then issue another request if the current one is not entirely successful. However, this approach is not correct, because the #performRequest method throws a ResponseException for a partial result.
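In condensed form, the retry pattern the fix below adopts (assertBusy, client(), Request, and ObjectPath are test-framework helpers; shown here only to make the retry semantics explicit):

assertBusy(() -> {
    try {
        Response resp = client().performRequest(new Request("POST", index + "/_flush/synced"));
        Map<String, Object> shards = ObjectPath.createFromResponse(resp).evaluate("_shards");
        assertThat(shards.get("successful"), equalTo(shards.get("total")));
        assertThat(shards.get("failed"), equalTo(0));
    } catch (ResponseException ex) {
        // performRequest never returns a partial synced-flush result: it throws.
        // Rethrowing as AssertionError makes assertBusy retry until its timeout.
        throw new AssertionError(ex);
    }
});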
Closes #31530 --- .../upgrades/FullClusterRestartIT.java | 14 +++++++++----- .../org/elasticsearch/upgrades/RecoveryIT.java | 13 +++++++++---- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 021423707b750..1e5a78172c88d 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.CheckedFunction; @@ -720,11 +721,14 @@ public void testRecovery() throws Exception { // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit. assertBusy(() -> { - Response resp = client().performRequest(new Request("POST", index + "/_flush/synced")); - assertOK(resp); - Map result = ObjectPath.createFromResponse(resp).evaluate("_shards"); - assertThat(result.get("successful"), equalTo(result.get("total"))); - assertThat(result.get("failed"), equalTo(0)); + try { + Response resp = client().performRequest(new Request("POST", index + "/_flush/synced")); + Map result = ObjectPath.createFromResponse(resp).evaluate("_shards"); + assertThat(result.get("successful"), equalTo(result.get("total"))); + assertThat(result.get("failed"), equalTo(0)); + } catch (ResponseException ex) { + throw new AssertionError(ex); // cause assert busy to retry + } }); } else { // Explicitly flush so we're sure to have a bunch of documents in the Lucene index diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index eea8b915fb40e..a68d1d84b4464 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -303,10 +304,14 @@ public void testRecoverSyncedFlushIndex() throws Exception { // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit.
assertBusy(() -> { - Response resp = client().performRequest(new Request("POST", index + "/_flush/synced")); - assertOK(resp); - Map result = ObjectPath.createFromResponse(resp).evaluate("_shards"); - assertThat(result.get("successful"), equalTo(2)); + try { + Response resp = client().performRequest(new Request("POST", index + "/_flush/synced")); + Map result = ObjectPath.createFromResponse(resp).evaluate("_shards"); + assertThat(result.get("successful"), equalTo(result.get("total"))); + assertThat(result.get("failed"), equalTo(0)); + } catch (ResponseException ex) { + throw new AssertionError(ex); // cause assert busy to retry + } }); } ensureGreen(index); From ec6dd09c85190980ae5401a25a3729ae8197e589 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 22 Jun 2018 13:46:48 +0200 Subject: [PATCH 18/31] Avoid deprecation warning when running the ML datafeed extractor. (#31463) In #29639 we added a `format` option to doc-value fields and deprecated usage of doc-value fields without a format so that we could migrate doc-value fields to use the format that comes with the mappings by default. However, I missed fixing the machine-learning datafeed extractor. --- .../ml/datafeed/extractor/scroll/ExtractedField.java | 8 +++++++- .../ml/datafeed/extractor/scroll/ScrollDataExtractor.java | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java index c2d866563d638..ef0dffa269114 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java @@ -103,7 +103,13 @@ public Object[] value(SearchHit hit) { if (value.length != 1) { return value; } - value[0] = ((BaseDateTime) value[0]).getMillis(); + if (value[0] instanceof String) { // doc_value field with the epoch_millis format + value[0] = Long.parseLong((String) value[0]); + } else if (value[0] instanceof BaseDateTime) { // script field + value[0] = ((BaseDateTime) value[0]).getMillis(); + } else { + throw new IllegalStateException("Unexpected value for a time field: " + value[0].getClass()); + } return value; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java index 24174730e2d3b..fd7df8e4333b1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -19,6 +19,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; @@ -46,6 +47,7 @@ class ScrollDataExtractor implements DataExtractor { private static final Logger LOGGER = Loggers.getLogger(ScrollDataExtractor.class); private static final TimeValue SCROLL_TIMEOUT = new TimeValue(30, TimeUnit.MINUTES); + private static final
String EPOCH_MILLIS_FORMAT = "epoch_millis"; private final Client client; private final ScrollDataExtractorContext context; @@ -114,7 +116,11 @@ private SearchRequestBuilder buildSearchRequest(long start) { context.query, context.extractedFields.timeField(), start, context.end)); for (String docValueField : context.extractedFields.getDocValueFields()) { - searchRequestBuilder.addDocValueField(docValueField); + if (docValueField.equals(context.extractedFields.timeField())) { + searchRequestBuilder.addDocValueField(docValueField, EPOCH_MILLIS_FORMAT); + } else { + searchRequestBuilder.addDocValueField(docValueField, DocValueFieldsContext.USE_DEFAULT_FORMAT); + } } String[] sourceFields = context.extractedFields.getSourceFields(); if (sourceFields.length == 0) { From 8037651a6aca0b384ed17f5c3133ab20ff786e0e Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 22 Jun 2018 16:17:17 +0200 Subject: [PATCH 19/31] Upgrade to Lucene 7.4.0. (#31529) This moves Elasticsearch from a recent 7.4.0 snapshot to the GA release. --- buildSrc/version.properties | 2 +- .../lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-7.4.0.jar.sha1 | 1 + .../lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analyzers-icu-7.4.0.jar.sha1 | 1 + ...lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../licenses/lucene-analyzers-kuromoji-7.4.0.jar.sha1 | 1 + .../lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../analysis-nori/licenses/lucene-analyzers-nori-7.4.0.jar.sha1 | 1 + ...lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../licenses/lucene-analyzers-phonetic-7.4.0.jar.sha1 | 1 + .../lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../licenses/lucene-analyzers-smartcn-7.4.0.jar.sha1 | 1 + .../lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../licenses/lucene-analyzers-stempel-7.4.0.jar.sha1 | 1 + ...cene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 | 1 - .../licenses/lucene-analyzers-morfologik-7.4.0.jar.sha1 | 1 + .../lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-analyzers-common-7.4.0.jar.sha1 | 1 + .../lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-7.4.0.jar.sha1 | 1 + server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-core-7.4.0.jar.sha1 | 1 + .../licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-grouping-7.4.0.jar.sha1 | 1 + .../lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-highlighter-7.4.0.jar.sha1 | 1 + server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-join-7.4.0.jar.sha1 | 1 + .../licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-memory-7.4.0.jar.sha1 | 1 + server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-misc-7.4.0.jar.sha1 | 1 + .../licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-queries-7.4.0.jar.sha1 | 1 + .../lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-queryparser-7.4.0.jar.sha1 | 1 + .../licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-sandbox-7.4.0.jar.sha1 | 1 + .../licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 | 1 - 
server/licenses/lucene-spatial-7.4.0.jar.sha1 | 1 + .../lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-7.4.0.jar.sha1 | 1 + .../lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-spatial3d-7.4.0.jar.sha1 | 1 + .../licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 | 1 - server/licenses/lucene-suggest-7.4.0.jar.sha1 | 1 + .../licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 | 1 - x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0.jar.sha1 | 1 + 49 files changed, 25 insertions(+), 25 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-core-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-grouping-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-join-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-memory-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-misc-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 
server/licenses/lucene-queries-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-spatial-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-7.4.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 server/licenses/lucene-suggest-7.4.0.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 78c48f7935722..bf32f41858164 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 6.4.0 -lucene = 7.4.0-snapshot-518d303506 +lucene = 7.4.0 # optional dependencies spatial4j = 0.7 diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 2e666a2d566b0..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a57659a275921d8ab3f7ec580e9bf713ce6143b1 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..2b14a61f264fa --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.4.0.jar.sha1 @@ -0,0 +1 @@ +9f0a326f7ec1671ffb07f95b27f1a5812b7dc1c3 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 03f1b7d27aed5..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b91a260d8d12ee4b3302a63059c73a34de0ce146 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..b5291b30c7de8 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0.jar.sha1 @@ -0,0 +1 @@ +394e811e9d9bf0b9fba837f7ceca9e8f3e39d1c2 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 9a5c6669009eb..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cc1ca9bd9e2c162dd1da8c2e7111913fd8033e48 \ No newline at end 
of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..49f55bea5e687 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0.jar.sha1 @@ -0,0 +1 @@ +5cd56acfa16ba20e19b5d21d90b510eada841431 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index cbf4f78c31999..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2fa3662a10a9e085b1c7b87293d727422cbe6224 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..c4b61b763b483 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0.jar.sha1 @@ -0,0 +1 @@ +db7b56f4cf533ad9022d2312c5ee48331edccca3 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index bd5bf428b6d44..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -60aa50c11857e6739e68936cb45102562b2c46b4 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..779cac9761242 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0.jar.sha1 @@ -0,0 +1 @@ +e8dba4d28a595eab2e8fb6095d1ac5f2d3872144 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index a73900802ace1..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4586368007785a3be26db4b9ce404ffb8c76f350 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..cf5c49a2759c9 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0.jar.sha1 @@ -0,0 +1 @@ +1243c771ee824c46a3d66ae3e4256d919fc06fbe \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index bf0a50f7154e5..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c6d030ab2c148df7a6ba73a774ef4b8c720a6cb \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0.jar.sha1 
b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..830b9ccf9cbe2 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0.jar.sha1 @@ -0,0 +1 @@ +c783794b0d20d8dc1285edc7701f386b1f0e2fb8 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index ba6ceb2aed9d8..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8275bf8df2644d5fcec2963cf237d14b6e00fefe \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..a96e05f5e3b87 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0.jar.sha1 @@ -0,0 +1 @@ +9438efa504a89afb6cb4c66448c257f865164d23 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 4c0db7a735c8d..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -557d62d2b13d3dcb1810a1633e22625e42425425 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..928cc6dea046c --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.4.0.jar.sha1 @@ -0,0 +1 @@ +e1afb580df500626a1c695e0fc9a7e8a8f58bcac \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 0579316096a72..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d3755ad4c98b49fe5055b32358e3071727177c03 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..a94663119e7d6 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.4.0.jar.sha1 @@ -0,0 +1 @@ +a6ad941ef1fdad48673ed511631b7e48a9456bf7 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 134072bc13701..0000000000000 --- a/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c1bbf611535f0b0fd0ba14e8da67c8d645b95244 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0.jar.sha1 b/server/licenses/lucene-core-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..80ba6c76aa301 --- /dev/null +++ b/server/licenses/lucene-core-7.4.0.jar.sha1 @@ -0,0 +1 @@ +730d9ac80436c8cbc0b2a8a749259be536b97316 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 
8a3327cc8a227..0000000000000 --- a/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b62ebd53bbefb2f59cd246157a6768cae8a5a3a1 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0.jar.sha1 b/server/licenses/lucene-grouping-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..5b781d26829ed --- /dev/null +++ b/server/licenses/lucene-grouping-7.4.0.jar.sha1 @@ -0,0 +1 @@ +56f99858a4421a517b52da36a222debcccab80c6 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 75fb5a7755639..0000000000000 --- a/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cba0fd4ccb98db8a72287a95d6b653e455f9eeb3 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..e1ebb95fe1b05 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.4.0.jar.sha1 @@ -0,0 +1 @@ +5266b45d7f049662817d739881765904621876d0 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 01e0197bc1713..0000000000000 --- a/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5127ed0b7516f8b28d84e837df4f33c67e361f6c \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0.jar.sha1 b/server/licenses/lucene-join-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..ff81c33c3f860 --- /dev/null +++ b/server/licenses/lucene-join-7.4.0.jar.sha1 @@ -0,0 +1 @@ +c77154d18c4944ceb6ce0741060632f57d623fdc \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 3d6069f2a5c8b..0000000000000 --- a/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -45c7b13aae1104f9f5f0fca0606e5741309c8d74 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0.jar.sha1 b/server/licenses/lucene-memory-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..7c0117dff6b68 --- /dev/null +++ b/server/licenses/lucene-memory-7.4.0.jar.sha1 @@ -0,0 +1 @@ +186ff981feec1bdbf1a6236e786ec171b5fbe3e0 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index a74be59aea39c..0000000000000 --- a/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2540c4b5d9dca8a39a3b4d58efe4ab484df7254f \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0.jar.sha1 b/server/licenses/lucene-misc-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..5cdf6810fa57c --- /dev/null +++ b/server/licenses/lucene-misc-7.4.0.jar.sha1 @@ -0,0 +1 @@ +bf844bb6f6d84da19e8c79ce5fbb4cf6d00f2611 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index cf26412b63f80..0000000000000 --- a/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ 
-1 +0,0 @@ -e9d0c0c020917d4bf9b590526866ff5547dbaa17 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0.jar.sha1 b/server/licenses/lucene-queries-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..198890379374f --- /dev/null +++ b/server/licenses/lucene-queries-7.4.0.jar.sha1 @@ -0,0 +1 @@ +229a50e6d9d4db076f671c230d493000c6e2972c \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 63533b774673f..0000000000000 --- a/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -50969cdb7279047fbec94dda6e7d74d1c73c07f8 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..afdc275afe2b3 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.4.0.jar.sha1 @@ -0,0 +1 @@ +8e58add0d0c39df97d07c8e343041989bf4b3a3f \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 4eab31d62bd41..0000000000000 --- a/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94524b293572b1f0d01a0faeeade1ff24713f966 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..81ae3bddd0709 --- /dev/null +++ b/server/licenses/lucene-sandbox-7.4.0.jar.sha1 @@ -0,0 +1 @@ +1692604fa06a945d1ee19939022ef1a912235db3 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index ae5a2ea0375fd..0000000000000 --- a/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -878db723e41ece636ed338c4ef374e900f221a14 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0.jar.sha1 b/server/licenses/lucene-spatial-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..cc3f31340b9a2 --- /dev/null +++ b/server/licenses/lucene-spatial-7.4.0.jar.sha1 @@ -0,0 +1 @@ +847d2f897961124e2fc7d5e55d8309635bb026bc \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 9f5129d89056a..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c8dc85c32aeac6ff320aa6a9ea57881ad4847a55 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..3f05790e430f5 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.4.0.jar.sha1 @@ -0,0 +1 @@ +586892eefc0546643d7f5d7f83659c7db0d534ff \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 02fcef681fc30..0000000000000 --- a/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-203d8d22ab172e624784a5fdeaecdd01ae25fb3d \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..8c767b16c538b --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.4.0.jar.sha1 @@ -0,0 +1 @@ +32cd2854f39ff453a5d128ce40e11eea4168abbf \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index a7daa7ff02a38..0000000000000 --- a/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d6cf8fa1064a86991d5cd12a2ed32119ac91212 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0.jar.sha1 b/server/licenses/lucene-suggest-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..59d59cf79413a --- /dev/null +++ b/server/licenses/lucene-suggest-7.4.0.jar.sha1 @@ -0,0 +1 @@ +0cdc1a512032f8b23dd4b1add0f5cd06325addc3 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 deleted file mode 100644 index 134072bc13701..0000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c1bbf611535f0b0fd0ba14e8da67c8d645b95244 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0.jar.sha1 new file mode 100644 index 0000000000000..80ba6c76aa301 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0.jar.sha1 @@ -0,0 +1 @@ +730d9ac80436c8cbc0b2a8a749259be536b97316 \ No newline at end of file From 8d208e3a857d674d4c9dfaf720a708992353d8b8 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 22 Jun 2018 11:07:30 -0400 Subject: [PATCH 20/31] Revert "AwaitsFix FullClusterRestartIT#testRecovery" This reverts commit e9789ce43c3e2db2fff124b78746a7ea8a676071. --- .../java/org/elasticsearch/upgrades/FullClusterRestartIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 1e5a78172c88d..2c19078b36aa6 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -701,7 +701,6 @@ public void testEmptyShard() throws IOException { * Tests recovery of an index with or without a translog and the * statistics we gather about that. 
*/ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31530") public void testRecovery() throws Exception { int count; boolean shouldHaveTranslog; From 9a1e7642f296503ea097f0313ef24c3d69a9c805 Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Fri, 22 Jun 2018 17:44:13 +0200 Subject: [PATCH 21/31] fix repository update with the same settings but different type (cherry picked from commit 7313a98) --- .../repositories/RepositoriesService.java | 2 +- .../repositories/RepositoriesServiceIT.java | 95 +++++++++++++++++++ 2 files changed, 96 insertions(+), 1 deletion(-) create mode 100644 server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceIT.java diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 636e108468e82..d5b2a6413e9a9 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -349,7 +349,7 @@ private boolean registerRepository(RepositoryMetaData repositoryMetaData) throws Repository previous = repositories.get(repositoryMetaData.name()); if (previous != null) { RepositoryMetaData previousMetadata = previous.getMetadata(); - if (!previousMetadata.type().equals(repositoryMetaData.type()) && previousMetadata.settings().equals(repositoryMetaData.settings())) { + if (previousMetadata.equals(repositoryMetaData)) { // Previous version is the same as this one - ignore it return false; } diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceIT.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceIT.java new file mode 100644 index 0000000000000..28537022e3f33 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceIT.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
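For context on the one-line fix above in `RepositoriesService#registerRepository`: the old guard negated the type comparison, so a re-registration that changed only the repository type (same settings) was misclassified as "same repository" and silently ignored. A minimal, hypothetical sketch of the difference — `RepoMeta` and its string-valued settings are illustrative stand-ins for the real `RepositoryMetaData`, not actual Elasticsearch classes:

```java
import java.util.Objects;

// Hypothetical stand-in for RepositoryMetaData: a name, a type and flattened settings.
final class RepoMeta {
    final String name;
    final String type;
    final String settings;

    RepoMeta(String name, String type, String settings) {
        this.name = name;
        this.type = type;
        this.settings = settings;
    }

    // Old predicate: "different type AND same settings" was wrongly treated as a no-op.
    static boolean isNoOpBuggy(RepoMeta previous, RepoMeta updated) {
        return !previous.type.equals(updated.type) && previous.settings.equals(updated.settings);
    }

    // Fixed predicate: only a full metadata match may be skipped.
    static boolean isNoOpFixed(RepoMeta previous, RepoMeta updated) {
        return previous.equals(updated);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o instanceof RepoMeta == false) return false;
        RepoMeta that = (RepoMeta) o;
        return name.equals(that.name) && type.equals(that.type) && settings.equals(that.settings);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, type, settings);
    }

    public static void main(String[] args) {
        RepoMeta fs = new RepoMeta("test-repo", "fs", "location=/repo");
        RepoMeta mock = new RepoMeta("test-repo", "mock", "location=/repo");
        System.out.println(isNoOpBuggy(fs, mock)); // true  -> type change was silently dropped
        System.out.println(isNoOpFixed(fs, mock)); // false -> repository gets re-created
    }
}
```

This is exactly what the new `RepositoriesServiceIT#testUpdateRepository` below pins down: re-registering under a different type must yield a new `Repository` instance.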
+ */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.mockstore.MockRepository; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; + +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; + +public class RepositoriesServiceIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(MockRepository.Plugin.class); + } + + public void testUpdateRepository() { + final InternalTestCluster cluster = internalCluster(); + + final String repositoryName = "test-repo"; + + final Client client = client(); + final RepositoriesService repositoriesService = + cluster.getDataOrMasterNodeInstances(RepositoriesService.class).iterator().next(); + + final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); + + assertAcked(client.admin().cluster().preparePutRepository(repositoryName) + .setType(FsRepository.TYPE) + .setSettings(repoSettings) + .get()); + + final GetRepositoriesResponse originalGetRepositoriesResponse = + client.admin().cluster().prepareGetRepositories(repositoryName).get(); + + assertThat(originalGetRepositoriesResponse.repositories(), hasSize(1)); + RepositoryMetaData originalRepositoryMetaData = originalGetRepositoriesResponse.repositories().get(0); + + assertThat(originalRepositoryMetaData.type(), equalTo(FsRepository.TYPE)); + + final Repository originalRepository = repositoriesService.repository(repositoryName); + assertThat(originalRepository, instanceOf(FsRepository.class)); + + final boolean updated = randomBoolean(); + final String updatedRepositoryType = updated ? "mock" : FsRepository.TYPE; + + assertAcked(client.admin().cluster().preparePutRepository(repositoryName) + .setType(updatedRepositoryType) + .setSettings(repoSettings) + .get()); + + final GetRepositoriesResponse updatedGetRepositoriesResponse = + client.admin().cluster().prepareGetRepositories(repositoryName).get(); + + assertThat(updatedGetRepositoriesResponse.repositories(), hasSize(1)); + final RepositoryMetaData updatedRepositoryMetaData = updatedGetRepositoriesResponse.repositories().get(0); + + assertThat(updatedRepositoryMetaData.type(), equalTo(updatedRepositoryType)); + + final Repository updatedRepository = repositoriesService.repository(repositoryName); + assertThat(updatedRepository, updated ? not(sameInstance(originalRepository)) : sameInstance(originalRepository)); + } +} From a702ddcef47de7d2e59aac12a04fed4f2fcb0765 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Thu, 21 Jun 2018 16:53:20 -0700 Subject: [PATCH 22/31] In NumberFieldType equals and hashCode, make sure that NumberType is taken into account. 
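The shape of this fix is the standard "delegate to super, then compare the subclass's own field" pattern for equals/hashCode. A compact sketch under hypothetical names — `BaseFieldType`, `KindFieldType` and `Kind` are stand-ins, not the real mapper classes:

```java
import java.util.Objects;

enum Kind { INTEGER, LONG, DOUBLE }

class BaseFieldType {
    private final String name;

    BaseFieldType(String name) {
        this.name = name;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        return name.equals(((BaseFieldType) o).name);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name);
    }
}

final class KindFieldType extends BaseFieldType {
    private final Kind kind;

    KindFieldType(String name, Kind kind) {
        super(name);
        this.kind = kind;
    }

    @Override
    public boolean equals(Object o) {
        // super already handled null, class identity and the shared state
        if (super.equals(o) == false) {
            return false;
        }
        return kind == ((KindFieldType) o).kind; // enums compare safely by identity
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), kind); // must change whenever equals does
    }

    public static void main(String[] args) {
        KindFieldType a = new KindFieldType("count", Kind.INTEGER);
        KindFieldType b = new KindFieldType("count", Kind.LONG);
        System.out.println(a.equals(b)); // false: same name, different kind
        System.out.println(a.equals(new KindFieldType("count", Kind.INTEGER))); // true
    }
}
```

Before the patch, two `NumberFieldType` instances with the same name but different `NumberType`s compared equal; the new `testEqualsWithDifferentNumberTypes` guards against a regression.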
(#31514) (cherry picked from commit 3b7225e9d1dead1718132a51fdde5d1cbe4a42b9) --- .../index/mapper/NumberFieldMapper.java | 18 ++++++++++++++++-- .../index/mapper/NumberFieldTypeTests.java | 15 +++++++++++++-- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 05f1853b313e2..c4788056337b6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -847,7 +847,7 @@ private static double objectToDouble(Object value) { public static final class NumberFieldType extends SimpleMappedFieldType { - NumberType type; + private final NumberType type; public NumberFieldType(NumberType type) { super(); @@ -857,7 +857,7 @@ public NumberFieldType(NumberType type) { setOmitNorms(true); } - NumberFieldType(NumberFieldType other) { + private NumberFieldType(NumberFieldType other) { super(other); this.type = other.type; } @@ -937,6 +937,20 @@ public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { return new DocValueFormat.Decimal(format); } } + + @Override + public boolean equals(Object o) { + if (super.equals(o) == false) { + return false; + } + NumberFieldType that = (NumberFieldType) o; + return type == that.type; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), type); + } } private Boolean includeInAll; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java index 3ffe48fe70af6..4b2967553e57b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.document.Document; import org.apache.lucene.document.FloatPoint; import org.apache.lucene.document.HalfFloatPoint; @@ -37,10 +36,11 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.TestUtil; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.mapper.MappedFieldType.Relation; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; +import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType; import org.hamcrest.Matchers; import org.junit.Before; @@ -68,6 +68,17 @@ protected MappedFieldType createDefaultFieldType() { return new NumberFieldMapper.NumberFieldType(type); } + public void testEqualsWithDifferentNumberTypes() { + NumberType type = randomFrom(NumberType.values()); + NumberFieldType fieldType = new NumberFieldType(type); + + NumberType otherType = randomValueOtherThan(type, + () -> randomFrom(NumberType.values())); + NumberFieldType otherFieldType = new NumberFieldType(otherType); + + assertNotEquals(fieldType, otherFieldType); + } + public void testIsFieldWithinQuery() throws IOException { MappedFieldType ft = createDefaultFieldType(); // current impl ignores args and should always return INTERSECTS From 1c6171f60cc8ee45ac0865f28d44d263e2fd898a Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Sun, 17 Jun 2018 21:42:42 -0700 
Subject: [PATCH 23/31] Remove some cases in FieldTypeLookupTests that are no longer relevant. (#31381) (cherry picked from commit 16fa6b270f5ebe9e78e0d283a88f4f7ced71c35a) --- .../index/mapper/FieldTypeLookupTests.java | 25 ------------------- 1 file changed, 25 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java index fe885a46b87ef..203a17fe8ee4f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java @@ -32,8 +32,6 @@ import java.util.Iterator; import java.util.List; -import static org.hamcrest.Matchers.containsString; - public class FieldTypeLookupTests extends ESTestCase { public void testEmpty() { @@ -85,29 +83,6 @@ public void testAddExistingField() { assertEquals(f2.fieldType(), lookup2.get("foo")); } - public void testAddExistingIndexName() { - MockFieldMapper f = new MockFieldMapper("foo"); - MockFieldMapper f2 = new MockFieldMapper("bar"); - FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type1", newList(f), randomBoolean()); - FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2), randomBoolean()); - - assertSame(f.fieldType(), lookup2.get("foo")); - assertSame(f2.fieldType(), lookup2.get("bar")); - assertEquals(2, size(lookup2.iterator())); - } - - public void testAddExistingFullName() { - MockFieldMapper f = new MockFieldMapper("foo"); - MockFieldMapper f2 = new MockFieldMapper("foo"); - FieldTypeLookup lookup = new FieldTypeLookup(); - try { - lookup.copyAndAddAll("type2", newList(f2), randomBoolean()); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("mapper [foo] has different [index_name]")); - } - } - public void testCheckCompatibilityMismatchedTypes() { FieldMapper f1 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); From 805efc7df1b33f65cae02a6d56c9679c59c7707c Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Mon, 18 Jun 2018 08:21:41 -0700 Subject: [PATCH 24/31] Clarify that IP range data can be specified in CIDR notation. 
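As a reminder of what the notation denotes: `a.b.c.d/n` fixes the top `n` bits as the network prefix and lets the remaining `32 - n` bits range freely, so `192.168.0.0/16` covers `192.168.0.0` through `192.168.255.255`. A self-contained sketch of that expansion — plain Java arithmetic for illustration only; Elasticsearch performs this parsing internally:

```java
// Expands an IPv4 CIDR block such as "192.168.0.0/16" into its first and last address.
public final class CidrDemo {

    static String toDotted(long value) {
        return ((value >> 24) & 0xFF) + "." + ((value >> 16) & 0xFF) + "."
            + ((value >> 8) & 0xFF) + "." + (value & 0xFF);
    }

    public static void main(String[] args) {
        String cidr = "192.168.0.0/16";
        String[] parts = cidr.split("/");
        int prefixLength = Integer.parseInt(parts[1]);

        long address = 0;
        for (String octet : parts[0].split("\\.")) {
            address = (address << 8) | Integer.parseInt(octet);
        }

        long mask = prefixLength == 0 ? 0 : (0xFFFFFFFFL << (32 - prefixLength)) & 0xFFFFFFFFL;
        long first = address & mask;
        long last = first | (~mask & 0xFFFFFFFFL);

        System.out.println(toDotted(first) + " - " + toDotted(last)); // 192.168.0.0 - 192.168.255.255
    }
}
```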
(#31374) (cherry picked from commit 3f5ebb862de56efed4edcdb8b338590cb54be6f5) --- docs/reference/mapping/types/range.asciidoc | 24 +++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 3013204e4ca21..a7ab6346176cb 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -168,6 +168,30 @@ This query produces a similar result: -------------------------------------------------- // TESTRESPONSE[s/"took": 13/"took" : $body.took/] +[[ip-range]] +==== IP Range + +In addition to the range format above, IP ranges can be provided in +https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation[CIDR] notation: + +[source,js] +-------------------------------------------------- +PUT range_index/_mapping/_doc +{ + "properties": { + "ip_whitelist": { + "type": "ip_range" + } + } +} + +PUT range_index/_doc/2 +{ + "ip_whitelist" : "192.168.0.0/16" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:range_index] [[range-params]] ==== Parameters for range fields From b3c914d706adc33c4cde444216c7526048c0cb2d Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Fri, 22 Jun 2018 21:08:11 +0200 Subject: [PATCH 25/31] IndexShard should not return null stats - empty stats or AlreadyClosedException if it's closed is better (cherry picked from commit f04c579) --- .../stats/TransportClusterStatsAction.java | 17 ++- .../admin/indices/stats/CommonStats.java | 113 +++++++++--------- .../admin/indices/stats/ShardStats.java | 1 + .../stats/TransportIndicesStatsAction.java | 16 ++- .../elasticsearch/index/shard/IndexShard.java | 16 +-- .../elasticsearch/indices/IndicesService.java | 17 ++- .../index/shard/IndexShardTests.java | 34 ++++++ .../test/InternalTestCluster.java | 10 +- 8 files changed, 152 insertions(+), 72 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index c87b55b0bbd7d..00d344831882d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.cluster.stats; +import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -37,6 +38,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.CommitStats; +import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeService; @@ -99,13 +102,23 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq for (IndexShard indexShard : indexService) { if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) { // only report on fully started shards + CommitStats commitStats; + SeqNoStats seqNoStats; + try { + commitStats = indexShard.commitStats(); + seqNoStats = indexShard.seqNoStats(); + } catch
(AlreadyClosedException e) { + // shard is closed - no stats is fine + commitStats = null; + seqNoStats = null; + } shardsStats.add( new ShardStats( indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), - indexShard.commitStats(), - indexShard.seqNoStats())); + commitStats, + seqNoStats)); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 6379f8da21aa2..256f5019a20a4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.stats; +import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -170,60 +171,64 @@ public CommonStats(CommonStatsFlags flags) { public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) { CommonStatsFlags.Flag[] setFlags = flags.getFlags(); for (CommonStatsFlags.Flag flag : setFlags) { - switch (flag) { - case Docs: - docs = indexShard.docStats(); - break; - case Store: - store = indexShard.storeStats(); - break; - case Indexing: - indexing = indexShard.indexingStats(flags.types()); - break; - case Get: - get = indexShard.getStats(); - break; - case Search: - search = indexShard.searchStats(flags.groups()); - break; - case Merge: - merge = indexShard.mergeStats(); - break; - case Refresh: - refresh = indexShard.refreshStats(); - break; - case Flush: - flush = indexShard.flushStats(); - break; - case Warmer: - warmer = indexShard.warmerStats(); - break; - case QueryCache: - queryCache = indicesQueryCache.getStats(indexShard.shardId()); - break; - case FieldData: - fieldData = indexShard.fieldDataStats(flags.fieldDataFields()); - break; - case Completion: - completion = indexShard.completionStats(flags.completionDataFields()); - break; - case Segments: - segments = indexShard.segmentStats(flags.includeSegmentFileSizes()); - break; - case Translog: - translog = indexShard.translogStats(); - break; - case Suggest: - // skip - break; - case RequestCache: - requestCache = indexShard.requestCache().stats(); - break; - case Recovery: - recoveryStats = indexShard.recoveryStats(); - break; - default: - throw new IllegalStateException("Unknown Flag: " + flag); + try { + switch (flag) { + case Docs: + docs = indexShard.docStats(); + break; + case Store: + store = indexShard.storeStats(); + break; + case Indexing: + indexing = indexShard.indexingStats(flags.types()); + break; + case Get: + get = indexShard.getStats(); + break; + case Search: + search = indexShard.searchStats(flags.groups()); + break; + case Merge: + merge = indexShard.mergeStats(); + break; + case Refresh: + refresh = indexShard.refreshStats(); + break; + case Flush: + flush = indexShard.flushStats(); + break; + case Warmer: + warmer = indexShard.warmerStats(); + break; + case QueryCache: + queryCache = indicesQueryCache.getStats(indexShard.shardId()); + break; + case FieldData: + fieldData = indexShard.fieldDataStats(flags.fieldDataFields()); + break; + case Completion: + completion = indexShard.completionStats(flags.completionDataFields()); + break; + case Segments: + segments = 
indexShard.segmentStats(flags.includeSegmentFileSizes()); + break; + case Translog: + translog = indexShard.translogStats(); + break; + case Suggest: + // skip + break; + case RequestCache: + requestCache = indexShard.requestCache().stats(); + break; + case Recovery: + recoveryStats = indexShard.recoveryStats(); + break; + default: + throw new IllegalStateException("Unknown Flag: " + flag); + } + } catch (AlreadyClosedException e) { + // shard is closed - no stats is fine } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java index 8b41c4bf90c99..898f3d69456b0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java @@ -70,6 +70,7 @@ public CommonStats getStats() { return this.commonStats; } + @Nullable public CommitStats getCommitStats() { return this.commitStats; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 50d7712da11d0..db6915ce9e9bf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.stats; +import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; @@ -33,6 +34,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.CommitStats; +import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndicesService; @@ -158,9 +161,20 @@ protected ShardStats shardOperation(IndicesStatsRequest request, ShardRouting sh flags.set(CommonStatsFlags.Flag.Recovery); } + CommitStats commitStats; + SeqNoStats seqNoStats; + try { + commitStats = indexShard.commitStats(); + seqNoStats = indexShard.seqNoStats(); + } catch (AlreadyClosedException e) { + // shard is closed - no stats is fine + commitStats = null; + seqNoStats = null; + } + return new ShardStats( indexShard.routingEntry(), indexShard.shardPath(), - new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats(), indexShard.seqNoStats()); + new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), commitStats, seqNoStats); } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index dfe2f3e1b46b1..a717b34eede37 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -900,21 +900,19 @@ public DocsStats docStats() { } /** - * @return {@link CommitStats} if engine is open, otherwise null + * @return {@link CommitStats} + * @throws AlreadyClosedException if shard is closed */ 
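The calling convention this contract change imposes is visible in the transport actions above: attempt the engine-backed stats, and on `AlreadyClosedException` record "no stats" instead of failing the whole request for a shard that is merely closed. A condensed, hypothetical sketch of that pattern — `Shard`, `Stats` and the local exception class are stand-ins for `IndexShard`, `CommitStats` and Lucene's `org.apache.lucene.store.AlreadyClosedException`:

```java
// Stand-in for org.apache.lucene.store.AlreadyClosedException, to keep the sketch dependency-free.
class AlreadyClosedException extends RuntimeException {
    AlreadyClosedException(String message) {
        super(message);
    }
}

class Stats {
    final long generation;

    Stats(long generation) {
        this.generation = generation;
    }
}

class Shard {
    private final boolean closed;

    Shard(boolean closed) {
        this.closed = closed;
    }

    // Mirrors the new contract: throw instead of returning null once the shard is closed.
    Stats commitStats() {
        if (closed) {
            throw new AlreadyClosedException("engine is closed");
        }
        return new Stats(2L);
    }
}

public final class StatsCollector {
    public static void main(String[] args) {
        for (Shard shard : new Shard[] { new Shard(false), new Shard(true) }) {
            Stats commitStats;
            try {
                commitStats = shard.commitStats();
            } catch (AlreadyClosedException e) {
                commitStats = null; // shard is closed - reporting no stats is fine
            }
            System.out.println(commitStats == null ? "closed shard, stats skipped"
                                                   : "generation=" + commitStats.generation);
        }
    }
}
```

Making the closed case an exception rather than a `@Nullable` return forces every call site to decide explicitly what "closed" means for it, which the new `testOnCloseStats` below exercises from both sides.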
- @Nullable public CommitStats commitStats() { - Engine engine = getEngineOrNull(); - return engine == null ? null : engine.commitStats(); + return getEngine().commitStats(); } /** - * @return {@link SeqNoStats} if engine is open, otherwise null + * @return {@link SeqNoStats} + * @throws AlreadyClosedException if shard is closed */ - @Nullable public SeqNoStats seqNoStats() { - Engine engine = getEngineOrNull(); - return engine == null ? null : engine.getSeqNoStats(replicationTracker.getGlobalCheckpoint()); + return getEngine().getSeqNoStats(replicationTracker.getGlobalCheckpoint()); } public IndexingStats indexingStats(String... types) { @@ -944,8 +942,6 @@ public StoreStats storeStats() { return store.stats(); } catch (IOException e) { throw new ElasticsearchException("io exception while building 'store stats'", e); - } catch (AlreadyClosedException ex) { - return null; // already closed } } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index d6c0878108f37..6003b720a499b 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -79,6 +79,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.cache.request.ShardRequestCache; +import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.fielddata.IndexFieldDataCache; @@ -91,6 +92,7 @@ import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; +import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; @@ -333,13 +335,24 @@ IndexShardStats indexShardStats(final IndicesService indicesService, final Index return null; } + CommitStats commitStats; + SeqNoStats seqNoStats; + try { + commitStats = indexShard.commitStats(); + seqNoStats = indexShard.seqNoStats(); + } catch (AlreadyClosedException e) { + // shard is closed - no stats is fine + commitStats = null; + seqNoStats = null; + } + return new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), - indexShard.commitStats(), - indexShard.seqNoStats()) + commitStats, + seqNoStats) }); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 8a3cf73554d1b..bb6a007455433 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -71,6 +71,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.EngineTestCase; @@ -86,6 +87,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import 
org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; @@ -2935,4 +2937,36 @@ public void onShardInactive(IndexShard indexShard) { closeShards(primary); } + public void testOnCloseStats() throws IOException { + final IndexShard indexShard = newStartedShard(true); + + for (int i = 0; i < 3; i++) { + indexDoc(indexShard, "_doc", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}"); + indexShard.refresh("test"); // produce segments + } + + // check stats on closed and on opened shard + if (randomBoolean()) { + closeShards(indexShard); + + expectThrows(AlreadyClosedException.class, () -> indexShard.seqNoStats()); + expectThrows(AlreadyClosedException.class, () -> indexShard.commitStats()); + expectThrows(AlreadyClosedException.class, () -> indexShard.storeStats()); + + } else { + final SeqNoStats seqNoStats = indexShard.seqNoStats(); + assertThat(seqNoStats.getLocalCheckpoint(), equalTo(2L)); + + final CommitStats commitStats = indexShard.commitStats(); + assertThat(commitStats.getGeneration(), equalTo(2L)); + + final StoreStats storeStats = indexShard.storeStats(); + + assertThat(storeStats.sizeInBytes(), greaterThan(0L)); + + closeShards(indexShard); + } + + } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index de4b5a5ae0582..f0bbb319ff55b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1114,17 +1114,21 @@ private void assertSameSyncIdSameDocs() { IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { - CommitStats commitStats = indexShard.commitStats(); - if (commitStats != null) { // null if the engine is closed or if the shard is recovering + try { + CommitStats commitStats = indexShard.commitStats(); String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID); if (syncId != null) { long liveDocsOnShard = commitStats.getNumDocs(); if (docsOnShards.get(syncId) != null) { - assertThat("sync id is equal but number of docs does not match on node " + nodeAndClient.name + ". expected " + docsOnShards.get(syncId) + " but got " + liveDocsOnShard, docsOnShards.get(syncId), equalTo(liveDocsOnShard)); + assertThat("sync id is equal but number of docs does not match on node " + + nodeAndClient.name + ". 
expected " + docsOnShards.get(syncId) + " but got " + + liveDocsOnShard, docsOnShards.get(syncId), equalTo(liveDocsOnShard)); } else { docsOnShards.put(syncId, liveDocsOnShard); } } + } catch (AlreadyClosedException e) { + // the engine is closed or if the shard is recovering } } } From 70357934d5d770d02b4b57e4ce4b81e040e8d674 Mon Sep 17 00:00:00 2001 From: lcawl Date: Fri, 22 Jun 2018 16:18:06 -0700 Subject: [PATCH 26/31] [DOCS] Move sql to docs --- docs/build.gradle | 82 +++++++++++++++++++ .../reference}/sql/endpoints/cli.asciidoc | 1 + .../reference}/sql/endpoints/index.asciidoc | 0 .../reference}/sql/endpoints/jdbc.asciidoc | 1 + .../reference}/sql/endpoints/rest.asciidoc | 2 + .../sql/endpoints/translate.asciidoc | 1 + .../reference}/sql/functions/index.asciidoc | 2 + .../reference}/sql/getting-started.asciidoc | 2 + .../en => docs/reference}/sql/index.asciidoc | 1 + .../sql/language/data-types.asciidoc | 2 + .../reference}/sql/language/index.asciidoc | 2 + .../reference}/sql/language/reserved.asciidoc | 2 + .../reference}/sql/language/syntax.asciidoc | 2 + .../reference}/sql/overview.asciidoc | 2 + 14 files changed, 102 insertions(+) rename {x-pack/docs/en => docs/reference}/sql/endpoints/cli.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/sql/endpoints/index.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/sql/endpoints/jdbc.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/sql/endpoints/rest.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/sql/endpoints/translate.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/sql/functions/index.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/sql/getting-started.asciidoc (98%) rename {x-pack/docs/en => docs/reference}/sql/index.asciidoc (98%) rename {x-pack/docs/en => docs/reference}/sql/language/data-types.asciidoc (98%) rename {x-pack/docs/en => docs/reference}/sql/language/index.asciidoc (90%) rename {x-pack/docs/en => docs/reference}/sql/language/reserved.asciidoc (98%) rename {x-pack/docs/en => docs/reference}/sql/language/syntax.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/sql/overview.asciidoc (97%) diff --git a/docs/build.gradle b/docs/build.gradle index f28de4fa6fde9..ee062655664bc 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -515,3 +515,85 @@ for (int i = 0; i < 5; i++) { {"index":{}} {"ip": "12.0.0.$i"}""" } + +// Used by SQL because it looks SQL-ish +buildRestTests.setups['library'] = ''' + - do: + indices.create: + index: library + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + mappings: + book: + properties: + name: + type: text + fields: + keyword: + type: keyword + author: + type: text + fields: + keyword: + type: keyword + release_date: + type: date + page_count: + type: short + - do: + bulk: + index: library + type: book + refresh: true + body: | + {"index":{"_id": "Leviathan Wakes"}} + {"name": "Leviathan Wakes", "author": "James S.A. 
Corey", "release_date": "2011-06-02", "page_count": 561} + {"index":{"_id": "Hyperion"}} + {"name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482} + {"index":{"_id": "Dune"}} + {"name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604} + {"index":{"_id": "Dune Messiah"}} + {"name": "Dune Messiah", "author": "Frank Herbert", "release_date": "1969-10-15", "page_count": 331} + {"index":{"_id": "Children of Dune"}} + {"name": "Children of Dune", "author": "Frank Herbert", "release_date": "1976-04-21", "page_count": 408} + {"index":{"_id": "God Emperor of Dune"}} + {"name": "God Emperor of Dune", "author": "Frank Herbert", "release_date": "1981-05-28", "page_count": 454} + {"index":{"_id": "Consider Phlebas"}} + {"name": "Consider Phlebas", "author": "Iain M. Banks", "release_date": "1987-04-23", "page_count": 471} + {"index":{"_id": "Pandora's Star"}} + {"name": "Pandora's Star", "author": "Peter F. Hamilton", "release_date": "2004-03-02", "page_count": 768} + {"index":{"_id": "Revelation Space"}} + {"name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585} + {"index":{"_id": "A Fire Upon the Deep"}} + {"name": "A Fire Upon the Deep", "author": "Vernor Vinge", "release_date": "1992-06-01", "page_count": 613} + {"index":{"_id": "Ender's Game"}} + {"name": "Ender's Game", "author": "Orson Scott Card", "release_date": "1985-06-01", "page_count": 324} + {"index":{"_id": "1984"}} + {"name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328} + {"index":{"_id": "Fahrenheit 451"}} + {"name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227} + {"index":{"_id": "Brave New World"}} + {"name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268} + {"index":{"_id": "Foundation"}} + {"name": "Foundation", "author": "Isaac Asimov", "release_date": "1951-06-01", "page_count": 224} + {"index":{"_id": "The Giver"}} + {"name": "The Giver", "author": "Lois Lowry", "release_date": "1993-04-26", "page_count": 208} + {"index":{"_id": "Slaughterhouse-Five"}} + {"name": "Slaughterhouse-Five", "author": "Kurt Vonnegut", "release_date": "1969-06-01", "page_count": 275} + {"index":{"_id": "The Hitchhiker's Guide to the Galaxy"}} + {"name": "The Hitchhiker's Guide to the Galaxy", "author": "Douglas Adams", "release_date": "1979-10-12", "page_count": 180} + {"index":{"_id": "Snow Crash"}} + {"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470} + {"index":{"_id": "Neuromancer"}} + {"name": "Neuromancer", "author": "William Gibson", "release_date": "1984-07-01", "page_count": 271} + {"index":{"_id": "The Handmaid's Tale"}} + {"name": "The Handmaid's Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311} + {"index":{"_id": "Starship Troopers"}} + {"name": "Starship Troopers", "author": "Robert A. Heinlein", "release_date": "1959-12-01", "page_count": 335} + {"index":{"_id": "The Left Hand of Darkness"}} + {"name": "The Left Hand of Darkness", "author": "Ursula K. Le Guin", "release_date": "1969-06-01", "page_count": 304} + {"index":{"_id": "The Moon is a Harsh Mistress"}} + {"name": "The Moon is a Harsh Mistress", "author": "Robert A. 
Heinlein", "release_date": "1966-04-01", "page_count": 288} +''' \ No newline at end of file diff --git a/x-pack/docs/en/sql/endpoints/cli.asciidoc b/docs/reference/sql/endpoints/cli.asciidoc similarity index 99% rename from x-pack/docs/en/sql/endpoints/cli.asciidoc rename to docs/reference/sql/endpoints/cli.asciidoc index edbb1dcace4f1..206d687d97a5f 100644 --- a/x-pack/docs/en/sql/endpoints/cli.asciidoc +++ b/docs/reference/sql/endpoints/cli.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[sql-cli]] == SQL CLI diff --git a/x-pack/docs/en/sql/endpoints/index.asciidoc b/docs/reference/sql/endpoints/index.asciidoc similarity index 100% rename from x-pack/docs/en/sql/endpoints/index.asciidoc rename to docs/reference/sql/endpoints/index.asciidoc diff --git a/x-pack/docs/en/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc similarity index 99% rename from x-pack/docs/en/sql/endpoints/jdbc.asciidoc rename to docs/reference/sql/endpoints/jdbc.asciidoc index 6959035bf09e4..2125cc2ee839c 100644 --- a/x-pack/docs/en/sql/endpoints/jdbc.asciidoc +++ b/docs/reference/sql/endpoints/jdbc.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[sql-jdbc]] == SQL JDBC diff --git a/x-pack/docs/en/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc similarity index 99% rename from x-pack/docs/en/sql/endpoints/rest.asciidoc rename to docs/reference/sql/endpoints/rest.asciidoc index d31b03d3e7736..ef4c08ba483bc 100644 --- a/x-pack/docs/en/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[sql-rest]] == SQL REST API diff --git a/x-pack/docs/en/sql/endpoints/translate.asciidoc b/docs/reference/sql/endpoints/translate.asciidoc similarity index 99% rename from x-pack/docs/en/sql/endpoints/translate.asciidoc rename to docs/reference/sql/endpoints/translate.asciidoc index 9c1d71af5d35e..3f2f87ab2e2f5 100644 --- a/x-pack/docs/en/sql/endpoints/translate.asciidoc +++ b/docs/reference/sql/endpoints/translate.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[sql-translate]] == SQL Translate API diff --git a/x-pack/docs/en/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc similarity index 99% rename from x-pack/docs/en/sql/functions/index.asciidoc rename to docs/reference/sql/functions/index.asciidoc index a4e7028cf39c3..653b5a92fec52 100644 --- a/x-pack/docs/en/sql/functions/index.asciidoc +++ b/docs/reference/sql/functions/index.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[sql-functions]] == Functions and Operators diff --git a/x-pack/docs/en/sql/getting-started.asciidoc b/docs/reference/sql/getting-started.asciidoc similarity index 98% rename from x-pack/docs/en/sql/getting-started.asciidoc rename to docs/reference/sql/getting-started.asciidoc index 24f01910551bb..7d1bd33e8a035 100644 --- a/x-pack/docs/en/sql/getting-started.asciidoc +++ b/docs/reference/sql/getting-started.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[sql-getting-started]] == Getting Started with SQL diff --git a/x-pack/docs/en/sql/index.asciidoc b/docs/reference/sql/index.asciidoc similarity index 98% rename from x-pack/docs/en/sql/index.asciidoc rename to docs/reference/sql/index.asciidoc index 902ea8ada7e22..3d69a240a9a6f 100644 --- a/x-pack/docs/en/sql/index.asciidoc +++ b/docs/reference/sql/index.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[xpack-sql]] = SQL Access diff --git 
a/x-pack/docs/en/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc similarity index 98% rename from x-pack/docs/en/sql/language/data-types.asciidoc rename to docs/reference/sql/language/data-types.asciidoc index a01c2fda5c726..0ea152f639d61 100644 --- a/x-pack/docs/en/sql/language/data-types.asciidoc +++ b/docs/reference/sql/language/data-types.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[sql-data-types]] === Data Types diff --git a/x-pack/docs/en/sql/language/index.asciidoc b/docs/reference/sql/language/index.asciidoc similarity index 90% rename from x-pack/docs/en/sql/language/index.asciidoc rename to docs/reference/sql/language/index.asciidoc index 24bf450f1e42e..de8528242b07a 100644 --- a/x-pack/docs/en/sql/language/index.asciidoc +++ b/docs/reference/sql/language/index.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[sql-spec]] == SQL Language diff --git a/x-pack/docs/en/sql/language/reserved.asciidoc b/docs/reference/sql/language/reserved.asciidoc similarity index 98% rename from x-pack/docs/en/sql/language/reserved.asciidoc rename to docs/reference/sql/language/reserved.asciidoc index 8dc62e90a9eb1..1ae551cc43c08 100644 --- a/x-pack/docs/en/sql/language/reserved.asciidoc +++ b/docs/reference/sql/language/reserved.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[sql-spec-reserved]] === Reserved Keywords diff --git a/x-pack/docs/en/sql/language/syntax.asciidoc b/docs/reference/sql/language/syntax.asciidoc similarity index 99% rename from x-pack/docs/en/sql/language/syntax.asciidoc rename to docs/reference/sql/language/syntax.asciidoc index 5b837c91db2b1..2565c54166095 100644 --- a/x-pack/docs/en/sql/language/syntax.asciidoc +++ b/docs/reference/sql/language/syntax.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[sql-spec-syntax]] === SQL Statement Syntax diff --git a/x-pack/docs/en/sql/overview.asciidoc b/docs/reference/sql/overview.asciidoc similarity index 97% rename from x-pack/docs/en/sql/overview.asciidoc rename to docs/reference/sql/overview.asciidoc index 34d0dfb538352..b4b93d92a13d4 100644 --- a/x-pack/docs/en/sql/overview.asciidoc +++ b/docs/reference/sql/overview.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[sql-overview]] == Overview From 240ae2395d92331edf0148e753dd27c36b1f93a5 Mon Sep 17 00:00:00 2001 From: lcawl Date: Fri, 22 Jun 2018 17:13:19 -0700 Subject: [PATCH 27/31] [DOCS] Fixes SQL docs in nav --- docs/reference/index.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 49088ea2c2311..d64acacb488f6 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -59,7 +59,7 @@ include::index-modules.asciidoc[] include::ingest.asciidoc[] -include::{xes-repo-dir}/sql/index.asciidoc[] +include::sql/index.asciidoc[] include::{xes-repo-dir}/monitoring/index.asciidoc[] From aa6425fbf0341a724ebf519ae30cef5f4fb984ef Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 22 Jun 2018 15:39:34 -0700 Subject: [PATCH 28/31] [DOCS] Move monitoring to docs folder (#31477) --- docs/reference/index.asciidoc | 2 +- .../docs/en => docs/reference}/monitoring/collectors.asciidoc | 1 + .../docs/en => docs/reference}/monitoring/exporters.asciidoc | 1 + .../docs/en => docs/reference}/monitoring/http-export.asciidoc | 1 + {x-pack/docs/en => docs/reference}/monitoring/index.asciidoc | 1 + .../docs/en => docs/reference}/monitoring/local-export.asciidoc | 1 + .../docs/en => 
docs/reference}/monitoring/pause-export.asciidoc | 1 + x-pack/docs/en/monitoring/configuring-monitoring.asciidoc | 1 + x-pack/docs/en/monitoring/indices.asciidoc | 1 + 9 files changed, 9 insertions(+), 1 deletion(-) rename {x-pack/docs/en => docs/reference}/monitoring/collectors.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/monitoring/exporters.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/monitoring/http-export.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/monitoring/index.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/monitoring/local-export.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/monitoring/pause-export.asciidoc (98%) diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index d64acacb488f6..19bd49415b3cf 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -61,7 +61,7 @@ include::ingest.asciidoc[] include::sql/index.asciidoc[] -include::{xes-repo-dir}/monitoring/index.asciidoc[] +include::monitoring/index.asciidoc[] include::{xes-repo-dir}/rollup/index.asciidoc[] diff --git a/x-pack/docs/en/monitoring/collectors.asciidoc b/docs/reference/monitoring/collectors.asciidoc similarity index 99% rename from x-pack/docs/en/monitoring/collectors.asciidoc rename to docs/reference/monitoring/collectors.asciidoc index 336f204b5eefb..bd48d1287006a 100644 --- a/x-pack/docs/en/monitoring/collectors.asciidoc +++ b/docs/reference/monitoring/collectors.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[es-monitoring-collectors]] == Collectors diff --git a/x-pack/docs/en/monitoring/exporters.asciidoc b/docs/reference/monitoring/exporters.asciidoc similarity index 99% rename from x-pack/docs/en/monitoring/exporters.asciidoc rename to docs/reference/monitoring/exporters.asciidoc index e7727f1e97af0..2a7729eee9425 100644 --- a/x-pack/docs/en/monitoring/exporters.asciidoc +++ b/docs/reference/monitoring/exporters.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[es-monitoring-exporters]] == Exporters diff --git a/x-pack/docs/en/monitoring/http-export.asciidoc b/docs/reference/monitoring/http-export.asciidoc similarity index 99% rename from x-pack/docs/en/monitoring/http-export.asciidoc rename to docs/reference/monitoring/http-export.asciidoc index db1dbe2a29c5b..4dfe1a0c537ea 100644 --- a/x-pack/docs/en/monitoring/http-export.asciidoc +++ b/docs/reference/monitoring/http-export.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[http-exporter]] === HTTP Exporters diff --git a/x-pack/docs/en/monitoring/index.asciidoc b/docs/reference/monitoring/index.asciidoc similarity index 99% rename from x-pack/docs/en/monitoring/index.asciidoc rename to docs/reference/monitoring/index.asciidoc index 6b8ecc5038ea0..d6a55f44585dd 100644 --- a/x-pack/docs/en/monitoring/index.asciidoc +++ b/docs/reference/monitoring/index.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[es-monitoring]] = Monitoring {es} diff --git a/x-pack/docs/en/monitoring/local-export.asciidoc b/docs/reference/monitoring/local-export.asciidoc similarity index 99% rename from x-pack/docs/en/monitoring/local-export.asciidoc rename to docs/reference/monitoring/local-export.asciidoc index 12d0ab5ea9f81..2bc757f07ecc8 100644 --- a/x-pack/docs/en/monitoring/local-export.asciidoc +++ b/docs/reference/monitoring/local-export.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[local-exporter]] === Local Exporters diff --git a/x-pack/docs/en/monitoring/pause-export.asciidoc 
b/docs/reference/monitoring/pause-export.asciidoc similarity index 98% rename from x-pack/docs/en/monitoring/pause-export.asciidoc rename to docs/reference/monitoring/pause-export.asciidoc index d26799c6892c3..128e72a463c2d 100644 --- a/x-pack/docs/en/monitoring/pause-export.asciidoc +++ b/docs/reference/monitoring/pause-export.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[pause-export]] == Pausing Data Collection diff --git a/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc b/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc index 7bbc5ab19fee8..ce5927e76be1e 100644 --- a/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc +++ b/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold"] [[configuring-monitoring]] == Configuring Monitoring in {es} ++++ diff --git a/x-pack/docs/en/monitoring/indices.asciidoc b/x-pack/docs/en/monitoring/indices.asciidoc index efa9836daa2e9..a27d91d423e8d 100644 --- a/x-pack/docs/en/monitoring/indices.asciidoc +++ b/x-pack/docs/en/monitoring/indices.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[config-monitoring-indices]] === Configuring Indices for Monitoring From a55bdffd28ef8a7ff93d32a20cd03182eceaf4f8 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 22 Jun 2018 18:09:37 -0700 Subject: [PATCH 29/31] [DOCS] Updates Watcher examples for code testing (#31152) --- x-pack/docs/build.gradle | 32 ------------------- x-pack/docs/en/watcher/actions/email.asciidoc | 7 ++-- .../docs/en/watcher/actions/hipchat.asciidoc | 4 ++- x-pack/docs/en/watcher/actions/index.asciidoc | 1 + x-pack/docs/en/watcher/actions/jira.asciidoc | 1 + .../docs/en/watcher/actions/logging.asciidoc | 1 + .../en/watcher/actions/pagerduty.asciidoc | 2 ++ x-pack/docs/en/watcher/actions/slack.asciidoc | 3 ++ .../docs/en/watcher/actions/webhook.asciidoc | 4 +++ .../docs/en/watcher/condition/always.asciidoc | 1 + .../watcher/condition/array-compare.asciidoc | 1 + .../en/watcher/condition/compare.asciidoc | 3 ++ .../docs/en/watcher/condition/never.asciidoc | 1 + .../docs/en/watcher/condition/script.asciidoc | 7 ++++ .../en/watcher/customizing-watches.asciidoc | 10 ++++-- .../example-watch-meetupdata.asciidoc | 24 ++++++++++++-- .../en/watcher/how-watcher-works.asciidoc | 7 ++++ x-pack/docs/en/watcher/input/chain.asciidoc | 2 ++ x-pack/docs/en/watcher/input/http.asciidoc | 6 ++++ x-pack/docs/en/watcher/input/search.asciidoc | 5 +++ x-pack/docs/en/watcher/input/simple.asciidoc | 2 ++ x-pack/docs/en/watcher/transform.asciidoc | 1 + .../docs/en/watcher/transform/chain.asciidoc | 1 + .../docs/en/watcher/transform/script.asciidoc | 1 + .../docs/en/watcher/transform/search.asciidoc | 4 +++ .../en/watcher/trigger/schedule/cron.asciidoc | 2 ++ .../watcher/trigger/schedule/daily.asciidoc | 4 +++ .../watcher/trigger/schedule/hourly.asciidoc | 2 ++ .../trigger/schedule/interval.asciidoc | 1 + .../watcher/trigger/schedule/monthly.asciidoc | 3 ++ .../watcher/trigger/schedule/weekly.asciidoc | 3 ++ .../watcher/trigger/schedule/yearly.asciidoc | 3 ++ .../docs/en/watcher/troubleshooting.asciidoc | 2 ++ 33 files changed, 110 insertions(+), 41 deletions(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 06d4d2cbe8ed7..ffb3b42f45b9e 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -14,38 +14,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/security/authorization/run-as-privilege.asciidoc', 'en/security/tribe-clients-integrations/http.asciidoc', 
'en/security/authorization/custom-roles-provider.asciidoc', - 'en/watcher/actions/email.asciidoc', - 'en/watcher/actions/hipchat.asciidoc', - 'en/watcher/actions/index.asciidoc', - 'en/watcher/actions/logging.asciidoc', - 'en/watcher/actions/pagerduty.asciidoc', - 'en/watcher/actions/slack.asciidoc', - 'en/watcher/actions/jira.asciidoc', - 'en/watcher/actions/webhook.asciidoc', - 'en/watcher/condition/always.asciidoc', - 'en/watcher/condition/array-compare.asciidoc', - 'en/watcher/condition/compare.asciidoc', - 'en/watcher/condition/never.asciidoc', - 'en/watcher/condition/script.asciidoc', - 'en/watcher/customizing-watches.asciidoc', - 'en/watcher/example-watches/example-watch-meetupdata.asciidoc', - 'en/watcher/how-watcher-works.asciidoc', - 'en/watcher/input/chain.asciidoc', - 'en/watcher/input/http.asciidoc', - 'en/watcher/input/search.asciidoc', - 'en/watcher/input/simple.asciidoc', - 'en/watcher/transform.asciidoc', - 'en/watcher/transform/chain.asciidoc', - 'en/watcher/transform/script.asciidoc', - 'en/watcher/transform/search.asciidoc', - 'en/watcher/trigger/schedule/cron.asciidoc', - 'en/watcher/trigger/schedule/daily.asciidoc', - 'en/watcher/trigger/schedule/hourly.asciidoc', - 'en/watcher/trigger/schedule/interval.asciidoc', - 'en/watcher/trigger/schedule/monthly.asciidoc', - 'en/watcher/trigger/schedule/weekly.asciidoc', - 'en/watcher/trigger/schedule/yearly.asciidoc', - 'en/watcher/troubleshooting.asciidoc', 'en/rest-api/ml/delete-snapshot.asciidoc', 'en/rest-api/ml/get-bucket.asciidoc', 'en/rest-api/ml/get-job-stats.asciidoc', diff --git a/x-pack/docs/en/watcher/actions/email.asciidoc b/x-pack/docs/en/watcher/actions/email.asciidoc index 5e0ee4c451ac6..0da028fcc7b1e 100644 --- a/x-pack/docs/en/watcher/actions/email.asciidoc +++ b/x-pack/docs/en/watcher/actions/email.asciidoc @@ -35,6 +35,7 @@ the watch payload in the email body: } } -------------------------------------------------- +// NOTCONSOLE <1> The id of the action. <2> The action type is set to `email`. <3> One or more addresses to send the email to. Must be specified in the @@ -92,6 +93,7 @@ killed by firewalls or load balancers in between. } } -------------------------------------------------- +// NOTCONSOLE <1> The ID of the attachment, which is used as the file name in the email attachment. <2> The type of the attachment and its specific configuration. @@ -158,9 +160,8 @@ include::{kib-repo-dir}/reporting/watch-example.asciidoc[] include::{kib-repo-dir}/reporting/report-intervals.asciidoc[] -//TODO: RE-ADD LINK: -//For more information, see -//{kibana-ref}/automating-report-generation.html[Automating Report Generation]. +For more information, see +{kibana-ref}/automating-report-generation.html[Automating Report Generation]. [[email-action-attributes]] ==== Email Action Attributes diff --git a/x-pack/docs/en/watcher/actions/hipchat.asciidoc b/x-pack/docs/en/watcher/actions/hipchat.asciidoc index a55142e5b6553..49799567410ea 100644 --- a/x-pack/docs/en/watcher/actions/hipchat.asciidoc +++ b/x-pack/docs/en/watcher/actions/hipchat.asciidoc @@ -37,6 +37,7 @@ attribute is the message itself: } } -------------------------------------------------- +// NOTCONSOLE <1> The name of a HipChat account configured in `elasticsearch.yml`. <2> The message you want to send to HipChat. 
@@ -66,6 +67,7 @@ For example, the following action is configured to send messages to the } } -------------------------------------------------- +// NOTCONSOLE To send messages with a HipChat account that uses the <> profile, you need to specify what room or rooms you want to send the message to. @@ -92,7 +94,7 @@ For example, the following action is configured to send messages to the } } -------------------------------------------------- - +// NOTCONSOLE [[hipchat-action-attributes]] ==== HipChat Action Attributes diff --git a/x-pack/docs/en/watcher/actions/index.asciidoc b/x-pack/docs/en/watcher/actions/index.asciidoc index dd8d76fe549f3..8a31b150f22cb 100644 --- a/x-pack/docs/en/watcher/actions/index.asciidoc +++ b/x-pack/docs/en/watcher/actions/index.asciidoc @@ -22,6 +22,7 @@ The following snippet shows a simple `index` action definition: } } -------------------------------------------------- +// NOTCONSOLE <1> The id of the action <2> An optional <> to restrict action execution <3> An optional <> to transform the payload and prepare the data that should be indexed diff --git a/x-pack/docs/en/watcher/actions/jira.asciidoc b/x-pack/docs/en/watcher/actions/jira.asciidoc index c60d549b65bb1..952b1b71e6d29 100644 --- a/x-pack/docs/en/watcher/actions/jira.asciidoc +++ b/x-pack/docs/en/watcher/actions/jira.asciidoc @@ -40,6 +40,7 @@ The following snippet shows a simple jira action definition: } } -------------------------------------------------- +// NOTCONSOLE <1> The name of a Jira account configured in `elasticsearch.yml`. <2> The key of the Jira project in which the issue will be created. <3> The name of the issue type. diff --git a/x-pack/docs/en/watcher/actions/logging.asciidoc b/x-pack/docs/en/watcher/actions/logging.asciidoc index aa747028f7fa2..a8a4454c377eb 100644 --- a/x-pack/docs/en/watcher/actions/logging.asciidoc +++ b/x-pack/docs/en/watcher/actions/logging.asciidoc @@ -25,6 +25,7 @@ The following snippet shows a simple logging action definition: } } -------------------------------------------------- +// NOTCONSOLE <1> The id of the action. <2> An optional <> to transform the payload before executing the `logging` action. diff --git a/x-pack/docs/en/watcher/actions/pagerduty.asciidoc b/x-pack/docs/en/watcher/actions/pagerduty.asciidoc index caac4da5c0e8b..d13b722bb73ef 100644 --- a/x-pack/docs/en/watcher/actions/pagerduty.asciidoc +++ b/x-pack/docs/en/watcher/actions/pagerduty.asciidoc @@ -25,6 +25,7 @@ The following snippet shows a simple PagerDuty action definition: } } -------------------------------------------------- +// NOTCONSOLE <1> Description of the message @@ -59,6 +60,7 @@ payload as well as an array of contexts to the action. } } -------------------------------------------------- +// NOTCONSOLE [[pagerduty-action-attributes]] diff --git a/x-pack/docs/en/watcher/actions/slack.asciidoc b/x-pack/docs/en/watcher/actions/slack.asciidoc index a90336db41595..587d2ed7f8e59 100644 --- a/x-pack/docs/en/watcher/actions/slack.asciidoc +++ b/x-pack/docs/en/watcher/actions/slack.asciidoc @@ -29,6 +29,7 @@ The following snippet shows a simple slack action definition: } } -------------------------------------------------- +// NOTCONSOLE <1> The channels and users you want to send the message to. <2> The content of the message. 
@@ -66,6 +67,7 @@ The following snippet shows a standard message attachment: } } -------------------------------------------------- +// NOTCONSOLE [[slack-dynamic-attachment]] @@ -131,6 +133,7 @@ aggregation and the Slack action: } } -------------------------------------------------- +// NOTCONSOLE <1> The list generated by the action's transform. <2> The parameter placeholders refer to attributes in each item of the list generated by the transform. diff --git a/x-pack/docs/en/watcher/actions/webhook.asciidoc b/x-pack/docs/en/watcher/actions/webhook.asciidoc index 806777a406c6f..aabfb17f3b6e5 100644 --- a/x-pack/docs/en/watcher/actions/webhook.asciidoc +++ b/x-pack/docs/en/watcher/actions/webhook.asciidoc @@ -30,6 +30,7 @@ The following snippet shows a simple webhook action definition: } } -------------------------------------------------- +// NOTCONSOLE <1> The id of the action <2> An optional <> to transform the payload before executing the `webhook` action @@ -65,6 +66,7 @@ For example, the following `webhook` action creates a new issue in GitHub: } } -------------------------------------------------- +// NOTCONSOLE <1> The username and password for the user creating the issue NOTE: By default, both the username and the password are stored in the `.watches` @@ -101,6 +103,7 @@ the values serve as the parameter values: } } -------------------------------------------------- +// NOTCONSOLE <1> The parameter values can contain templated strings. @@ -128,6 +131,7 @@ the values serve as the header values: } } -------------------------------------------------- +// NOTCONSOLE <1> The header values can contain templated strings. diff --git a/x-pack/docs/en/watcher/condition/always.asciidoc b/x-pack/docs/en/watcher/condition/always.asciidoc index 22203018c926e..c2eb37be52c8f 100644 --- a/x-pack/docs/en/watcher/condition/always.asciidoc +++ b/x-pack/docs/en/watcher/condition/always.asciidoc @@ -22,3 +22,4 @@ object: "always" : {} } -------------------------------------------------- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/watcher/condition/array-compare.asciidoc b/x-pack/docs/en/watcher/condition/array-compare.asciidoc index 48b073e202c34..b413690865e60 100644 --- a/x-pack/docs/en/watcher/condition/array-compare.asciidoc +++ b/x-pack/docs/en/watcher/condition/array-compare.asciidoc @@ -34,6 +34,7 @@ than or equal to 25: } } -------------------------------------------------- +// NOTCONSOLE <1> The path to the array in the execution context that you want to evaluate, specified in dot notation. <2> The path to the field in each array element that you want to evaluate. diff --git a/x-pack/docs/en/watcher/condition/compare.asciidoc b/x-pack/docs/en/watcher/condition/compare.asciidoc index fc30a44bafe49..d58638e6fe472 100644 --- a/x-pack/docs/en/watcher/condition/compare.asciidoc +++ b/x-pack/docs/en/watcher/condition/compare.asciidoc @@ -49,6 +49,7 @@ search result>> is greater than or equal to 5: } } -------------------------------------------------- +// NOTCONSOLE <1> Use dot notation to reference a value in the execution context. <2> Specify a comparison operator and the value you want to compare against. @@ -68,6 +69,7 @@ of the form `<{expression}>`. For example, the following expression returns } } -------------------------------------------------- +// NOTCONSOLE You can also compare two values in the execution context by specifying the compared value as a path of the form of `{{path}}`. 
For example, the following @@ -85,6 +87,7 @@ to the `ctx.payload.aggregations.handled.buckets.true.doc_count`: } } -------------------------------------------------- +// NOTCONSOLE ==== Accessing Values in the Execution Context diff --git a/x-pack/docs/en/watcher/condition/never.asciidoc b/x-pack/docs/en/watcher/condition/never.asciidoc index d3d5cf39a4465..b8cad0b8c04d5 100644 --- a/x-pack/docs/en/watcher/condition/never.asciidoc +++ b/x-pack/docs/en/watcher/condition/never.asciidoc @@ -17,3 +17,4 @@ you specify the condition type and associate it with an empty object: "never" : {} } -------------------------------------------------- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/watcher/condition/script.asciidoc b/x-pack/docs/en/watcher/condition/script.asciidoc index 7e5159ada92cd..5e94551a5ef17 100644 --- a/x-pack/docs/en/watcher/condition/script.asciidoc +++ b/x-pack/docs/en/watcher/condition/script.asciidoc @@ -19,6 +19,7 @@ The following snippet configures an inline `script` condition that always return "script" : "return true" } -------------------------------------------------- +// NOTCONSOLE This example defines a script as a simple string. This format is actually a shortcut for defining an <> script. The @@ -43,6 +44,7 @@ parameter, `result`: } } -------------------------------------------------- +// NOTCONSOLE [[condition-script-inline]] ==== Inline Scripts @@ -59,6 +61,7 @@ always returns `true`. } } -------------------------------------------------- +// NOTCONSOLE [[condition-script-stored]] ==== Stored Scripts @@ -74,6 +77,7 @@ in Elasticsearch. The following snippet shows how to refer to a script by its `i } } -------------------------------------------------- +// NOTCONSOLE As with <> scripts, you can also specify the script language and parameters: @@ -88,6 +92,7 @@ scripts, you can also specify the script language and parameters: } } -------------------------------------------------- +// NOTCONSOLE [[accessing-watch-payload]] ==== Accessing the Watch Payload @@ -121,6 +126,7 @@ threshold: } } -------------------------------------------------- +// NOTCONSOLE When you're using a scripted condition to evaluate an Elasticsearch response, keep in mind that the fields in the response are no longer in their native data @@ -132,6 +138,7 @@ you need to parse the `@timestamp` string into a `DateTime`. For example: -------------------------------------------------- org.elasticsearch.common.joda.time.DateTime.parse(@timestamp) -------------------------------------------------- +// NOTCONSOLE You can reference the following variables in the watch context: diff --git a/x-pack/docs/en/watcher/customizing-watches.asciidoc b/x-pack/docs/en/watcher/customizing-watches.asciidoc index 66204a6d0f5b9..fc45bc636bfc5 100644 --- a/x-pack/docs/en/watcher/customizing-watches.asciidoc +++ b/x-pack/docs/en/watcher/customizing-watches.asciidoc @@ -36,6 +36,7 @@ fields in the payload: } } ------------------------------------- +// NOTCONSOLE See <> for more details. @@ -74,6 +75,7 @@ For example, the following `search` input loads the latest VIX quote: } } -------------------------------------------------- +// NOTCONSOLE <1> Will resolve to today's daily quotes index See <> for more details. @@ -105,7 +107,7 @@ Amsterdam using http://openweathermap.org/appid[OpenWeatherMap] online service: } } -------------------------------------------------- - +// NOTCONSOLE See <> for more details. 
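The payload loaded by an `http` input is the parsed JSON body of the response, exposed to the rest of the watch under `ctx.payload`. As a minimal sketch of consuming it, a compare condition over the weather data above might look like the following (the `main.temp` field name is assumed from the OpenWeatherMap response shape and is illustrative only):

[source,js]
--------------------------------------------------
"condition" : {
  "compare" : {
    "ctx.payload.main.temp" : { "gte" : 300 } <1>
  }
}
--------------------------------------------------
// NOTCONSOLE
<1> OpenWeatherMap reports temperatures in Kelvin unless other units are requested, so this threshold is roughly 27°C.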
[[chaining-inputs]] @@ -146,7 +148,7 @@ returned any hits: "compare" : { "ctx.payload.hits.total" : { "gt" : 0 }} }, -------------------------------------------------- - +// NOTCONSOLE See <> for more details. ==== Powerful Comparison Logic with the Script Condition @@ -176,7 +178,7 @@ VIX quote loaded by the `http` input is either greater than 5% or lower than -5% } } -------------------------------------------------- - +// NOTCONSOLE See <> for more details. [[using-transforms]] @@ -231,6 +233,7 @@ attaches the payload data to the message: } } -------------------------------------------------- +// NOTCONSOLE <1> The id of the action <2> The action type, in this case it's an `email` action @@ -261,6 +264,7 @@ creates a new issue in GitHub } } -------------------------------------------------- +// NOTCONSOLE <1> `` is the owner of the GitHub repo and `` is the name of the repo. <2> The username that creates the issue <3> The password of that user diff --git a/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc b/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc index 041a8ec81a7e3..d933a38d7d670 100644 --- a/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc +++ b/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc @@ -13,6 +13,7 @@ To ingest this data with Logstash: . Create a Logstash configuration file that uses the {logstash-ref}/plugins-inputs-stdin.html[Logstash standard input] and the {logstash-ref}/plugins-outputs-stdout.html[Logstash standard output] and save it in `logstash-{version}` directory as `livestream.conf`: + +-- [source,ruby] ---------------------------------------------------------- input { @@ -38,16 +39,20 @@ output { <2> } ---------------------------------------------------------- +// NOTCONSOLE <1> The meetup data stream is formatted in JSON. <2> Index the meetup data into Elasticsearch. +-- . To start indexing the meetup data, pipe the RSVP stream into Logstash and specify your `livestream.conf` configuration file. + -[source,she] +-- +[source,shell] ---------------------------------------------------------- - curl http://stream.meetup.com/2/rsvps | bin/logstash -f livestream.conf --------------------------------------------------------- +// NOTCONSOLE +-- Now that you're indexing the meetup RSVPs, you can set up a watch that lets you know about events you might be interested in. For example, let's create a watch that runs every hour, looks for events that talk about _Open Source_, and sends an email with information about the events. @@ -56,6 +61,7 @@ To set up the watch: . Specify how often you want to run the watch by adding a schedule trigger to the watch: + +-- [source,js] -------------------------------------------------- { @@ -65,8 +71,11 @@ To set up the watch: } }, -------------------------------------------------- +// NOTCONSOLE +-- . Load data into the watch payload by creating an input that searches the meetup data for events that have _Open Source_ as a topic. You can use aggregations to group the data by city, consolidate references to the same events, and sort the events by date. + +-- [source,js] ------------------------------------------------- "input": { @@ -135,19 +144,28 @@ To set up the watch: } }, -------------------------------------------------- +// NOTCONSOLE <1> Elasticsearch Date math is used to select the Logstash indices that contain the meetup data. The second pattern is needed in case the previous hour crosses days. 
<2> Find all of the RSVPs with `Open Source` as a topic. <3> Group the RSVPs by city. <4> Consolidate multiple RSVPs for the same event. <5> Sort the events so the latest events are listed first. <6> Group the events by name. +-- . To determine whether or not there are any Open Source events, add a compare condition that checks the watch payload to see if there were any search hits. ++ +-- [source,js] -------------------------------------------------- "compare" : { "ctx.payload.hits.total" : { "gt" : 0 }} -------------------------------------------------- +// NOTCONSOLE +-- + . To send an email when _Open Source_ events are found, add an email action: ++ +-- [source,js] -------------------------------------------------- "actions": { @@ -167,6 +185,8 @@ To set up the watch: } } --------------------------------------------------- +// NOTCONSOLE +-- NOTE: To enable Watcher to send emails, you must configure an email account in `elasticsearch.yml`. For more information, see <>. diff --git a/x-pack/docs/en/watcher/how-watcher-works.asciidoc b/x-pack/docs/en/watcher/how-watcher-works.asciidoc index b47b83dbf1ede..2bd19c1a41e02 100644 --- a/x-pack/docs/en/watcher/how-watcher-works.asciidoc +++ b/x-pack/docs/en/watcher/how-watcher-works.asciidoc @@ -283,6 +283,7 @@ The following snippet shows the basic structure of the _Watch Execution Context_ "vars" : { ... } <6> } ---------------------------------------------------------------------- +// NOTCONSOLE <1> Any static metadata specified in the watch definition. <2> The current watch payload. <3> The id of the executing watch. @@ -348,6 +349,7 @@ in sent emails: } } ---------------------------------------------------------------------- +// NOTCONSOLE [float] [[inline-templates-scripts]] @@ -369,6 +371,7 @@ the context metadata. } } ---------------------------------------------------------------------- +// NOTCONSOLE For a script, you simply specify the inline script as the value of the `script` field. For example: @@ -379,6 +382,7 @@ field. For example: "script" : "return true" } ---------------------------------------------------------------------- +// NOTCONSOLE You can also explicitly specify the inline type by using a formal object definition as the field value. For example: @@ -395,6 +399,7 @@ definition as the field value. For example: } } ---------------------------------------------------------------------- +// NOTCONSOLE The formal object definition for a script would be: @@ -406,6 +411,7 @@ The formal object definition for a script would be: } } ---------------------------------------------------------------------- +// NOTCONSOLE [float] [[stored-templates-scripts]] @@ -436,3 +442,4 @@ references the `email_notification_subject` template: } } ---------------------------------------------------------------------- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/watcher/input/chain.asciidoc b/x-pack/docs/en/watcher/input/chain.asciidoc index 1984b60d45e20..9952773e7227a 100644 --- a/x-pack/docs/en/watcher/input/chain.asciidoc +++ b/x-pack/docs/en/watcher/input/chain.asciidoc @@ -38,6 +38,7 @@ path set by a `simple` input: } } -------------------------------------------------- +// NOTCONSOLE <1> The inputs in a chain are specified as an array to guarantee the order in which the inputs are processed. (JSON does not guarantee the order of arbitrary objects.) @@ -90,3 +91,4 @@ still be available in its original form in `ctx.payload.first`. 
} } -------------------------------------------------- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/watcher/input/http.asciidoc b/x-pack/docs/en/watcher/input/http.asciidoc index 451903777d159..79d37d14a1bf4 100644 --- a/x-pack/docs/en/watcher/input/http.asciidoc +++ b/x-pack/docs/en/watcher/input/http.asciidoc @@ -40,6 +40,7 @@ index: } } -------------------------------------------------- +// NOTCONSOLE You can use the full Elasticsearch {ref}/query-dsl.html[Query DSL] to perform more sophisticated searches. For example, the following `http` input retrieves @@ -58,6 +59,7 @@ all documents that contain `event` in the `category` field: } } -------------------------------------------------- +// NOTCONSOLE ==== Calling Elasticsearch APIs @@ -82,6 +84,7 @@ Stats] API and enables the `human` attribute: } } -------------------------------------------------- +// NOTCONSOLE <1> Enabling this attribute returns the `bytes` values in the response in human readable format. @@ -110,6 +113,7 @@ a username and password to access `myservice`: } } -------------------------------------------------- +// NOTCONSOLE You can also pass in service-specific API keys and other information through the `params` attribute. For example, the following `http` @@ -131,6 +135,7 @@ http://openweathermap.org/appid[OpenWeatherMap] service: } } -------------------------------------------------- +// NOTCONSOLE ==== Using Templates @@ -153,6 +158,7 @@ and restrict the results to documents added within the last five minutes: } } -------------------------------------------------- +// NOTCONSOLE ==== Accessing the HTTP Response diff --git a/x-pack/docs/en/watcher/input/search.asciidoc b/x-pack/docs/en/watcher/input/search.asciidoc index a9782c482bd37..7ce67bfc1dc2b 100644 --- a/x-pack/docs/en/watcher/input/search.asciidoc +++ b/x-pack/docs/en/watcher/input/search.asciidoc @@ -32,6 +32,7 @@ documents from the `logs` index: } } -------------------------------------------------- +// NOTCONSOLE You can use date math and wildcards when specifying indices. For example, the following input loads the latest VIXZ quote from today's daily quotes index: @@ -57,6 +58,7 @@ the following input loads the latest VIXZ quote from today's daily quotes index: } } -------------------------------------------------- +// NOTCONSOLE ==== Extracting Specific Fields @@ -78,6 +80,7 @@ watch payload: } }, -------------------------------------------------- +// NOTCONSOLE ==== Using Templates @@ -105,6 +108,7 @@ parameter: ... } -------------------------------------------------- +// NOTCONSOLE ==== Applying Conditions @@ -131,6 +135,7 @@ check if the search returned more than five hits: ... 
} -------------------------------------------------- +// NOTCONSOLE ==== Accessing the Search Results diff --git a/x-pack/docs/en/watcher/input/simple.asciidoc b/x-pack/docs/en/watcher/input/simple.asciidoc index 3b7b4c5734c57..c756a4e5403e2 100644 --- a/x-pack/docs/en/watcher/input/simple.asciidoc +++ b/x-pack/docs/en/watcher/input/simple.asciidoc @@ -20,6 +20,7 @@ an object (`obj`): } } -------------------------------------------------- +// NOTCONSOLE For example, the following watch uses the `simple` input to set the recipient name for a daily reminder email: @@ -48,3 +49,4 @@ name for a daily reminder email: } } -------------------------------------------------- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/watcher/transform.asciidoc b/x-pack/docs/en/watcher/transform.asciidoc index 0351c9b8c1214..8241d7b0cb442 100644 --- a/x-pack/docs/en/watcher/transform.asciidoc +++ b/x-pack/docs/en/watcher/transform.asciidoc @@ -52,6 +52,7 @@ part of the definition of the `my_webhook` action. ] } -------------------------------------------------- +// NOTCONSOLE <1> A watch level `transform` <2> An action level `transform` diff --git a/x-pack/docs/en/watcher/transform/chain.asciidoc b/x-pack/docs/en/watcher/transform/chain.asciidoc index f17b05c71b4cc..9ad27fe48ed81 100644 --- a/x-pack/docs/en/watcher/transform/chain.asciidoc +++ b/x-pack/docs/en/watcher/transform/chain.asciidoc @@ -33,6 +33,7 @@ following snippet: ] } -------------------------------------------------- +// NOTCONSOLE <1> The `chain` transform definition <2> The first transform in the chain (in this case, a `search` transform) <3> The second and final transform in the chain (in this case, a `script` diff --git a/x-pack/docs/en/watcher/transform/script.asciidoc b/x-pack/docs/en/watcher/transform/script.asciidoc index 0a3bd401dc744..f1a46d482d9e6 100644 --- a/x-pack/docs/en/watcher/transform/script.asciidoc +++ b/x-pack/docs/en/watcher/transform/script.asciidoc @@ -20,6 +20,7 @@ TIP: The `script` transform is often useful when used in combination with the } } -------------------------------------------------- +// NOTCONSOLE <1> A simple `painless` script that creates a new payload with a single `time` field holding the scheduled time. diff --git a/x-pack/docs/en/watcher/transform/search.asciidoc b/x-pack/docs/en/watcher/transform/search.asciidoc index eaf7c80c6cbb3..56f9304d986ce 100644 --- a/x-pack/docs/en/watcher/transform/search.asciidoc +++ b/x-pack/docs/en/watcher/transform/search.asciidoc @@ -18,6 +18,7 @@ defined on the watch level: } } -------------------------------------------------- +// NOTCONSOLE Like every other search based construct, one can make use of the full search API supported by Elasticsearch. For example, the following search transform @@ -41,6 +42,7 @@ execute a search over all events indices, matching events with `error` priority: } } -------------------------------------------------- +// NOTCONSOLE The following table lists all available settings for the search transform: @@ -129,6 +131,7 @@ time of the watch: } } -------------------------------------------------- +// NOTCONSOLE The model of the template is a union between the provided `template.params` settings and the <>. 
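To make that union concrete, the following sketch shows a search transform whose time window is supplied through `template.params` (the index name and the `window` parameter are illustrative, not part of the original example):

[source,js]
--------------------------------------------------
"transform" : {
  "search" : {
    "request" : {
      "indices" : [ "events" ],
      "template" : {
        "source" : {
          "query" : {
            "range" : {
              "@timestamp" : { "gte" : "now-{{window}}" } <1>
            }
          }
        },
        "params" : {
          "window" : "30m" <2>
        }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE
<1> The `{{window}}` placeholder is resolved from the template model.
<2> Anything defined under `params` is merged with the watch execution context to form that model.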
@@ -173,3 +176,4 @@ The following is an example of using templates that refer to provided parameters } } -------------------------------------------------- +// NOTCONSOLE diff --git a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc index 57d330510971d..57a6ebdfd92ef 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc @@ -184,6 +184,7 @@ that triggers every day at noon: ... } -------------------------------------------------- +// NOTCONSOLE ===== Configuring a Multiple Times Cron Schedule @@ -207,6 +208,7 @@ minute during the weekend: ... } -------------------------------------------------- +// NOTCONSOLE [[croneval]] ===== Verifying Cron Expressions diff --git a/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc index e3165695e6aa8..e729335d59b29 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc @@ -28,6 +28,7 @@ day at 5:00 PM: } } -------------------------------------------------- +// NOTCONSOLE ===== Configuring a Multiple Times Daily Schedule @@ -45,6 +46,7 @@ triggers at `00:00`, `12:00`, and `17:00` every day. } } -------------------------------------------------- +// NOTCONSOLE [[specifying-times-using-objects]] ===== Specifying Times Using Objects @@ -69,6 +71,7 @@ For example, the following `daily` schedule triggers once every day at 5:00 PM: } } -------------------------------------------------- +// NOTCONSOLE To specify multiple times using the object notation, you specify multiple hours or minutes as an array. For example, following `daily` schedule triggers at @@ -89,3 +92,4 @@ or minutes as an array. 
For example, the following `daily` schedule triggers at } } -------------------------------------------------- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc index 48cc9dc2aa4a8..9ec750eebcd2b 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc @@ -28,6 +28,7 @@ For example, the following `hourly` schedule triggers at minute 30 every hour-- } } -------------------------------------------------- +// NOTCONSOLE ===== Configuring a Multiple Times Hourly Schedule @@ -46,3 +47,4 @@ triggers every 15 minutes every hour--`12:00`, `12:15`, `12:30`, `12:45`, } } -------------------------------------------------- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc index b65c16646e176..e534181ec0c2f 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc @@ -34,3 +34,4 @@ For example, the following `interval` schedule triggers every five minutes: } } -------------------------------------------------- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc index e6bf292d91811..d2cfe409992a7 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc @@ -26,6 +26,7 @@ on the 10th of each month at noon: } } -------------------------------------------------- +// NOTCONSOLE NOTE: You can also specify the day and time with the `day` and `time` attributes, they are interchangeable with `on` and `at`. @@ -50,6 +51,7 @@ schedule triggers at 12:00 PM on the 10th of each month and at 5:00 PM on the } } -------------------------------------------------- +// NOTCONSOLE Alternatively, you can specify days and times in an object that has `on` and `at` attributes that contain an array of values. For example, the following `monthly` @@ -68,3 +70,4 @@ schedule triggers at 12:00 AM and 12:00 PM on the 10th and 20th of each month. } } -------------------------------------------------- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc index a5ac52d0e0d01..d6a403cb125c6 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc @@ -32,6 +32,7 @@ triggers once a week on Friday at 5:00 PM: } } -------------------------------------------------- +// NOTCONSOLE NOTE: You can also specify the day and time with the `day` and `time` attributes, they are interchangeable with `on` and `at`. @@ -55,6 +56,7 @@ schedule triggers every Tuesday at 12:00 PM and every Friday at 5:00 PM: } } -------------------------------------------------- +// NOTCONSOLE Alternatively, you can specify days and times in an object that has `on` and `minute` attributes that contain an array of values. 
For example, the following @@ -73,3 +75,4 @@ Alternatively, you can specify days and times in an object that has `on` and } } -------------------------------------------------- +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc index 9ea9e1d1b47bc..d11cc5d072787 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc @@ -37,6 +37,7 @@ example, the following `yearly` schedule triggers once a year at noon on January } } -------------------------------------------------- +// NOTCONSOLE NOTE: You can also specify the month, day, and time with the `month`, `day`, and `time` attributes, they are interchangeable with `in`, `on`, and `at`. @@ -61,6 +62,7 @@ on July 20th. } } -------------------------------------------------- +// NOTCONSOLE Alternatively, you can specify the months, days, and times in an object that has `in`, `on`, and `minute` attributes that contain an array of values. For example, @@ -81,3 +83,4 @@ January 20th, December 10th, and December 20th. } } -------------------------------------------------- +// NOTCONSOLE diff --git a/x-pack/docs/en/watcher/troubleshooting.asciidoc b/x-pack/docs/en/watcher/troubleshooting.asciidoc index 8b793142ecc2b..20d599f8f5215 100644 --- a/x-pack/docs/en/watcher/troubleshooting.asciidoc +++ b/x-pack/docs/en/watcher/troubleshooting.asciidoc @@ -30,6 +30,8 @@ mappings: -------------------------------------------------- DELETE .watches -------------------------------------------------- +// CONSOLE +// TEST[skip:index deletion] + . Disable direct access to the `.watches` index: .. Stop the Elasticsearch node. From 1a3eac079cd7e7fa682c1d589487e992f7ee9c13 Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Sat, 23 Jun 2018 09:39:17 +0200 Subject: [PATCH 30/31] Add get field mappings to High Level REST API Client Relates to #27205 (cherry picked from commit b7ef75f) --- .../elasticsearch/client/IndicesClient.java | 31 +++++ .../client/RequestConverters.java | 20 +++ .../elasticsearch/client/IndicesClientIT.java | 38 ++++++ .../client/RequestConvertersTests.java | 64 ++++++++- .../IndicesClientDocumentationIT.java | 106 +++++++++++++++ .../indices/get_field_mappings.asciidoc | 86 ++++++++++++ .../high-level/supported-apis.asciidoc | 2 + .../mapping/get/GetFieldMappingsResponse.java | 123 +++++++++++++++++- .../get/GetFieldMappingsResponseTests.java | 100 +++++++++++++- 9 files changed, 562 insertions(+), 8 deletions(-) create mode 100644 docs/java-rest/high-level/indices/get_field_mappings.asciidoc diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 922da32d1fa9d..5c51aa17eec47 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -38,6 +38,8 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; 
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -268,6 +270,35 @@ public void getMappingsAsync(GetMappingsRequest getMappingsRequest, RequestOptio GetMappingsResponse::fromXContent, listener, emptySet()); } + /** + * Retrieves the field mappings on an index or indices using the Get Field Mapping API. + * See + * Get Field Mapping API on elastic.co + * @param getFieldMappingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetFieldMappingsResponse getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(getFieldMappingsRequest, RequestConverters::getFieldMapping, options, + GetFieldMappingsResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously retrieves the field mappings on an index or indices using the Get Field Mapping API. + * See + * Get Field Mapping API on elastic.co + * @param getFieldMappingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getFieldMappingAsync(GetFieldMappingsRequest getFieldMappingsRequest, RequestOptions options, + ActionListener<GetFieldMappingsResponse> listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(getFieldMappingsRequest, RequestConverters::getFieldMapping, options, + GetFieldMappingsResponse::fromXContent, listener, emptySet()); + } + /** * Updates aliases using the Index Aliases API. * See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 67a8825afb1a7..c6c53501e0dd6 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -50,6 +50,7 @@ import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -232,6 +233,25 @@ static Request getMappings(GetMappingsRequest getMappingsRequest) throws IOExcep return request; } + static Request getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest) throws IOException { + String[] indices = getFieldMappingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.indices(); + String[] types = getFieldMappingsRequest.types() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.types(); + String[] fields = getFieldMappingsRequest.fields() == null ? 
Strings.EMPTY_ARRAY : getFieldMappingsRequest.fields(); + + String endpoint = new EndpointBuilder().addCommaSeparatedPathParts(indices) + .addPathPartAsIs("_mapping").addCommaSeparatedPathParts(types) + .addPathPartAsIs("field").addCommaSeparatedPathParts(fields) + .build(); + + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + Params parameters = new Params(request); + parameters.withIndicesOptions(getFieldMappingsRequest.indicesOptions()); + parameters.withIncludeDefaults(getFieldMappingsRequest.includeDefaults()); + parameters.withLocal(getFieldMappingsRequest.local()); + return request; + } + static Request refresh(RefreshRequest refreshRequest) { String[] indices = refreshRequest.indices() == null ? Strings.EMPTY_ARRAY : refreshRequest.indices(); Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_refresh")); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 1f116a4e33270..ba910f91dc855 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -43,6 +43,8 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -75,6 +77,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -388,6 +391,41 @@ public void testGetMapping() throws IOException { assertThat(mappings, equalTo(expected)); } + public void testGetFieldMapping() throws IOException { + String indexName = "test"; + createIndex(indexName, Settings.EMPTY); + + PutMappingRequest putMappingRequest = new PutMappingRequest(indexName); + putMappingRequest.type("_doc"); + XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); + mappingBuilder.startObject().startObject("properties").startObject("field"); + mappingBuilder.field("type", "text"); + mappingBuilder.endObject().endObject().endObject(); + putMappingRequest.source(mappingBuilder); + + PutMappingResponse putMappingResponse = + execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync); + assertTrue(putMappingResponse.isAcknowledged()); + + GetFieldMappingsRequest getFieldMappingsRequest = new GetFieldMappingsRequest() + .indices(indexName) + .types("_doc") + .fields("field"); + + GetFieldMappingsResponse getFieldMappingsResponse = + execute(getFieldMappingsRequest, + highLevelClient().indices()::getFieldMapping, + highLevelClient().indices()::getFieldMappingAsync); + + final Map fieldMappingMap = + 
getFieldMappingsResponse.mappings().get(indexName).get("_doc"); + + final GetFieldMappingsResponse.FieldMappingMetaData metaData = + new GetFieldMappingsResponse.FieldMappingMetaData("field", + new BytesArray("{\"field\":{\"type\":\"text\"}}")); + assertThat(fieldMappingMap, equalTo(Collections.singletonMap("field", metaData))); + } + public void testDeleteIndex() throws IOException { { // Delete index if exists diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 32c378dddaf91..e416b3bd29fe8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -52,6 +52,7 @@ import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -466,6 +467,61 @@ public void testGetMapping() throws IOException { assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); } + public void testGetFieldMapping() throws IOException { + GetFieldMappingsRequest getFieldMappingsRequest = new GetFieldMappingsRequest(); + + String[] indices = Strings.EMPTY_ARRAY; + if (randomBoolean()) { + indices = randomIndicesNames(0, 5); + getFieldMappingsRequest.indices(indices); + } else if (randomBoolean()) { + getFieldMappingsRequest.indices((String[]) null); + } + + String type = null; + if (randomBoolean()) { + type = randomAlphaOfLengthBetween(3, 10); + getFieldMappingsRequest.types(type); + } else if (randomBoolean()) { + getFieldMappingsRequest.types((String[]) null); + } + + String[] fields = null; + if (randomBoolean()) { + fields = new String[randomIntBetween(1, 5)]; + for (int i = 0; i < fields.length; i++) { + fields[i] = randomAlphaOfLengthBetween(3, 10); + } + getFieldMappingsRequest.fields(fields); + } else if (randomBoolean()) { + getFieldMappingsRequest.fields((String[]) null); + } + + Map expectedParams = new HashMap<>(); + + setRandomIndicesOptions(getFieldMappingsRequest::indicesOptions, getFieldMappingsRequest::indicesOptions, expectedParams); + setRandomLocal(getFieldMappingsRequest::local, expectedParams); + + Request request = RequestConverters.getFieldMapping(getFieldMappingsRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_mapping"); + if (type != null) { + endpoint.add(type); + } + endpoint.add("field"); + if (fields != null) { + endpoint.add(String.join(",", fields)); + } + assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + + assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); + } + public void testDeleteIndex() { String[] indices = randomIndicesNames(0, 5); DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indices); @@ -2272,16 +2328,20 @@ private static void setRandomHumanReadable(GetIndexRequest request, Map request, 
Map expectedParams) { + private static void setRandomLocal(Consumer setter, Map expectedParams) { if (randomBoolean()) { boolean local = randomBoolean(); - request.local(local); + setter.accept(local); if (local) { expectedParams.put("local", String.valueOf(local)); } } } + private static void setRandomLocal(MasterNodeReadRequest request, Map expectedParams) { + setRandomLocal(request::local, expectedParams); + } + private static void setRandomTimeout(Consumer setter, TimeValue defaultTimeout, Map expectedParams) { if (randomBoolean()) { String timeout = randomTimeValue(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 800200c64b942..d5bc5f96395a4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -41,6 +41,8 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -703,6 +705,110 @@ public void onFailure(Exception e) { } } + public void testGetFieldMapping() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT); + assertTrue(createIndexResponse.isAcknowledged()); + PutMappingRequest request = new PutMappingRequest("twitter"); + request.type("tweet"); + request.source( + "{\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"timestamp\": {\n" + + " \"type\": \"date\"\n" + + " }\n" + + " }\n" + + "}", // <1> + XContentType.JSON); + PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); + assertTrue(putMappingResponse.isAcknowledged()); + } + + // tag::get-field-mapping-request + GetFieldMappingsRequest request = new GetFieldMappingsRequest(); // <1> + request.indices("twitter"); // <2> + request.types("tweet"); // <3> + request.fields("message", "timestamp"); // <4> + // end::get-field-mapping-request + + // tag::get-field-mapping-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::get-field-mapping-request-indicesOptions + + // tag::get-field-mapping-request-local + request.local(true); // <1> + // end::get-field-mapping-request-local + + { + + // tag::get-field-mapping-execute + GetFieldMappingsResponse response = + client.indices().getFieldMapping(request, RequestOptions.DEFAULT); + // end::get-field-mapping-execute + + // tag::get-field-mapping-response + final Map>> mappings = + response.mappings();// <1> + final Map typeMappings = + mappings.get("twitter").get("tweet"); // <2> + final GetFieldMappingsResponse.FieldMappingMetaData 
metaData = + typeMappings.get("message");// <3> + + final String fullName = metaData.fullName();// <4> + final Map source = metaData.sourceAsMap(); // <5> + // end::get-field-mapping-response + } + + { + // tag::get-field-mapping-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetFieldMappingsResponse getFieldMappingsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::get-field-mapping-execute-listener + + // Replace the empty listener with a blocking listener for the test + final CountDownLatch latch = new CountDownLatch(1); + final ActionListener latchListener = new LatchedActionListener<>(listener, latch); + listener = ActionListener.wrap(r -> { + final Map>> mappings = + r.mappings(); + final Map typeMappings = + mappings.get("twitter").get("tweet"); + final GetFieldMappingsResponse.FieldMappingMetaData metaData1 = typeMappings.get("message"); + + final String fullName = metaData1.fullName(); + final Map source = metaData1.sourceAsMap(); + latchListener.onResponse(r); + }, e -> { + latchListener.onFailure(e); + fail("should not fail"); + }); + + // tag::get-field-mapping-execute-async + client.indices().getFieldMappingAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::get-field-mapping-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + + } + + public void testOpenIndex() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/docs/java-rest/high-level/indices/get_field_mappings.asciidoc b/docs/java-rest/high-level/indices/get_field_mappings.asciidoc new file mode 100644 index 0000000000000..3f5ff5aec6449 --- /dev/null +++ b/docs/java-rest/high-level/indices/get_field_mappings.asciidoc @@ -0,0 +1,86 @@ +[[java-rest-high-get-field-mappings]] +=== Get Field Mappings API + +[[java-rest-high-get-field-mappings-request]] +==== Get Field Mappings Request + +A `GetFieldMappingsRequest` can have an optional list of indices, an optional list of types, and the list of fields to retrieve: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-field-mapping-request] +-------------------------------------------------- +<1> An empty request +<2> Setting the indices to fetch mappings for +<3> The types to be returned +<4> The fields to be returned + +==== Optional arguments +The following arguments can also optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-field-mapping-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-field-mapping-request-local] +-------------------------------------------------- +<1> The `local` flag (defaults to `false`) controls whether the mappings need +to be looked up in the local cluster state or in the cluster state held by +the elected master node + +[[java-rest-high-get-field-mappings-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-field-mapping-execute] +-------------------------------------------------- + +[[java-rest-high-get-field-mapping-async]] +==== Asynchronous Execution + +The asynchronous execution of a get field mappings request requires both the +`GetFieldMappingsRequest` instance and an `ActionListener` instance to be passed to +the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-field-mapping-execute-async] +-------------------------------------------------- +<1> The `GetFieldMappingsRequest` to execute and the `ActionListener` to use when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed, the `ActionListener` is called back using the `onResponse` method if +the execution successfully completed or using the `onFailure` method if it +failed. + +A typical listener for `GetFieldMappingsResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-field-mapping-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-get-field-mapping-response]] +==== Get Field Mappings Response + +The returned `GetFieldMappingsResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-field-mapping-response] +-------------------------------------------------- +<1> Returning all requested fields' mappings, grouped by index +<2> Retrieving the mappings for a particular index and type +<3> Getting the mapping metadata for the `message` field +<4> Getting the full name of the field +<5> Getting the mapping source of the field + diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 727088aa5737f..3caab5100ca0f 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -77,6 +77,7 @@ Index Management:: Mapping Management:: * <> +* <> Alias Management:: * <> @@ -98,6 +99,7 @@ include::indices/force_merge.asciidoc[] include::indices/rollover.asciidoc[] include::indices/put_mapping.asciidoc[] include::indices/get_mappings.asciidoc[] +include::indices/get_field_mappings.asciidoc[] include::indices/update_aliases.asciidoc[] include::indices/exists_alias.asciidoc[] include::indices/get_alias.asciidoc[] diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index d837c1cbd199b..81b9812d61c5f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -20,13 +20,17 @@ package org.elasticsearch.action.admin.indices.mapping.get; import org.elasticsearch.action.ActionResponse; +import 
org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.Mapper; @@ -34,13 +38,45 @@ import java.io.InputStream; import java.util.HashMap; import java.util.Map; +import java.util.Objects; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; /** Response object for {@link GetFieldMappingsRequest} API */ public class GetFieldMappingsResponse extends ActionResponse implements ToXContentFragment { + private static final ParseField MAPPINGS = new ParseField("mappings"); + + private static final ObjectParser>, String> PARSER = + new ObjectParser<>(MAPPINGS.getPreferredName(), true, HashMap::new); + + static { + PARSER.declareField((p, typeMappings, index) -> { + p.nextToken(); + while (p.currentToken() == XContentParser.Token.FIELD_NAME) { + final String typeName = p.currentName(); + + if (p.nextToken() == XContentParser.Token.START_OBJECT) { + final Map typeMapping = new HashMap<>(); + typeMappings.put(typeName, typeMapping); + + while (p.nextToken() == XContentParser.Token.FIELD_NAME) { + final String fieldName = p.currentName(); + final FieldMappingMetaData fieldMappingMetaData = FieldMappingMetaData.fromXContent(p); + typeMapping.put(fieldName, fieldMappingMetaData); + } + } else { + p.skipChildren(); + } + p.nextToken(); + } + }, MAPPINGS, ObjectParser.ValueType.OBJECT); + } + private Map>> mappings = emptyMap(); GetFieldMappingsResponse(Map>> mappings) { @@ -77,7 +113,7 @@ public FieldMappingMetaData fieldMappings(String index, String type, String fiel public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { for (Map.Entry>> indexEntry : mappings.entrySet()) { builder.startObject(indexEntry.getKey()); - builder.startObject("mappings"); + builder.startObject(MAPPINGS.getPreferredName()); for (Map.Entry> typeEntry : indexEntry.getValue().entrySet()) { builder.startObject(typeEntry.getKey()); for (Map.Entry fieldEntry : typeEntry.getValue().entrySet()) { @@ -93,9 +129,46 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public static GetFieldMappingsResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + + final Map>> mappings = new HashMap<>(); + if (parser.nextToken() == XContentParser.Token.FIELD_NAME) { + while (parser.currentToken() == XContentParser.Token.FIELD_NAME) { + final String index = parser.currentName(); + + final Map> typeMappings = PARSER.parse(parser, index); + mappings.put(index, typeMappings); + + parser.nextToken(); + } + } + + 
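// at this point `mappings` maps index name -> type -> field -> field mapping metadata +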
return new GetFieldMappingsResponse(mappings); + } + public static class FieldMappingMetaData implements ToXContentFragment { public static final FieldMappingMetaData NULL = new FieldMappingMetaData("", BytesArray.EMPTY); + private static final ParseField FULL_NAME = new ParseField("full_name"); + private static final ParseField MAPPING = new ParseField("mapping"); + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("field_mapping_meta_data", true, + a -> new FieldMappingMetaData((String)a[0], (BytesReference)a[1]) + ); + + static { + PARSER.declareField(optionalConstructorArg(), + (p, c) -> p.text(), FULL_NAME, ObjectParser.ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), + (p, c) -> { + final XContentBuilder jsonBuilder = jsonBuilder().copyCurrentStructure(p); + final BytesReference bytes = BytesReference.bytes(jsonBuilder); + return bytes; + }, MAPPING, ObjectParser.ValueType.OBJECT); + } + private String fullName; private BytesReference source; @@ -122,18 +195,41 @@ BytesReference getSource() { return source; } + public static FieldMappingMetaData fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("full_name", fullName); + builder.field(FULL_NAME.getPreferredName(), fullName); if (params.paramAsBoolean("pretty", false)) { builder.field("mapping", sourceAsMap()); } else { try (InputStream stream = source.streamInput()) { - builder.rawField("mapping", stream, XContentType.JSON); + builder.rawField(MAPPING.getPreferredName(), stream, XContentType.JSON); } } return builder; } + + @Override + public String toString() { + return "FieldMappingMetaData{fullName='" + fullName + '\'' + ", source=" + source + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof FieldMappingMetaData)) return false; + FieldMappingMetaData that = (FieldMappingMetaData) o; + return Objects.equals(fullName, that.fullName) && + Objects.equals(source, that.source); + } + + @Override + public int hashCode() { + return Objects.hash(fullName, source); + } } @Override @@ -178,4 +274,25 @@ public void writeTo(StreamOutput out) throws IOException { } } } + + @Override + public String toString() { + return "GetFieldMappingsResponse{" + + "mappings=" + mappings + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof GetFieldMappingsResponse)) return false; + GetFieldMappingsResponse that = (GetFieldMappingsResponse) o; + return Objects.equals(mappings, that.mappings); + } + + @Override + public int hashCode() { + return Objects.hash(mappings); + } + } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java index 4dc396323c048..b6e785a4d05be 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java @@ -23,16 +23,22 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.test.ESTestCase; +import 
org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Predicate; -public class GetFieldMappingsResponseTests extends ESTestCase { +import static org.hamcrest.CoreMatchers.equalTo; - public void testSerialization() throws IOException { +public class GetFieldMappingsResponseTests extends AbstractStreamableXContentTestCase { + + public void testManualSerialization() throws IOException { Map>> mappings = new HashMap<>(); FieldMappingMetaData fieldMappingMetaData = new FieldMappingMetaData("my field", new BytesArray("{}")); mappings.put("index", Collections.singletonMap("type", Collections.singletonMap("field", fieldMappingMetaData))); @@ -49,4 +55,92 @@ public void testSerialization() throws IOException { } } } + + public void testManualJunkedJson() throws Exception { + // random fields could be parsed as valid mappings, so the junk injected here is limited to arrays and scalar values + final String json = + "{\"index1\":{\"mappings\":" + + "{\"doctype0\":{\"field1\":{\"full_name\":\"my field\",\"mapping\":{\"type\":\"keyword\"}}," + + "\"field0\":{\"full_name\":\"my field\",\"mapping\":{\"type\":\"keyword\"}}}," + // junk here + + "\"junk1\": [\"field1\", {\"field2\":{}}]," + + "\"junk2\": [{\"field3\":{}}]," + + "\"junk3\": 42," + + "\"junk4\": \"Q\"," + + "\"doctype1\":{\"field1\":{\"full_name\":\"my field\",\"mapping\":{\"type\":\"keyword\"}}," + + "\"field0\":{\"full_name\":\"my field\",\"mapping\":{\"type\":\"keyword\"}}}}}," + + "\"index0\":{\"mappings\":" + + "{\"doctype0\":{\"field1\":{\"full_name\":\"my field\",\"mapping\":{\"type\":\"keyword\"}}," + + "\"field0\":{\"full_name\":\"my field\",\"mapping\":{\"type\":\"keyword\"}}}," + + "\"doctype1\":{\"field1\":{\"full_name\":\"my field\",\"mapping\":{\"type\":\"keyword\"}}," + + "\"field0\":{\"full_name\":\"my field\",\"mapping\":{\"type\":\"keyword\"}}}}}}"; + + final XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry(), + LoggingDeprecationHandler.INSTANCE, json.getBytes("UTF-8")); + + final GetFieldMappingsResponse response = GetFieldMappingsResponse.fromXContent(parser); + + FieldMappingMetaData fieldMappingMetaData = + new FieldMappingMetaData("my field", new BytesArray("{\"type\":\"keyword\"}")); + Map fieldMapping = new HashMap<>(); + fieldMapping.put("field0", fieldMappingMetaData); + fieldMapping.put("field1", fieldMappingMetaData); + + Map> typeMapping = new HashMap<>(); + typeMapping.put("doctype0", fieldMapping); + typeMapping.put("doctype1", fieldMapping); + + Map>> mappings = new HashMap<>(); + mappings.put("index0", typeMapping); + mappings.put("index1", typeMapping); + + final Map>> responseMappings = response.mappings(); + assertThat(responseMappings, equalTo(mappings)); + } + + @Override + protected GetFieldMappingsResponse doParseInstance(XContentParser parser) throws IOException { + return GetFieldMappingsResponse.fromXContent(parser); + } + + @Override + protected GetFieldMappingsResponse createBlankInstance() { + return new GetFieldMappingsResponse(); + } + + @Override + protected GetFieldMappingsResponse createTestInstance() { + return new GetFieldMappingsResponse(randomMapping()); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // allow 
random fields at the level of `index` and `index.mappings.doctype.field` + // otherwise a random field could be evaluated as an index name or a type name + return s -> false == (s.matches("(?<index>[^.]+)") + || s.matches("(?<index>[^.]+)\\.mappings\\.(?<doctype>[^.]+)\\.(?<field>[^.]+)")); + } + + private Map>> randomMapping() { + Map>> mappings = new HashMap<>(); + + int indices = randomInt(10); + for (int i = 0; i < indices; i++) { + final Map> doctypesMappings = new HashMap<>(); + int doctypes = randomInt(10); + for (int j = 0; j < doctypes; j++) { + Map fieldMappings = new HashMap<>(); + int fields = randomInt(10); + for (int k = 0; k < fields; k++) { + final String mapping = randomBoolean() ? "{\"type\":\"string\"}" : "{\"type\":\"keyword\"}"; + FieldMappingMetaData metaData = + new FieldMappingMetaData("my field", new BytesArray(mapping)); + fieldMappings.put("field" + k, metaData); + } + doctypesMappings.put("doctype" + j, fieldMappings); + } + mappings.put("index" + i, doctypesMappings); + } + return mappings; + } } From b66ce89497b0c8a581a84fa08a9d360db638cc4f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 18 Jun 2018 15:05:34 -0400 Subject: [PATCH 31/31] Avoid sending duplicate remote failed shard requests (#31313) Today, if a write replication request fails, we send a shard-failed message to the master node to fail that replica. However, if there are many ongoing write replication requests and the master node is busy, we might overwhelm the cluster and the master node with duplicate shard-failed requests. This commit minimizes the number of shard-failed requests in the above scenario by keeping track of the in-flight shard-failed requests and attaching additional listeners to a matching in-flight request instead of sending a duplicate. This issue was discussed at https://discuss.elastic.co/t/half-dead-node-lead-to-cluster-hang/113658/25. --- .../action/shard/ShardStateAction.java | 123 ++++++++++++++- ...rdFailedClusterStateTaskExecutorTests.java | 64 ++++---- .../action/shard/ShardStateActionTests.java | 146 +++++++++++++++++- 3 files changed, 299 insertions(+), 34 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 915e900b9ddf1..f690efa4c9a0c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -25,10 +25,10 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.MasterNodeChangePredicate; import org.elasticsearch.cluster.NotMasterException; @@ -48,6 +48,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.node.NodeClosedException; @@ -68,7 +69,9 @@ import java.util.HashSet; import java.util.List; import java.util.Locale; +import java.util.Objects; import java.util.Set; +import java.util.concurrent.ConcurrentMap; 
import java.util.function.Predicate; public class ShardStateAction extends AbstractComponent { @@ -80,6 +83,10 @@ public class ShardStateAction extends AbstractComponent { private final ClusterService clusterService; private final ThreadPool threadPool; + // a map of in-flight shard-failed requests, keyed by the failed shard entry + // we keep track of these requests in order to avoid sending duplicate shard-failed requests for a single failing shard. + private final ConcurrentMap remoteFailedShardsCache = ConcurrentCollections.newConcurrentMap(); + @Inject public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService, AllocationService allocationService, RoutingService routingService, ThreadPool threadPool) { @@ -146,8 +153,35 @@ private static boolean isMasterChannelException(TransportException exp) { */ public void remoteShardFailed(final ShardId shardId, String allocationId, long primaryTerm, boolean markAsStale, final String message, @Nullable final Exception failure, Listener listener) { assert primaryTerm > 0L : "primary term should be strictly positive"; - FailedShardEntry shardEntry = new FailedShardEntry(shardId, allocationId, primaryTerm, message, failure, markAsStale); - sendShardAction(SHARD_FAILED_ACTION_NAME, clusterService.state(), shardEntry, listener); + final FailedShardEntry shardEntry = new FailedShardEntry(shardId, allocationId, primaryTerm, message, failure, markAsStale); + final CompositeListener compositeListener = new CompositeListener(listener); + final CompositeListener existingListener = remoteFailedShardsCache.putIfAbsent(shardEntry, compositeListener); + if (existingListener == null) { + sendShardAction(SHARD_FAILED_ACTION_NAME, clusterService.state(), shardEntry, new Listener() { + @Override + public void onSuccess() { + try { + compositeListener.onSuccess(); + } finally { + remoteFailedShardsCache.remove(shardEntry); + } + } + @Override + public void onFailure(Exception e) { + try { + compositeListener.onFailure(e); + } finally { + remoteFailedShardsCache.remove(shardEntry); + } + } + }); + } else { + existingListener.addListener(listener); + } + } + + int remoteShardFailedCacheSize() { + return remoteFailedShardsCache.size(); } /** @@ -414,6 +448,23 @@ public String toString() { components.add("markAsStale [" + markAsStale + "]"); return String.join(", ", components); } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FailedShardEntry that = (FailedShardEntry) o; + // Exclude message and exception from equals and hashCode + return Objects.equals(this.shardId, that.shardId) && + Objects.equals(this.allocationId, that.allocationId) && + primaryTerm == that.primaryTerm && + markAsStale == that.markAsStale; + } + + @Override + public int hashCode() { + return Objects.hash(shardId, allocationId, primaryTerm, markAsStale); + } } public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) { @@ -585,6 +636,72 @@ default void onFailure(final Exception e) { } + /** + * A composite listener that allows registering multiple listeners dynamically.
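+ * Listeners added before the result is known are queued and notified together when it completes; + * listeners added after completion are notified immediately with the recorded success or failure.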
+ */ + static final class CompositeListener implements Listener { + private boolean isNotified = false; + private Exception failure = null; + private final List listeners = new ArrayList<>(); + + CompositeListener(Listener listener) { + listeners.add(listener); + } + + void addListener(Listener listener) { + final boolean ready; + synchronized (this) { + ready = this.isNotified; + if (ready == false) { + listeners.add(listener); + } + } + if (ready) { + if (failure != null) { + listener.onFailure(failure); + } else { + listener.onSuccess(); + } + } + } + + private void onCompleted(Exception failure) { + synchronized (this) { + this.failure = failure; + this.isNotified = true; + } + RuntimeException firstException = null; + for (Listener listener : listeners) { + try { + if (failure != null) { + listener.onFailure(failure); + } else { + listener.onSuccess(); + } + } catch (RuntimeException innerEx) { + if (firstException == null) { + firstException = innerEx; + } else { + firstException.addSuppressed(innerEx); + } + } + } + if (firstException != null) { + throw firstException; + } + } + + @Override + public void onSuccess() { + onCompleted(null); + } + + @Override + public void onFailure(Exception failure) { + onCompleted(failure); + } + } + public static class NoLongerPrimaryShardException extends ElasticsearchException { public NoLongerPrimaryShardException(ShardId shardId, String msg) { diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 9eeef54dfd796..01d0c518c1be7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -22,11 +22,11 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.action.shard.ShardStateAction.FailedShardEntry; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.action.shard.ShardStateAction.FailedShardEntry; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -43,6 +43,7 @@ import org.elasticsearch.cluster.routing.allocation.StaleShard; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; @@ -53,9 +54,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Set; -import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -131,10 +130,15 @@ ClusterState applyFailedShards(ClusterState currentState, List fail tasks.addAll(failingTasks); tasks.addAll(nonExistentTasks); ClusterStateTaskExecutor.ClusterTasksResult result = failingExecutor.execute(currentState, tasks); - Map taskResultMap = - 
failingTasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.failure(new RuntimeException("simulated applyFailedShards failure")))); - taskResultMap.putAll(nonExistentTasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.success()))); - assertTaskResults(taskResultMap, result, currentState, false); + List> taskResultList = new ArrayList<>(); + for (FailedShardEntry failingTask : failingTasks) { + taskResultList.add(Tuple.tuple(failingTask, + ClusterStateTaskExecutor.TaskResult.failure(new RuntimeException("simulated applyFailedShards failure")))); + } + for (FailedShardEntry nonExistentTask : nonExistentTasks) { + taskResultList.add(Tuple.tuple(nonExistentTask, ClusterStateTaskExecutor.TaskResult.success())); + } + assertTaskResults(taskResultList, result, currentState, false); } public void testIllegalShardFailureRequests() throws Exception { @@ -147,14 +151,14 @@ public void testIllegalShardFailureRequests() throws Exception { tasks.add(new FailedShardEntry(failingTask.shardId, failingTask.allocationId, randomIntBetween(1, (int) primaryTerm - 1), failingTask.message, failingTask.failure, randomBoolean())); } - Map taskResultMap = - tasks.stream().collect(Collectors.toMap( - Function.identity(), - task -> ClusterStateTaskExecutor.TaskResult.failure(new ShardStateAction.NoLongerPrimaryShardException(task.shardId, - "primary term [" + task.primaryTerm + "] did not match current primary term [" + - currentState.metaData().index(task.shardId.getIndex()).primaryTerm(task.shardId.id()) + "]")))); + List> taskResultList = tasks.stream() + .map(task -> Tuple.tuple(task, ClusterStateTaskExecutor.TaskResult.failure( + new ShardStateAction.NoLongerPrimaryShardException(task.shardId, "primary term [" + + task.primaryTerm + "] did not match current primary term [" + + currentState.metaData().index(task.shardId.getIndex()).primaryTerm(task.shardId.id()) + "]")))) + .collect(Collectors.toList()); ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(currentState, tasks); - assertTaskResults(taskResultMap, result, currentState, false); + assertTaskResults(taskResultList, result, currentState, false); } public void testMarkAsStaleWhenFailingShard() throws Exception { @@ -251,44 +255,44 @@ private static void assertTasksSuccessful( ClusterState clusterState, boolean clusterStateChanged ) { - Map taskResultMap = - tasks.stream().collect(Collectors.toMap(Function.identity(), task -> ClusterStateTaskExecutor.TaskResult.success())); - assertTaskResults(taskResultMap, result, clusterState, clusterStateChanged); + List> taskResultList = tasks.stream() + .map(t -> Tuple.tuple(t, ClusterStateTaskExecutor.TaskResult.success())).collect(Collectors.toList()); + assertTaskResults(taskResultList, result, clusterState, clusterStateChanged); } private static void assertTaskResults( - Map taskResultMap, + List> taskResultList, ClusterStateTaskExecutor.ClusterTasksResult result, ClusterState clusterState, boolean clusterStateChanged ) { // there should be as many task results as tasks - assertEquals(taskResultMap.size(), result.executionResults.size()); + assertEquals(taskResultList.size(), result.executionResults.size()); - for (Map.Entry entry : taskResultMap.entrySet()) { + for (Tuple entry : taskResultList) { // every task should have a corresponding task result - assertTrue(result.executionResults.containsKey(entry.getKey())); + assertTrue(result.executionResults.containsKey(entry.v1())); // the task 
results are as expected - assertEquals(entry.getKey().toString(), entry.getValue().isSuccess(), result.executionResults.get(entry.getKey()).isSuccess()); + assertEquals(entry.v1().toString(), entry.v2().isSuccess(), result.executionResults.get(entry.v1()).isSuccess()); } List shards = clusterState.getRoutingTable().allShards(); - for (Map.Entry entry : taskResultMap.entrySet()) { - if (entry.getValue().isSuccess()) { + for (Tuple entry : taskResultList) { + if (entry.v2().isSuccess()) { // the shard was successfully failed and so should not be in the routing table for (ShardRouting shard : shards) { if (shard.assignedToNode()) { - assertFalse("entry key " + entry.getKey() + ", shard routing " + shard, - entry.getKey().getShardId().equals(shard.shardId()) && - entry.getKey().getAllocationId().equals(shard.allocationId().getId())); + assertFalse("entry key " + entry.v1() + ", shard routing " + shard, + entry.v1().getShardId().equals(shard.shardId()) && + entry.v1().getAllocationId().equals(shard.allocationId().getId())); } } } else { // check we saw the expected failure - ClusterStateTaskExecutor.TaskResult actualResult = result.executionResults.get(entry.getKey()); - assertThat(actualResult.getFailure(), instanceOf(entry.getValue().getFailure().getClass())); - assertThat(actualResult.getFailure().getMessage(), equalTo(entry.getValue().getFailure().getMessage())); + ClusterStateTaskExecutor.TaskResult actualResult = result.executionResults.get(entry.v1()); + assertThat(actualResult.getFailure(), instanceOf(entry.v2().getFailure().getClass())); + assertThat(actualResult.getFailure().getMessage(), equalTo(entry.v2().getFailure().getMessage())); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index bbd326ff2fedb..1d78cdeb98374 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -59,9 +59,10 @@ import org.junit.BeforeClass; import java.io.IOException; -import java.util.UUID; import java.util.Collections; +import java.util.UUID; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -73,6 +74,8 @@ import static org.elasticsearch.test.ClusterServiceUtils.setState; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.sameInstance; +import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -138,6 +141,7 @@ public void tearDown() throws Exception { clusterService.close(); transportService.close(); super.tearDown(); + assertThat(shardStateAction.remoteShardFailedCacheSize(), equalTo(0)); } @AfterClass @@ -381,6 +385,89 @@ public void onFailure(Exception e) { assertThat(failure.get().getMessage(), equalTo(catastrophicError.getMessage())); } + public void testCacheRemoteShardFailed() throws Exception { + final String index = "test"; + setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); + ShardRouting failedShard = getRandomShardRouting(index); + boolean markAsStale = 
randomBoolean(); + int numListeners = between(1, 100); + CountDownLatch latch = new CountDownLatch(numListeners); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + for (int i = 0; i < numListeners; i++) { + shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), + primaryTerm, markAsStale, "test", getSimulatedFailure(), new ShardStateAction.Listener() { + @Override + public void onSuccess() { + latch.countDown(); + } + @Override + public void onFailure(Exception e) { + latch.countDown(); + } + }); + } + CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); + assertThat(capturedRequests, arrayWithSize(1)); + transport.handleResponse(capturedRequests[0].requestId, TransportResponse.Empty.INSTANCE); + latch.await(); + assertThat(transport.capturedRequests(), arrayWithSize(0)); + } + + public void testRemoteShardFailedConcurrently() throws Exception { + final String index = "test"; + final AtomicBoolean shutdown = new AtomicBoolean(false); + setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); + ShardRouting[] failedShards = new ShardRouting[between(1, 5)]; + for (int i = 0; i < failedShards.length; i++) { + failedShards[i] = getRandomShardRouting(index); + } + Thread[] clientThreads = new Thread[between(1, 6)]; + int iterationsPerThread = scaledRandomIntBetween(50, 500); + Phaser barrier = new Phaser(clientThreads.length + 2); // one for master thread, one for the main thread + Thread masterThread = new Thread(() -> { + barrier.arriveAndAwaitAdvance(); + while (shutdown.get() == false) { + for (CapturingTransport.CapturedRequest request : transport.getCapturedRequestsAndClear()) { + if (randomBoolean()) { + transport.handleResponse(request.requestId, TransportResponse.Empty.INSTANCE); + } else { + transport.handleRemoteError(request.requestId, randomFrom(getSimulatedFailure())); + } + } + } + }); + masterThread.start(); + + AtomicInteger notifiedResponses = new AtomicInteger(); + for (int t = 0; t < clientThreads.length; t++) { + clientThreads[t] = new Thread(() -> { + barrier.arriveAndAwaitAdvance(); + for (int i = 0; i < iterationsPerThread; i++) { + ShardRouting failedShard = randomFrom(failedShards); + shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), + randomLongBetween(1, Long.MAX_VALUE), randomBoolean(), "test", getSimulatedFailure(), new ShardStateAction.Listener() { + @Override + public void onSuccess() { + notifiedResponses.incrementAndGet(); + } + @Override + public void onFailure(Exception e) { + notifiedResponses.incrementAndGet(); + } + }); + } + }); + clientThreads[t].start(); + } + barrier.arriveAndAwaitAdvance(); + for (Thread t : clientThreads) { + t.join(); + } + assertBusy(() -> assertThat(notifiedResponses.get(), equalTo(clientThreads.length * iterationsPerThread))); + shutdown.set(true); + masterThread.join(); + } + private ShardRouting getRandomShardRouting(String index) { IndexRoutingTable indexRoutingTable = clusterService.state().routingTable().index(index); ShardsIterator shardsIterator = indexRoutingTable.randomAllActiveShardsIt(); @@ -452,4 +539,61 @@ BytesReference serialize(Writeable writeable, Version version) throws IOExceptio return out.bytes(); } } + + public void testCompositeListener() throws Exception { + AtomicInteger successCount = new AtomicInteger(); + AtomicInteger failureCount = new AtomicInteger(); + Exception failure = randomBoolean() ? 
getSimulatedFailure() : null; + ShardStateAction.CompositeListener compositeListener = new ShardStateAction.CompositeListener(new ShardStateAction.Listener() { + @Override + public void onSuccess() { + successCount.incrementAndGet(); + } + @Override + public void onFailure(Exception e) { + assertThat(e, sameInstance(failure)); + failureCount.incrementAndGet(); + } + }); + int iterationsPerThread = scaledRandomIntBetween(100, 1000); + Thread[] threads = new Thread[between(1, 4)]; + Phaser barrier = new Phaser(threads.length + 1); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + barrier.arriveAndAwaitAdvance(); + for (int n = 0; n < iterationsPerThread; n++) { + compositeListener.addListener(new ShardStateAction.Listener() { + @Override + public void onSuccess() { + successCount.incrementAndGet(); + } + @Override + public void onFailure(Exception e) { + assertThat(e, sameInstance(failure)); + failureCount.incrementAndGet(); + } + }); + } + }); + threads[i].start(); + } + barrier.arriveAndAwaitAdvance(); + if (failure != null) { + compositeListener.onFailure(failure); + } else { + compositeListener.onSuccess(); + } + for (Thread t : threads) { + t.join(); + } + assertBusy(() -> { + if (failure != null) { + assertThat(successCount.get(), equalTo(0)); + assertThat(failureCount.get(), equalTo(threads.length * iterationsPerThread + 1)); + } else { + assertThat(successCount.get(), equalTo(threads.length * iterationsPerThread + 1)); + assertThat(failureCount.get(), equalTo(0)); + } + }); + } }
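
The deduplication introduced by this patch, a `ConcurrentMap.putIfAbsent` guard in front of a composite listener, is a pattern that is useful beyond `ShardStateAction`. The following is a minimal, self-contained sketch of the same idea; `RequestDeduplicator`, its `execute` method, and the `Runnable` callbacks are hypothetical names chosen for illustration and are not part of this patch or of the Elasticsearch codebase.

["source","java"]
--------------------------------------------------
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Consumer;

// Sketch: concurrent callers for an equal key share a single in-flight request.
final class RequestDeduplicator<K> {

    private final ConcurrentMap<K, CompositeCallback> inFlight = new ConcurrentHashMap<>();

    // Invokes `send` only if no request for an equal `key` is already in flight;
    // otherwise attaches `callback` to the pending request. `send` receives a
    // completion handle that it must run exactly once when the request finishes.
    void execute(K key, Consumer<Runnable> send, Runnable callback) {
        final CompositeCallback fresh = new CompositeCallback(callback);
        final CompositeCallback existing = inFlight.putIfAbsent(key, fresh);
        if (existing == null) {
            send.accept(() -> {
                try {
                    fresh.onCompleted();  // notify every attached callback once
                } finally {
                    inFlight.remove(key); // later calls for this key start fresh
                }
            });
        } else {
            existing.addCallback(callback);
        }
    }

    private static final class CompositeCallback {
        private boolean notified = false;
        private final List<Runnable> callbacks = new ArrayList<>();

        CompositeCallback(Runnable first) {
            callbacks.add(first);
        }

        void addCallback(Runnable callback) {
            final boolean ready;
            synchronized (this) {
                ready = notified;
                if (ready == false) {
                    callbacks.add(callback); // queued until the request completes
                }
            }
            if (ready) {
                callback.run(); // the request already completed, notify immediately
            }
        }

        void onCompleted() {
            synchronized (this) {
                notified = true; // no further callbacks will be queued after this
            }
            callbacks.forEach(Runnable::run);
        }
    }
}
--------------------------------------------------

As in the patch, the map entry is removed only after the attached callbacks have been notified, so a caller that arrives after completion triggers a fresh request instead of observing a stale result.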