diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 1461dae5ae107..5e5207b279eff 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -379,7 +379,7 @@ You can choose which boxes to test by setting the `-Pvagrant.boxes` project prop the valid options for this property are: * `sample` - The default, only chooses ubuntu-1404 and centos-7 -* List of box names, comma separated (e.g. `oel-7,fedora-26`) - Chooses exactly the boxes listed. +* List of box names, comma separated (e.g. `oel-7,fedora-28`) - Chooses exactly the boxes listed. * `linux-all` - All linux boxes. * `windows-all` - All Windows boxes. If there are any Windows boxes which do not have images available when this value is provided, the build will fail. @@ -406,8 +406,8 @@ These are the linux flavors supported, all of which we provide images for * debian-9 aka stretch, the current debian stable distribution * centos-6 * centos-7 -* fedora-26 * fedora-27 +* fedora-28 * oel-6 aka Oracle Enterprise Linux 6 * oel-7 aka Oracle Enterprise Linux 7 * sles-12 @@ -512,7 +512,9 @@ into it vagrant ssh debian-9 -------------------------------------------- -Now inside the VM, to run the https://github.com/sstephenson/bats[bats] packaging tests +Now inside the VM, start the packaging tests from the terminal. There are two packaging +test projects. The old ones are written with https://github.com/sstephenson/bats[bats] +and only run on linux. To run them do -------------------------------------------- cd $PACKAGING_ARCHIVES @@ -524,18 +526,36 @@ sudo bats $BATS_TESTS/*.bats sudo bats $BATS_TESTS/20_tar_package.bats $BATS_TESTS/25_tar_plugins.bats -------------------------------------------- -To run the Java packaging tests, again inside the VM +The new packaging tests are written in Java and run on both linux and windows. On +linux (again, inside the VM) -------------------------------------------- -bash $PACKAGING_TESTS/run-tests.sh +# run the full suite +sudo bash $PACKAGING_TESTS/run-tests.sh + +# run specific test cases +sudo bash $PACKAGING_TESTS/run-tests.sh \ + org.elasticsearch.packaging.test.DefaultZipTests \ + org.elasticsearch.packaging.test.OssZipTests -------------------------------------------- -or on Windows +or on Windows, from a terminal running as Administrator -------------------------------------------- +# run the full suite powershell -File $Env:PACKAGING_TESTS/run-tests.ps1 + +# run specific test cases +powershell -File $Env:PACKAGING_TESTS/run-tests.ps1 ` + org.elasticsearch.packaging.test.DefaultZipTests ` + org.elasticsearch.packaging.test.OssZipTests -------------------------------------------- +Note that on Windows boxes when running from inside the GUI, you may have to log out and +back in to the `vagrant` user (password `vagrant`) for the environment variables that +locate the packaging tests and distributions to take effect, due to how vagrant provisions +Windows machines. 
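The runner scripts above pass any test class names straight through to a JUnit runner inside the VM (the Gradle plugin changes later in this diff wire them to `org.elasticsearch.packaging.VMTestRunner`). The runner class itself is not part of this change, so the following Java sketch is only an assumption about the kind of JUnit 4 entry point the scripts rely on: resolve the class names given as arguments, run them, and exit nonzero on failure so the calling script can propagate it.

--------------------------------------------
package org.elasticsearch.packaging;

import org.junit.runner.JUnitCore;
import org.junit.runner.Result;

// Hypothetical sketch of the entry point the run-tests scripts call;
// not the actual VMTestRunner implementation from this change.
public class VMTestRunner {
    public static void main(String[] args) throws ClassNotFoundException {
        // resolve the fully qualified test class names passed on the command line
        Class<?>[] testClasses = new Class<?>[args.length];
        for (int i = 0; i < args.length; i++) {
            testClasses[i] = Class.forName(args[i]);
        }
        // run them with JUnit and report failure through the exit code
        Result result = JUnitCore.runClasses(testClasses);
        System.exit(result.wasSuccessful() ? 0 : 1);
    }
}
--------------------------------------------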
+ When you've made changes you want to test, keep the VM up and reload the tests and distributions inside by running (on the host) diff --git a/Vagrantfile b/Vagrantfile index 1c259c1125f00..d53c80754e637 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -97,15 +97,15 @@ Vagrant.configure(2) do |config| rpm_common config, box end end - 'fedora-26'.tap do |box| + 'fedora-27'.tap do |box| config.vm.define box, define_opts do |config| - config.vm.box = 'elastic/fedora-26-x86_64' + config.vm.box = 'elastic/fedora-27-x86_64' dnf_common config, box end end - 'fedora-27'.tap do |box| + 'fedora-28'.tap do |box| config.vm.define box, define_opts do |config| - config.vm.box = 'elastic/fedora-27-x86_64' + config.vm.box = 'elastic/fedora-28-x86_64' dnf_common config, box end end @@ -237,6 +237,7 @@ def linux_common(config, config.vm.provision 'markerfile', type: 'shell', inline: <<-SHELL touch /etc/is_vagrant_vm + touch /is_vagrant_vm # for consistency between linux and windows SHELL # This prevents leftovers from previous tests using the diff --git a/build.gradle b/build.gradle index 395e1f600c9cc..05ad5479e8dea 100644 --- a/build.gradle +++ b/build.gradle @@ -36,6 +36,16 @@ import java.nio.file.Files import java.nio.file.Path import java.security.MessageDigest +plugins { + id 'com.gradle.build-scan' version '1.13.2' +} +if (properties.get("org.elasticsearch.acceptScanTOS", "false") == "true") { + buildScan { + termsOfServiceUrl = 'https://gradle.com/terms-of-service' + termsOfServiceAgree = 'yes' + } +} + // common maven publishing configuration subprojects { group = 'org.elasticsearch' diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 72d71f25f69f2..6e8a5fd15edf1 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -23,8 +23,8 @@ class VagrantTestPlugin implements Plugin { 'centos-7', 'debian-8', 'debian-9', - 'fedora-26', 'fedora-27', + 'fedora-28', 'oel-6', 'oel-7', 'opensuse-42', @@ -52,6 +52,8 @@ class VagrantTestPlugin implements Plugin { static final List DISTRIBUTIONS = unmodifiableList([ 'archives:tar', 'archives:oss-tar', + 'archives:zip', + 'archives:oss-zip', 'packages:rpm', 'packages:oss-rpm', 'packages:deb', @@ -242,13 +244,27 @@ class VagrantTestPlugin implements Plugin { Task createLinuxRunnerScript = project.tasks.create('createLinuxRunnerScript', FileContentsTask) { dependsOn copyPackagingTests file "${testsDir}/run-tests.sh" - contents "java -cp \"\$PACKAGING_TESTS/*\" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass}" + contents """\ + if [ "\$#" -eq 0 ]; then + test_args=( "${-> project.extensions.esvagrant.testClass}" ) + else + test_args=( "\$@" ) + fi + java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}" + """ } Task createWindowsRunnerScript = project.tasks.create('createWindowsRunnerScript', FileContentsTask) { dependsOn copyPackagingTests file "${testsDir}/run-tests.ps1" + // the use of $args rather than param() here is deliberate because the syntax for array (multivalued) parameters is likely + // a little trappy for those unfamiliar with powershell contents """\ - java -cp "\$Env:PACKAGING_TESTS/*" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass} + if (\$args.Count -eq 0) { + \$testArgs = @("${-> 
project.extensions.esvagrant.testClass}") + } else { + \$testArgs = \$args + } + java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs exit \$LASTEXITCODE """ } @@ -525,9 +541,10 @@ class VagrantTestPlugin implements Plugin { if (LINUX_BOXES.contains(box)) { javaPackagingTest.command = 'ssh' - javaPackagingTest.args = ['--command', 'bash "$PACKAGING_TESTS/run-tests.sh"'] + javaPackagingTest.args = ['--command', 'sudo bash "$PACKAGING_TESTS/run-tests.sh"'] } else { javaPackagingTest.command = 'winrm' + // winrm commands run as administrator javaPackagingTest.args = ['--command', 'powershell -File "$Env:PACKAGING_TESTS/run-tests.ps1"'] } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a547982e3b613..c98e265792b5b 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.4.0-snapshot-59f2b7aec2 +lucene = 7.4.0-snapshot-cc2ee23050 # optional dependencies spatial4j = 0.7 diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index a5a6b9f7bd271..c9526346e5bc1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -30,6 +30,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -711,6 +712,16 @@ static Request createRepository(PutRepositoryRequest putRepositoryRequest) throw return request; } + static Request deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest) { + String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot").addPathPart(deleteRepositoryRequest.name()).build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + + Params parameters = new Params(request); + parameters.withMasterTimeout(deleteRepositoryRequest.masterNodeTimeout()); + parameters.withTimeout(deleteRepositoryRequest.timeout()); + return request; + } + static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 5dbf2709d9988..68e32abb69dc0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -26,8 +26,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; -import 
org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -592,7 +590,7 @@ protected final Resp performRequest(Req reques throw validationException; } Request req = requestConverter.apply(request); - req.setHeaders(headers); + addHeaders(req, headers); Response response; try { response = client.performRequest(req); @@ -642,12 +640,19 @@ protected final void performRequestAsync(Req r listener.onFailure(e); return; } - req.setHeaders(headers); + addHeaders(req, headers); ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); client.performRequestAsync(req, responseListener); } + private static void addHeaders(Request request, Header... headers) { + Objects.requireNonNull(headers, "headers cannot be null"); + for (Header header : headers) { + request.addHeader(header.getName(), header.getValue()); + } + } + final ResponseListener wrapResponseListener(CheckedFunction responseConverter, ActionListener actionListener, Set ignores) { return new ResponseListener() { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index aec94586bee30..d969232f0d70f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -21,6 +21,8 @@ import org.apache.http.Header; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; @@ -90,4 +92,28 @@ public void createRepositoryAsync(PutRepositoryRequest putRepositoryRequest, restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, PutRepositoryResponse::fromXContent, listener, emptySet(), headers); } + + /** + * Deletes a snapshot repository. + *
<p>
+ * See Snapshot and Restore + * API on elastic.co + */ + public DeleteRepositoryResponse deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest, Header... headers) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, + DeleteRepositoryResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously deletes a snapshot repository. + *
<p>
+ * See Snapshot and Restore + * API on elastic.co + */ + public void deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryRequest, + ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, + DeleteRepositoryResponse::fromXContent, listener, emptySet(), headers); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java index 617b35c4d40f3..0bd6ecef8fb5c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java @@ -73,12 +73,12 @@ public void initClients() throws IOException { final RestClient restClient = mock(RestClient.class); restHighLevelClient = new CustomRestClient(restClient); - doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders()[0])) + doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders().iterator().next())) .when(restClient) .performRequest(any(Request.class)); doAnswer(inv -> mockPerformRequestAsync( - ((Request) inv.getArguments()[0]).getHeaders()[0], + ((Request) inv.getArguments()[0]).getHeaders().iterator().next(), (ResponseListener) inv.getArguments()[1])) .when(restClient) .performRequestAsync(any(Request.class), any(ResponseListener.class)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 4a0276e74d228..c5ee387d315cb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -1546,7 +1547,7 @@ public void testGetRepositories() { } public void testCreateRepository() throws IOException { - String repository = "repo"; + String repository = randomIndicesNames(1, 1)[0]; String endpoint = "/_snapshot/" + repository; Path repositoryLocation = PathUtils.get("."); PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(repository); @@ -1555,10 +1556,10 @@ public void testCreateRepository() throws IOException { putRepositoryRequest.settings( Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .build()); + .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .build()); Request 
request = RequestConverters.createRepository(putRepositoryRequest); assertThat(endpoint, equalTo(request.getEndpoint())); @@ -1566,6 +1567,24 @@ public void testCreateRepository() throws IOException { assertToXContentBody(putRepositoryRequest, request.getEntity()); } + public void testDeleteRepository() { + Map expectedParams = new HashMap<>(); + String repository = randomIndicesNames(1, 1)[0]; + + StringBuilder endpoint = new StringBuilder("/_snapshot/" + repository); + + DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest(); + deleteRepositoryRequest.name(repository); + setRandomMasterTimeout(deleteRepositoryRequest, expectedParams); + setRandomTimeout(deleteRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = RequestConverters.deleteRepository(deleteRepositoryRequest); + assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod())); + assertThat(expectedParams, equalTo(request.getParameters())); + assertNull(request.getEntity()); + } + public void testPutTemplateRequest() throws Exception { Map names = new HashMap<>(); names.put("log", "log"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index 1d0ea953cd5c1..02e03bfec764e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -19,7 +19,11 @@ package org.elasticsearch.client; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; @@ -29,6 +33,7 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.Collections; import static org.hamcrest.Matchers.equalTo; @@ -40,7 +45,6 @@ private PutRepositoryResponse createTestRepository(String repository, String typ request.type(type); return execute(request, highLevelClient().snapshot()::createRepository, highLevelClient().snapshot()::createRepositoryAsync); - } public void testCreateRepository() throws IOException { @@ -48,7 +52,7 @@ public void testCreateRepository() throws IOException { assertTrue(response.isAcknowledged()); } - public void testModulesGetRepositoriesUsingParams() throws IOException { + public void testSnapshotGetRepositoriesUsingParams() throws IOException { String testRepository = "test"; assertTrue(createTestRepository(testRepository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); @@ -60,7 +64,7 @@ public void testModulesGetRepositoriesUsingParams() throws IOException { assertThat(1, equalTo(response.repositories().size())); } - public void testModulesGetDefaultRepositories() throws IOException { + public void testSnapshotGetDefaultRepositories() throws IOException { assertTrue(createTestRepository("other", 
FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); assertTrue(createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); @@ -69,7 +73,7 @@ public void testModulesGetDefaultRepositories() throws IOException { assertThat(2, equalTo(response.repositories().size())); } - public void testModulesGetRepositoriesNonExistent() throws IOException { + public void testSnapshotGetRepositoriesNonExistent() { String repository = "doesnotexist"; GetRepositoriesRequest request = new GetRepositoriesRequest(new String[]{repository}); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(request, @@ -79,4 +83,23 @@ public void testModulesGetRepositoriesNonExistent() throws IOException { assertThat(exception.getMessage(), equalTo( "Elasticsearch exception [type=repository_missing_exception, reason=[" + repository + "] missing]")); } + + public void testSnapshotDeleteRepository() throws IOException { + String repository = "test"; + String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}"; + + highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository, + Collections.emptyMap(), new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); + + GetRepositoriesRequest request = new GetRepositoriesRequest(); + GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories, + highLevelClient().snapshot()::getRepositoriesAsync); + assertThat(1, equalTo(response.repositories().size())); + + DeleteRepositoryRequest deleteRequest = new DeleteRepositoryRequest(repository); + DeleteRepositoryResponse deleteResponse = execute(deleteRequest, highLevelClient().snapshot()::deleteRepository, + highLevelClient().snapshot()::deleteRepositoryAsync); + + assertTrue(deleteResponse.isAcknowledged()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index c57f8e2a2fbd5..0a57fafe5be59 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -21,6 +21,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; @@ -235,6 +237,66 @@ public void onFailure(Exception e) { } } + public void testSnapshotDeleteRepository() throws IOException { + RestHighLevelClient client = highLevelClient(); + + createTestRepositories(); + + // tag::delete-repository-request + DeleteRepositoryRequest request = new DeleteRepositoryRequest(repositoryName); + // end::delete-repository-request + + // tag::delete-repository-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::delete-repository-request-masterTimeout + // 
tag::delete-repository-request-timeout + request.timeout(TimeValue.timeValueMinutes(1)); // <1> + request.timeout("1m"); // <2> + // end::delete-repository-request-timeout + + // tag::delete-repository-execute + DeleteRepositoryResponse response = client.snapshot().deleteRepository(request); + // end::delete-repository-execute + + // tag::delete-repository-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::delete-repository-response + assertTrue(acknowledged); + } + + public void testSnapshotDeleteRepositoryAsync() throws InterruptedException { + RestHighLevelClient client = highLevelClient(); + { + DeleteRepositoryRequest request = new DeleteRepositoryRequest(); + + // tag::delete-repository-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(DeleteRepositoryResponse deleteRepositoryResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::delete-repository-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::delete-repository-execute-async + client.snapshot().deleteRepositoryAsync(request, listener); // <1> + // end::delete-repository-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + private void createTestRepositories() throws IOException { PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); request.type(FsRepository.TYPE); diff --git a/client/rest/src/main/java/org/elasticsearch/client/Request.java b/client/rest/src/main/java/org/elasticsearch/client/Request.java index 92610239cae92..59b82e5bf9649 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Request.java @@ -19,14 +19,17 @@ package org.elasticsearch.client; -import org.apache.http.entity.ContentType; import org.apache.http.Header; import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import java.util.Arrays; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -36,13 +39,12 @@ * HTTP Request to Elasticsearch. */ public final class Request { - private static final Header[] NO_HEADERS = new Header[0]; private final String method; private final String endpoint; private final Map parameters = new HashMap<>(); + private final List
<Header>
headers = new ArrayList<>(); private HttpEntity entity; - private Header[] headers = NO_HEADERS; private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory = HttpAsyncResponseConsumerFactory.DEFAULT; @@ -125,21 +127,19 @@ public HttpEntity getEntity() { } /** - * Set the headers to attach to the request. + * Add the provided header to the request. */ - public void setHeaders(Header... headers) { - Objects.requireNonNull(headers, "headers cannot be null"); - for (Header header : headers) { - Objects.requireNonNull(header, "header cannot be null"); - } - this.headers = headers; + public void addHeader(String name, String value) { + Objects.requireNonNull(name, "header name cannot be null"); + Objects.requireNonNull(value, "header value cannot be null"); + this.headers.add(new ReqHeader(name, value)); } /** * Headers to attach to the request. */ - public Header[] getHeaders() { - return headers; + List
getHeaders() { + return Collections.unmodifiableList(headers); } /** @@ -175,13 +175,13 @@ public String toString() { if (entity != null) { b.append(", entity=").append(entity); } - if (headers.length > 0) { + if (headers.size() > 0) { b.append(", headers="); - for (int h = 0; h < headers.length; h++) { + for (int h = 0; h < headers.size(); h++) { if (h != 0) { b.append(','); } - b.append(headers[h].toString()); + b.append(headers.get(h).toString()); } } if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { @@ -204,12 +204,40 @@ public boolean equals(Object obj) { && endpoint.equals(other.endpoint) && parameters.equals(other.parameters) && Objects.equals(entity, other.entity) - && Arrays.equals(headers, other.headers) + && headers.equals(other.headers) && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory); } @Override public int hashCode() { - return Objects.hash(method, endpoint, parameters, entity, Arrays.hashCode(headers), httpAsyncResponseConsumerFactory); + return Objects.hash(method, endpoint, parameters, entity, headers.hashCode(), httpAsyncResponseConsumerFactory); + } + + /** + * Custom implementation of {@link BasicHeader} that overrides equals and hashCode. + */ + static final class ReqHeader extends BasicHeader { + + ReqHeader(String name, String value) { + super(name, value); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof ReqHeader) { + Header otherHeader = (Header) other; + return Objects.equals(getName(), otherHeader.getName()) && + Objects.equals(getValue(), otherHeader.getValue()); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getValue()); + } } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 05fa4d536b3b6..33171e18e743d 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -215,7 +215,7 @@ public void performRequestAsync(Request request, ResponseListener responseListen @Deprecated public Response performRequest(String method, String endpoint, Header... headers) throws IOException { Request request = new Request(method, endpoint); - request.setHeaders(headers); + addHeaders(request, headers); return performRequest(request); } @@ -237,7 +237,7 @@ public Response performRequest(String method, String endpoint, Header... headers public Response performRequest(String method, String endpoint, Map params, Header... headers) throws IOException { Request request = new Request(method, endpoint); addParameters(request, params); - request.setHeaders(headers); + addHeaders(request, headers); return performRequest(request); } @@ -264,7 +264,7 @@ public Response performRequest(String method, String endpoint, Map requestHeaders) { // request headers override default headers, so we don't add default headers if they exist as request headers - final Set requestNames = new HashSet<>(requestHeaders.length); + final Set requestNames = new HashSet<>(requestHeaders.size()); for (Header requestHeader : requestHeaders) { httpRequest.addHeader(requestHeader); requestNames.add(requestHeader.getName()); @@ -877,10 +877,24 @@ private static class HostTuple { } } + /** + * Add all headers from the provided varargs argument to a {@link Request}. 
This only exists + * to support methods that exist for backwards compatibility. + */ + @Deprecated + private static void addHeaders(Request request, Header... headers) { + Objects.requireNonNull(headers, "headers cannot be null"); + for (Header header : headers) { + Objects.requireNonNull(header, "header cannot be null"); + request.addHeader(header.getName(), header.getValue()); + } + } + /** * Add all parameters from a map to a {@link Request}. This only exists * to support methods that exist for backwards compatibility. */ + @Deprecated private static void addParameters(Request request, Map parameters) { Objects.requireNonNull(parameters, "parameters cannot be null"); for (Map.Entry entry : parameters.entrySet()) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java index 6625c389c6be8..29bbf23a1f20e 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java @@ -19,21 +19,21 @@ package org.elasticsearch.client; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; -import static org.junit.Assert.assertArrayEquals; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; @@ -127,31 +127,33 @@ public void testSetJsonEntity() throws IOException { assertEquals(json, new String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset())); } - public void testSetHeaders() { + public void testAddHeader() { final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"}); final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); Request request = new Request(method, endpoint); try { - request.setHeaders((Header[]) null); + request.addHeader(null, randomAsciiLettersOfLengthBetween(3, 10)); fail("expected failure"); } catch (NullPointerException e) { - assertEquals("headers cannot be null", e.getMessage()); + assertEquals("header name cannot be null", e.getMessage()); } try { - request.setHeaders(new Header [] {null}); + request.addHeader(randomAsciiLettersOfLengthBetween(3, 10), null); fail("expected failure"); } catch (NullPointerException e) { - assertEquals("header cannot be null", e.getMessage()); + assertEquals("header value cannot be null", e.getMessage()); } - Header[] headers = new Header[between(0, 5)]; - for (int i = 0; i < headers.length; i++) { - headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + int numHeaders = between(0, 5); + List
headers = new ArrayList<>(); + for (int i = 0; i < numHeaders; i++) { + Header header = new Request.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3)); + headers.add(header); + request.addHeader(header.getName(), header.getValue()); } - request.setHeaders(headers); - assertArrayEquals(headers, request.getHeaders()); + assertEquals(headers, new ArrayList<>(request.getHeaders())); } public void testEqualsAndHashCode() { @@ -168,7 +170,7 @@ public void testEqualsAndHashCode() { assertNotEquals(mutant, request); } - private Request randomRequest() { + private static Request randomRequest() { Request request = new Request( randomFrom(new String[] {"GET", "PUT", "DELETE", "POST", "HEAD", "OPTIONS"}), randomAsciiAlphanumOfLength(5)); @@ -192,11 +194,9 @@ private Request randomRequest() { if (randomBoolean()) { int headerCount = between(1, 5); - Header[] headers = new Header[headerCount]; for (int i = 0; i < headerCount; i++) { - headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + request.addHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); } - request.setHeaders(headers); } if (randomBoolean()) { @@ -206,13 +206,13 @@ private Request randomRequest() { return request; } - private Request copy(Request request) { + private static Request copy(Request request) { Request copy = new Request(request.getMethod(), request.getEndpoint()); copyMutables(request, copy); return copy; } - private Request mutate(Request request) { + private static Request mutate(Request request) { if (randomBoolean()) { // Mutate request or method but keep everything else constant Request mutant = randomBoolean() @@ -231,11 +231,7 @@ private Request mutate(Request request) { mutant.setJsonEntity("mutant"); // randomRequest can't produce this value return mutant; case 2: - if (mutant.getHeaders().length > 0) { - mutant.setHeaders(new Header[0]); - } else { - mutant.setHeaders(new BasicHeader("extra", "m")); - } + mutant.addHeader("extra", "m"); return mutant; case 3: mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); @@ -245,12 +241,14 @@ private Request mutate(Request request) { } } - private void copyMutables(Request from, Request to) { + private static void copyMutables(Request from, Request to) { for (Map.Entry param : from.getParameters().entrySet()) { to.addParameter(param.getKey(), param.getValue()); } to.setEntity(from.getEntity()); - to.setHeaders(from.getHeaders()); + for (Header header : from.getHeaders()) { + to.addHeader(header.getName(), header.getValue()); + } to.setHttpAsyncResponseConsumerFactory(from.getHttpAsyncResponseConsumerFactory()); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 35cac627bbe6a..a3d0196dab9a8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -29,7 +29,6 @@ import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.TargetAuthenticationStrategy; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; @@ -379,7 +378,9 @@ private Response 
bodyTest(RestClient restClient, String method, int statusCode,
         String requestBody = "{ \"field\": \"value\" }";
         Request request = new Request(method, "/" + statusCode);
         request.setJsonEntity(requestBody);
-        request.setHeaders(headers);
+        for (Header header : headers) {
+            request.addHeader(header.getName(), header.getValue());
+        }
         Response esResponse;
         try {
             esResponse = restClient.performRequest(request);
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java
index 714d2e57e6d20..3811b60023b43 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java
@@ -312,7 +312,7 @@ public void testBody() throws IOException {
     }

     /**
-     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}.
+     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddHeader()}.
      */
     @Deprecated
     public void tesPerformRequestOldStyleNullHeaders() throws IOException {
@@ -333,7 +333,7 @@ public void tesPerformRequestOldStyleNullHeaders() throws IOException {
     }

     /**
-     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetParameters()}.
+     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}.
      */
     @Deprecated
     public void testPerformRequestOldStyleWithNullParams() throws IOException {
@@ -362,7 +362,9 @@ public void testHeaders() throws IOException {
         final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header");
         final int statusCode = randomStatusCode(getRandom());
         Request request = new Request(method, "/" + statusCode);
-        request.setHeaders(requestHeaders);
+        for (Header requestHeader : requestHeaders) {
+            request.addHeader(requestHeader.getName(), requestHeader.getValue());
+        }
         Response esResponse;
         try {
             esResponse = restClient.performRequest(request);
@@ -436,9 +438,9 @@ private HttpUriRequest performRandomRequest(String method) throws Exception {
         final Set uniqueNames = new HashSet<>();
         if (randomBoolean()) {
             Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header");
-            request.setHeaders(headers);
             for (Header header : headers) {
-                expectedRequest.addHeader(header);
+                request.addHeader(header.getName(), header.getValue());
+                expectedRequest.addHeader(new Request.ReqHeader(header.getName(), header.getValue()));
                 uniqueNames.add(header.getName());
             }
         }
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java
index ea124828e45eb..15fa5c0f99596 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java
@@ -27,11 +27,13 @@
 import java.net.URI;
 import java.util.Collections;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;

 import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
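The RestClientTests hunks that follow replace each bare `latch.await()` with a bounded, asserted wait and move the listener assertions into `try`/`finally` blocks so the latch is always released, even when an assertion fails. The following self-contained sketch shows the same pattern outside the test suite; the `Listener` interface and `performAsync` method are illustrative stand-ins, not real client API:

--------------------------------------------
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.junit.Assert.assertTrue;

public class LatchedListenerPattern {

    interface Listener {
        void onFailure(Exception exception);
    }

    // Stand-in for the API under test; like the mocked client in these tests,
    // it invokes the listener synchronously on the calling thread.
    static void performAsync(Listener listener) {
        listener.onFailure(new UnsupportedOperationException("http method not supported: unsupported"));
    }

    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        performAsync(exception -> {
            try {
                assertTrue(exception instanceof UnsupportedOperationException);
            } finally {
                latch.countDown(); // release the latch even if the assertion fails
            }
        });
        // a bounded await turns a never-invoked listener into a test failure
        // instead of a hung build
        assertTrue("timed out waiting for the listener", latch.await(1000, TimeUnit.MILLISECONDS));
    }
}
--------------------------------------------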
@@ -57,17 +59,20 @@ public void testPerformAsyncWithUnsupportedMethod() throws Exception { restClient.performRequestAsync(new Request("unsupported", randomAsciiLettersOfLength(5)), new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of unsupported method"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(UnsupportedOperationException.class)); - assertEquals("http method not supported: unsupported", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(UnsupportedOperationException.class)); + assertEquals("http method not supported: unsupported", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -81,17 +86,20 @@ public void testPerformAsyncOldStyleWithUnsupportedMethod() throws Exception { restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of unsupported method"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(UnsupportedOperationException.class)); - assertEquals("http method not supported: unsupported", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(UnsupportedOperationException.class)); + assertEquals("http method not supported: unsupported", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -105,22 +113,25 @@ public void testPerformOldStyleAsyncWithNullParams() throws Exception { restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of null parameters"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("parameters cannot be null", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(NullPointerException.class)); + assertEquals("parameters cannot be null", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}. + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddHeader()}. 
*/ @Deprecated public void testPerformOldStyleAsyncWithNullHeaders() throws Exception { @@ -129,18 +140,21 @@ public void testPerformOldStyleAsyncWithNullHeaders() throws Exception { ResponseListener listener = new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of null headers"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("header cannot be null", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(NullPointerException.class)); + assertEquals("header cannot be null", exception.getMessage()); + } finally { + latch.countDown(); + } } }; restClient.performRequestAsync("GET", randomAsciiLettersOfLength(5), listener, (Header) null); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -150,17 +164,20 @@ public void testPerformAsyncWithWrongEndpoint() throws Exception { restClient.performRequestAsync(new Request("GET", "::http:///"), new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of wrong endpoint"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(IllegalArgumentException.class)); - assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(IllegalArgumentException.class)); + assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -174,17 +191,20 @@ public void testPerformAsyncOldStyleWithWrongEndpoint() throws Exception { restClient.performRequestAsync("GET", "::http:///", new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of wrong endpoint"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(IllegalArgumentException.class)); - assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(IllegalArgumentException.class)); + assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 5ee97399b34e6..f3ce112fea1a1 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -27,9 +27,7 @@ import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.client.CredentialsProvider; import 
org.apache.http.client.config.RequestConfig; -import org.apache.http.entity.BasicHttpEntity; import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.impl.nio.reactor.IOReactorConfig; @@ -52,8 +50,6 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.security.KeyStore; -import java.util.Collections; -import java.util.Map; import java.util.concurrent.CountDownLatch; /** @@ -176,9 +172,8 @@ public void onFailure(Exception exception) { request.setJsonEntity("{\"json\":\"text\"}"); //end::rest-client-body-shorter //tag::rest-client-headers - request.setHeaders( - new BasicHeader("Accept", "text/plain"), - new BasicHeader("Cache-Control", "no-cache")); + request.addHeader("Accept", "text/plain"); + request.addHeader("Cache-Control", "no-cache"); //end::rest-client-headers //tag::rest-client-response-consumer request.setHttpAsyncResponseConsumerFactory( diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java index a0a6641abbc5f..07bae6c17fdd2 100644 --- a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java +++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java @@ -100,7 +100,7 @@ static Header[] randomHeaders(Random random, final String baseName) { if (random.nextBoolean()) { headerName = headerName + i; } - headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiOfLengthBetween(random, 3, 10)); + headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10)); } return headers; } diff --git a/distribution/src/bin/elasticsearch-cli.bat b/distribution/src/bin/elasticsearch-cli.bat new file mode 100644 index 0000000000000..efda5f653ef31 --- /dev/null +++ b/distribution/src/bin/elasticsearch-cli.bat @@ -0,0 +1,22 @@ +call "%~dp0elasticsearch-env.bat" || exit /b 1 + +if defined ES_ADDITIONAL_SOURCES ( + for %%a in ("%ES_ADDITIONAL_SOURCES:;=","%") do ( + call %~dp0%%a + ) +) + +for /f "tokens=1*" %%a in ("%*") do ( + set main_class=%%a + set arguments=%%b +) + +%JAVA% ^ + %ES_JAVA_OPTS% ^ + -Des.path.home="%ES_HOME%" ^ + -Des.path.conf="%ES_PATH_CONF%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ + -cp "%ES_CLASSPATH%" ^ + %main_class% ^ + %arguments% diff --git a/distribution/src/bin/elasticsearch-keystore.bat b/distribution/src/bin/elasticsearch-keystore.bat index 1d6616983d8cc..9bd72a65745a9 100644 --- a/distribution/src/bin/elasticsearch-keystore.bat +++ b/distribution/src/bin/elasticsearch-keystore.bat @@ -3,17 +3,10 @@ setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ - org.elasticsearch.common.settings.KeyStoreCli ^ - %* +call "%~dp0elasticsearch-cli.bat" ^ + org.elasticsearch.common.settings.KeyStoreCli ^ + %* ^ + || exit /b 1 endlocal endlocal diff --git a/distribution/src/bin/elasticsearch-plugin.bat b/distribution/src/bin/elasticsearch-plugin.bat index b3b94a31863f1..d46ef295d085b 100644 --- a/distribution/src/bin/elasticsearch-plugin.bat +++ 
b/distribution/src/bin/elasticsearch-plugin.bat @@ -3,17 +3,10 @@ setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.plugins.PluginCli ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/distribution/src/bin/elasticsearch-translog.bat b/distribution/src/bin/elasticsearch-translog.bat index 492c1f0831263..37d96bbed6c4e 100644 --- a/distribution/src/bin/elasticsearch-translog.bat +++ b/distribution/src/bin/elasticsearch-translog.bat @@ -3,17 +3,10 @@ setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.index.translog.TranslogToolCli ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index d6f6e36b8c48e..6aa9f43936a74 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -30,7 +30,6 @@ import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.hash.MessageDigests; @@ -240,7 +239,7 @@ private static void handleInstallXPack(final Build.Flavor flavor) throws UserExc /** Downloads the plugin and returns the file it was downloaded to. */ private Path download(Terminal terminal, String pluginId, Path tmpDir) throws Exception { if (OFFICIAL_PLUGINS.contains(pluginId)) { - final String url = getElasticUrl(terminal, getStagingHash(), Version.CURRENT, pluginId, Platforms.PLATFORM_NAME); + final String url = getElasticUrl(terminal, getStagingHash(), Version.CURRENT, isSnapshot(), pluginId, Platforms.PLATFORM_NAME); terminal.println("-> Downloading " + pluginId + " from elastic"); return downloadZipAndChecksum(terminal, url, tmpDir, false); } @@ -272,22 +271,43 @@ String getStagingHash() { return System.getProperty(PROPERTY_STAGING_ID); } + boolean isSnapshot() { + return Build.CURRENT.isSnapshot(); + } + /** Returns the url for an official elasticsearch plugin. 
*/ - private String getElasticUrl(Terminal terminal, String stagingHash, Version version, - String pluginId, String platform) throws IOException { + private String getElasticUrl( + final Terminal terminal, + final String stagingHash, + final Version version, + final boolean isSnapshot, + final String pluginId, + final String platform) throws IOException, UserException { final String baseUrl; + if (isSnapshot && stagingHash == null) { + throw new UserException( + ExitCodes.CONFIG, "attempted to install release build of official plugin on snapshot build of Elasticsearch"); + } if (stagingHash != null) { - baseUrl = String.format(Locale.ROOT, - "https://staging.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s", version, stagingHash, pluginId); + if (isSnapshot) { + baseUrl = nonReleaseUrl("snapshots", version, stagingHash, pluginId); + } else { + baseUrl = nonReleaseUrl("staging", version, stagingHash, pluginId); + } } else { - baseUrl = String.format(Locale.ROOT, - "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId); + baseUrl = String.format(Locale.ROOT, "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId); } - final String platformUrl = String.format(Locale.ROOT, "%s/%s-%s-%s.zip", baseUrl, pluginId, platform, version); + final String platformUrl = + String.format(Locale.ROOT, "%s/%s-%s-%s.zip", baseUrl, pluginId, platform, Version.displayVersion(version, isSnapshot)); if (urlExists(terminal, platformUrl)) { return platformUrl; } - return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, version); + return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Version.displayVersion(version, isSnapshot)); + } + + private String nonReleaseUrl(final String hostname, final Version version, final String stagingHash, final String pluginId) { + return String.format( + Locale.ROOT, "https://%s.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s", hostname, version, stagingHash, pluginId); } /** Returns the url for an elasticsearch plugin in maven. 
*/
diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java
index bfeb3c0279b65..07fe4f5403ae6 100644
--- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java
+++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java
@@ -800,7 +800,7 @@ private void installPlugin(MockTerminal terminal, boolean isBatch) throws Except
         skipJarHellCommand.execute(terminal, pluginZip, isBatch, env.v2());
     }

-    void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash,
+    void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash, boolean isSnapshot,
                                     String shaExtension, Function shaCalculator) throws Exception {
         Tuple env = createEnv(fs, temp);
         Path pluginDir = createPluginDir(temp);
@@ -834,6 +834,12 @@ boolean urlExists(Terminal terminal, String urlString) throws IOException {
             String getStagingHash() {
                 return stagingHash;
             }
+
+            @Override
+            boolean isSnapshot() {
+                return isSnapshot;
+            }
+
             @Override
             void jarHellCheck(PluginInfo candidateInfo, Path candidate, Path pluginsDir, Path modulesDir) throws Exception {
                 // no jarhell check
@@ -843,48 +849,82 @@ void jarHellCheck(PluginInfo candidateInfo, Path candidate, Path pluginsDir, Pat
         assertPlugin(name, pluginDir, env.v2());
     }

-    public void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash) throws Exception {
+    public void assertInstallPluginFromUrl(
+            final String pluginId, final String name, final String url, final String stagingHash, boolean isSnapshot) throws Exception {
         MessageDigest digest = MessageDigest.getInstance("SHA-512");
-        assertInstallPluginFromUrl(pluginId, name, url, stagingHash, ".sha512", checksumAndFilename(digest, url));
+        assertInstallPluginFromUrl(pluginId, name, url, stagingHash, isSnapshot, ".sha512", checksumAndFilename(digest, url));
     }

     public void testOfficalPlugin() throws Exception {
         String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip";
-        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null);
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false);
+    }
+
+    public void testOfficialPluginSnapshot() throws Exception {
+        String url = String.format(
+                Locale.ROOT,
+                "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip",
+                Version.CURRENT,
+                Version.displayVersion(Version.CURRENT, true));
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", true);
+    }
+
+    public void testInstallReleaseBuildOfPluginOnSnapshotBuild() {
+        String url = String.format(
+                Locale.ROOT,
+                "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip",
+                Version.CURRENT,
+                Version.displayVersion(Version.CURRENT, true));
+        // attempting to install a release build of a plugin (no staging ID) on a snapshot build should throw a user exception
+        final UserException e =
+                expectThrows(UserException.class, () -> assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, true));
+        assertThat(e.exitCode, equalTo(ExitCodes.CONFIG));
+        assertThat(
+                e, hasToString(containsString("attempted to install release build of official plugin on snapshot build of Elasticsearch")));
     }

     public
void testOfficalPluginStaging() throws Exception { String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; - assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123"); + assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", false); } public void testOfficalPlatformPlugin() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME + "-" + Version.CURRENT + ".zip"; - assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null); + assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false); + } + + public void testOfficialPlatformPluginSnapshot() throws Exception { + String url = String.format( + Locale.ROOT, + "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s-%s.zip", + Version.CURRENT, + Platforms.PLATFORM_NAME, + Version.displayVersion(Version.CURRENT, true)); + assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", true); } public void testOfficalPlatformPluginStaging() throws Exception { String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME + "-"+ Version.CURRENT + ".zip"; - assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123"); + assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", false); } public void testMavenPlugin() throws Exception { String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip"; - assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null); + assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false); } public void testMavenPlatformPlugin() throws Exception { String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-" + Platforms.PLATFORM_NAME + "-1.0.0.zip"; - assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null); + assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false); } public void testMavenSha1Backcompat() throws Exception { String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip"; MessageDigest digest = MessageDigest.getInstance("SHA-1"); - assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".sha1", checksum(digest)); + assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".sha1", checksum(digest)); assertTrue(terminal.getOutput(), terminal.getOutput().contains("sha512 not found, falling back to sha1")); } @@ -892,7 +932,7 @@ public void testOfficialShaMissing() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-1"); UserException e = expectThrows(UserException.class, () -> - assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, ".sha1", checksum(digest))); + assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, ".sha1", checksum(digest))); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertEquals("Plugin checksum missing: " + url + ".sha512", e.getMessage()); } @@ -900,7 +940,7 @@ public void testOfficialShaMissing() throws Exception { public void testMavenShaMissing() throws Exception { 
String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip"; UserException e = expectThrows(UserException.class, () -> - assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".dne", bytes -> null)); + assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".dne", bytes -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertEquals("Plugin checksum missing: " + url + ".sha1", e.getMessage()); } @@ -909,7 +949,7 @@ public void testInvalidShaFileMissingFilename() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-512"); UserException e = expectThrows(UserException.class, () -> - assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, ".sha512", checksum(digest))); + assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, ".sha512", checksum(digest))); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertTrue(e.getMessage(), e.getMessage().startsWith("Invalid checksum file")); } @@ -923,6 +963,7 @@ public void testInvalidShaFileMismatchFilename() throws Exception { "analysis-icu", url, null, + false, ".sha512", checksumAndString(digest, " repository-s3-" + Version.CURRENT + ".zip"))); assertEquals(ExitCodes.IO_ERROR, e.exitCode); @@ -938,6 +979,7 @@ public void testInvalidShaFileContainingExtraLine() throws Exception { "analysis-icu", url, null, + false, ".sha512", checksumAndString(digest, " analysis-icu-" + Version.CURRENT + ".zip\nfoobar"))); assertEquals(ExitCodes.IO_ERROR, e.exitCode); @@ -952,6 +994,7 @@ public void testSha512Mismatch() throws Exception { "analysis-icu", url, null, + false, ".sha512", bytes -> "foobar analysis-icu-" + Version.CURRENT + ".zip")); assertEquals(ExitCodes.IO_ERROR, e.exitCode); @@ -961,7 +1004,7 @@ public void testSha512Mismatch() throws Exception { public void testSha1Mismatch() throws Exception { String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip"; UserException e = expectThrows(UserException.class, () -> - assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".sha1", bytes -> "foobar")); + assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".sha1", bytes -> "foobar")); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertTrue(e.getMessage(), e.getMessage().contains("SHA-1 mismatch, expected foobar")); } diff --git a/docs/java-rest/high-level/snapshot/delete_repository.asciidoc b/docs/java-rest/high-level/snapshot/delete_repository.asciidoc new file mode 100644 index 0000000000000..e88535f2362a5 --- /dev/null +++ b/docs/java-rest/high-level/snapshot/delete_repository.asciidoc @@ -0,0 +1,82 @@ +[[java-rest-high-snapshot-delete-repository]] +=== Snapshot Delete Repository API + +The Snapshot Delete Repository API allows to delete a registered repository. 
+ +[[java-rest-high-snapshot-delete-repository-request]] +==== Snapshot Delete Repository Request + +A `DeleteRepositoryRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-request] +-------------------------------------------------- + +==== Optional Arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge the repository deletion +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the repository deletion +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-snapshot-delete-repository-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-execute] +-------------------------------------------------- + +[[java-rest-high-snapshot-delete-repository-async]] +==== Asynchronous Execution + +The asynchronous execution of a snapshot delete repository requires both the +`DeleteRepositoryRequest` instance and an `ActionListener` instance to be +passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-execute-async] +-------------------------------------------------- +<1> The `DeleteRepositoryRequest` to execute and the `ActionListener` +to use when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `DeleteRepositoryResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure. 
The raised exception is provided as an argument + +[[java-rest-high-snapshot-delete-repository-response]] +==== Snapshot Delete Repository Response + +The returned `DeleteRepositoryResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-response] +-------------------------------------------------- +<1> Indicates the node has acknowledged the request diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index b00047359a5d7..d8ec67dade10b 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -114,6 +114,9 @@ include::cluster/list_tasks.asciidoc[] The Java High Level REST Client supports the following Snapshot APIs: * <> +* <> +* <> include::snapshot/get_repository.asciidoc[] include::snapshot/create_repository.asciidoc[] +include::snapshot/delete_repository.asciidoc[] diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 68367b9a64fdf..012ce418226cd 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -271,7 +271,7 @@ a `ContentType` of `application/json`. include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body-shorter] -------------------------------------------------- -And you can set a list of headers to send with the request: +And you can add one or more headers to send with the request: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/painless/painless-casting.asciidoc b/docs/painless/painless-casting.asciidoc index ec4f9919bd043..a3624f9083145 100644 --- a/docs/painless/painless-casting.asciidoc +++ b/docs/painless/painless-casting.asciidoc @@ -1,172 +1,456 @@ [[painless-casting]] === Casting -Casting is the conversion of one type to another. Implicit casts are casts that -occur automatically, such as during an assignment operation. Explicit casts are -casts where you use the casting operator to explicitly convert one type to -another. This is necessary during operations where the cast cannot be inferred. +A cast converts the value of an original type to the equivalent value of a +target type. An implicit cast infers the target type and automatically occurs +during certain <>. An explicit cast specifies +the target type and forcefully occurs as its own operation. Use the *cast +operator* to specify an explicit cast. -To cast to a new type, precede the expression by the new type enclosed in -parentheses, for example -`(int)x`. +*Errors* -The following sections specify the implicit casts that can be performed and the -explicit casts that are allowed. The only other permitted cast is casting -a single character `String` to a `char`. +* If during a cast there exists no equivalent value for the target type. +* If an implicit cast is given, but an explicit cast is required. -*Grammar:* +*Grammar* [source,ANTLR4] ---- cast: '(' TYPE ')' expression ---- -[[numeric-casting]] -==== Numeric Casting +*Examples* -The following table shows the allowed implicit and explicit casts between -numeric types. Read the table by row. To find out if you need to explicitly -cast from type A to type B, find the row for type A and scan across to the -column for type B. +* Valid casts. 
++ +[source,Painless] +---- +<1> int i = (int)5L; +<2> Map m = new HashMap(); +<3> HashMap hm = (HashMap)m; +---- ++ +<1> declare `int i`; + explicit cast `long 5` to `int 5` -> `int 5`; + assign `int 5` to `i` +<2> declare `Map m`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `Map reference` -> `Map reference`; + assign `Map reference` to `m` +<3> declare `HashMap hm`; + access `m` -> `Map reference`; + explicit cast `Map reference` to `HashMap reference` -> `HashMap reference`; + assign `HashMap reference` to `hm` + +[[numeric-type-casting]] +==== Numeric Type Casting -The following table shows the allowed implicit and explicit casts between -numeric types. Read the table by row. To find out if you need to explicitly -cast from type A to type B, find the row for type A and scan across to the -column for type B. +A <> cast converts the value of an original +numeric type to the equivalent value of a target numeric type. A cast between +two numeric type values results in data loss when the value of the original +numeric type is larger than the target numeric type can accommodate. A cast +between an integer type value and a floating point type value can result in +precision loss. + +The allowed casts for values of each numeric type are shown as a row in the +following table: -IMPORTANT: Explicit casts between numeric types can result in some data loss. A -smaller numeric type cannot necessarily accommodate the value from a larger -numeric type. You might also lose precision when casting from integer types -to floating point types. |==== -| | byte | short | char | int | long | float | double -| byte | | implicit | implicit | implicit | implicit | implicit | implicit -| short | explicit | | explicit | implicit | implicit | implicit | implicit -| char | explicit | explicit | | implicit | implicit | implicit | implicit -| int | explicit | explicit | explicit | | implicit | implicit | implicit -| long | explicit | explicit | explicit | explicit | | implicit | implicit -| float | explicit | explicit | explicit | explicit | explicit | | implicit +| | byte | short | char | int | long | float | double +| byte | | implicit | implicit | implicit | implicit | implicit | implicit +| short | explicit | | explicit | implicit | implicit | implicit | implicit +| char | explicit | explicit | | implicit | implicit | implicit | implicit +| int | explicit | explicit | explicit | | implicit | implicit | implicit +| long | explicit | explicit | explicit | explicit | | implicit | implicit +| float | explicit | explicit | explicit | explicit | explicit | | implicit | double | explicit | explicit | explicit | explicit | explicit | explicit | |==== +*Examples* + +* Valid numeric type casts. ++ +[source,Painless] +---- +<1> int a = 1; +<2> long b = a; +<3> short c = (short)b; +<4> double e = (double)a; +---- ++ +<1> declare `int a`; + assign `int 1` to `a` +<2> declare `long b`; + access `a` -> `int 1`; + implicit cast `int 1` to `long 1` -> `long 1`; + assign `long 1` to `b` +<3> declare `short c`; + access `b` -> `long 1`; + explicit cast `long 1` to `short 1` -> `short 1`; + assign `short 1` to `c` +<4> declare `double e`; + access `a` -> `int 1`; + explicit cast `int 1` to `double 1.0` -> `double 1.0`; + assign `double 1.0` to `e`; + (note the explicit cast is extraneous since an implicit cast is valid) ++ +* Invalid numeric type casts resulting in errors. 
++ +[source,Painless] +---- +<1> int a = 1.0; // error +<2> int b = 2; +<3> byte c = b; // error +---- ++ +<1> declare `int a`; + *error* -> cannot implicit cast `double 1.0` to `int 1`; + (note an explicit cast is valid) +<2> declare `int b`; + assign `int 2` to `b` +<3> declare `byte c`; + access `b` -> `int 2`; + *error* -> cannot implicit cast `int 2` to `byte 2`; + (note an explicit cast is valid) + +[[reference-type-casting]] +==== Reference Type Casting + +A <> cast converts the value of an original +reference type to the equivalent value of a target reference type. An implicit +cast between two reference type values is allowed when the original reference +type is a descendant of the target type. An explicit cast between two reference +type values is allowed when the original type is a descendant of the target type +or the target type is a descendant of the original type. + +*Examples* + +* Valid reference type casts. ++ +[source,Painless] +---- +<1> List x; +<2> ArrayList y = new ArrayList(); +<3> x = y; +<4> y = (ArrayList)x; +<5> x = (List)y; +---- ++ +<1> declare `List x`; + assign default value `null` to `x` +<2> declare `ArrayList y`; + allocate `ArrayList` instance -> `ArrayList reference`; + assign `ArrayList reference` to `y`; +<3> access `y` -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + assign `List reference` to `x`; + (note `ArrayList` is a descendant of `List`) +<4> access `x` -> `List reference`; + explicit cast `List reference` to `ArrayList reference` + -> `ArrayList reference`; + assign `ArrayList reference` to `y`; +<5> access `y` -> `ArrayList reference`; + explicit cast `ArrayList reference` to `List reference` -> `List reference`; + assign `List reference` to `x`; + (note the explicit cast is extraneous, and an implicit cast is valid) ++ +* Invalid reference type casts resulting in errors. ++ +[source,Painless] +---- +<1> List x = new ArrayList(); +<2> ArrayList y = x; // error +<3> Map m = (Map)x; // error +---- ++ +<1> declare `List x`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + assign `List reference` to `x` +<2> declare `ArrayList y`; + access `x` -> `List reference`; + *error* -> cannot implicit cast `List reference` to `ArrayList reference`; + (note an explicit cast is valid since `ArrayList` is a descendant of `List`) +<3> declare `Map m`; + access `x` -> `List reference`; + *error* -> cannot explicit cast `List reference` to `Map reference`; + (note no cast would be valid since neither `List` nor `Map` is a descendant + of the other) + +[[dynamic-type-casting]] +==== Dynamic Type Casting + +A <> cast converts the value of an original +`def` type to the equivalent value of any target type or converts the value of +any original type to the equivalent value of a target `def` type. + +An implicit cast from any original type value to a `def` type value is always +allowed. An explicit cast from any original type value to a `def` type value is +always allowed but never necessary. + +An implicit or explicit cast from an original `def` type value to +any target type value is allowed if and only if the cast is normally allowed +based on the current type value the `def` type value represents. 
+ +*Examples* -Example(s) -[source,Java] ----- -int a = 1; // Declare int variable a and set it to the literal - // value 1 -long b = a; // Declare long variable b and set it to int variable - // a with an implicit cast to convert from int to long -short c = (short)b; // Declare short variable c, explicitly cast b to a - // short, and assign b to c -byte d = a; // ERROR: Casting an int to a byte requires an explicit - // cast -double e = (double)a; // Explicitly cast int variable a to a double and assign - // it to the double variable e. The explicit cast is - // allowed, but it is not necessary. ----- - -[[reference-casting]] -==== Reference Casting - -A reference type can be implicitly cast to another reference type as long as -the type being cast _from_ is a descendant of the type being cast _to_. A -reference type can be explicitly cast _to_ if the type being cast to is a -descendant of the type being cast _from_. - -*Examples:* -[source,Java] ----- -List x; // Declare List variable x -ArrayList y = new ArrayList(); // Declare ArrayList variable y and assign it a - // newly allocated ArrayList [1] -x = y; // Assign Arraylist y to List x using an - // implicit cast -y = (ArrayList)x; // Explicitly cast List x to an ArrayList and - // assign it to ArrayList y -x = (List)y; // Set List x to ArrayList y using an explicit - // cast (the explicit cast is not necessary) -y = x; // ERROR: List x cannot be implicitly cast to - // an ArrayList, an explicit cast is required -Map m = y; // ERROR: Cannot implicitly or explicitly cast [2] - // an ArrayList to a Map, no relationship - // exists between the two types. ----- -[1] `ArrayList` is a descendant of the `List` type. -[2] `Map` is unrelated to the `List` and `ArrayList` types. - -[[def-type-casting]] -==== def Type Casting -All primitive and reference types can always be implicitly cast to -`def`. While it is possible to explicitly cast to `def`, it is not necessary. - -However, it is not always possible to implicitly cast a `def` to other -primitive and reference types. An explicit cast is required if an explicit -cast would normally be required between the non-def types. - - -*Examples:* -[source,Java] ----- -def x; // Declare def variable x and set it to null -x = 3; // Set the def variable x to the literal 3 with an implicit - // cast from int to def -double a = x; // Declare double variable a and set it to def variable x, - // which contains a double -int b = x; // ERROR: Results in a run-time error because an explicit cast is - // required to cast from a double to an int -int c = (int)x; // Declare int variable c, explicitly cast def variable x to an - // int, and assign x to c +* Valid dynamic type casts with any original type to a target `def` type. 
++ +[source,Painless] ---- +<1> def d0 = 3; +<2> d0 = new ArrayList(); +<3> Object o = new HashMap(); +<4> def d1 = o; +<5> int i = d1.size(); +---- ++ +<1> declare `def d0`; + implicit cast `int 3` to `def`; + assign `int 3` to `d0` +<2> allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `def` -> `def`; + assign `def` to `d0` +<3> declare `Object o`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `Object reference` + -> `Object reference`; + assign `Object reference` to `o` +<4> declare `def d1`; + access `o` -> `Object reference`; + implicit cast `Object reference` to `def` -> `def`; + assign `def` to `d1` +<5> declare `int i`; + access `d1` -> `def`; + implicit cast `def` to `HashMap reference` -> `HashMap reference`; + call `size` on `HashMap reference` -> `int 0`; + assign `int 0` to `i`; + (note `def` was implicit cast to `HashMap reference` since `HashMap` is the + child-most descendant type value that the `def` type value + represents) ++ +* Valid dynamic type casts with an original `def` type to any target type. ++ +[source,Painless] +---- +<1> def d = 1.0; +<2> int i = (int)d; +<3> d = 1; +<4> float f = d; +<5> d = new ArrayList(); +<6> List l = d; +---- ++ +<1> declare `def d`; + implicit cast `double 1.0` to `def` -> `def`; + assign `def` to `d` +<2> declare `int i`; + access `d` -> `def`; + implicit cast `def` to `double 1.0` -> `double 1.0`; + explicit cast `double 1.0` to `int 1` -> `int 1`; + assign `int 1` to `i`; + (note the explicit cast is necessary since a `double` value cannot be + converted to an `int` value implicitly) +<3> assign `int 1` to `d`; + (note the switch in the type `d` represents from `double` to `int`) +<4> declare `float f`; + access `d` -> `def`; + implicit cast `def` to `int 1` -> `int 1`; + implicit cast `int 1` to `float 1.0` -> `float 1.0`; + assign `float 1.0` to `f` +<5> allocate `ArrayList` instance -> `ArrayList reference`; + assign `ArrayList reference` to `d`; + (note the switch in the type `d` represents from `int` to `ArrayList`) +<6> declare `List l`; + access `d` -> `def`; + implicit cast `def` to `ArrayList reference` -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + assign `List reference` to `l` ++ +* Invalid dynamic type casts resulting in errors. ++ +[source,Painless] +---- +<1> def d = 1; +<2> short s = d; // error +<3> d = new HashMap(); +<4> List l = d; // error +---- +<1> declare `def d`; + implicit cast `int 1` to `def` -> `def`; + assign `def` to `d` +<2> declare `short s`; + access `d` -> `def`; + implicit cast `def` to `int 1` -> `int 1`; + *error* -> cannot implicit cast `int 1` to `short 1`; + (note an explicit cast is valid) +<3> allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `def` -> `def`; + assign `def` to `d` +<4> declare `List l`; + access `d` -> `def`; + implicit cast `def` to `HashMap reference`; + *error* -> cannot implicit cast `HashMap reference` to `List reference`; + (note no cast would be valid since neither `HashMap` nor `List` is a + descendant of the other) + +[[string-character-casting]] +==== String to Character Casting + +Use the *cast operator* to convert a <> value into a +<> value. + +*Errors* + +* If the `String` type value isn't one character in length. +* If the `String` type value is `null`. + +*Examples* + +* Casting string literals into `char` type values. 
++ +[source,Painless] +---- +<1> char c = (char)"C" +<2> c = (char)'c' +---- ++ +<1> declare `char c`; + explicit cast `String "C"` to `char C` -> `char C`; + assign `char C` to `c` +<2> explicit cast `String 'c'` to `char c` -> `char c`; + assign `char c` to `c` ++ +* Casting a `String` reference into a `char` value. ++ +[source,Painless] +---- +<1> String s = "s"; +<2> char c = (char)s; +---- +<1> declare `String s`; + assign `String "s"` to `s`; +<2> declare `char c`; + access `s` -> `String "s"`; + explicit cast `String "s"` to `char s` -> `char s`; + assign `char s` to `c` [[boxing-unboxing]] ==== Boxing and Unboxing -Boxing is where a cast is used to convert a primitive type to its corresponding -reference type. Unboxing is the reverse, converting a reference type to the -corresponding primitive type. +Boxing is a special type of cast used to convert a primitive type to its +corresponding reference type. Unboxing is the reverse, used to convert a +reference type to its corresponding primitive type. + +Implicit boxing/unboxing occurs during the following operations: + +* Conversions between a `def` type and a primitive type will be implicitly + boxed/unboxed as necessary, though this is referred to as an implicit cast + throughout the documentation. +* Method/function call arguments will be implicitly boxed/unboxed as necessary. +* A primitive type value will be implicitly boxed when a reference type method + call is invoked on it. -There are two places Painless performs implicit boxing and unboxing: +Explicit boxing/unboxing is not allowed. Use the reference type API to +explicitly convert a primitive type value to its respective reference type +value and vice versa. -* When you call methods, Painless automatically boxes and unboxes arguments -so you can specify either primitive types or their corresponding reference -types. -* When you use the `def` type, Painless automatically boxes and unboxes as -needed when converting to and from `def`. +*Errors* -The casting operator does not support any way to explicitly box a primitive -type or unbox a reference type. +* If an explicit cast is made to box/unbox a primitive type. -If a primitive type needs to be converted to a reference type, the Painless -reference type API supports methods that can do that. However, under normal -circumstances this should not be necessary. +*Examples* -*Examples:* -[source,Java] +* Uses of implicit boxing/unboxing. 
++ +[source,Painless] ---- -Integer x = 1; // ERROR: not a legal implicit cast -Integer y = (Integer)1; // ERROR: not a legal explicit cast -int a = new Integer(1); // ERROR: not a legal implicit cast -int b = (int)new Integer(1); // ERROR: not a legal explicit cast +<1> List l = new ArrayList(); +<2> l.add(1); +<3> Integer I = Integer.valueOf(0); +<4> int i = l.get(I); ---- ++ +<1> declare `List l`; + allocate `ArrayList` instance -> `ArrayList reference`; + assign `ArrayList reference` to `l`; +<2> access `l` -> `List reference`; + implicit cast `int 1` to `def` -> `def`; + call `add` on `List reference` with arguments (`def`); + (note internally `int 1` is boxed to `Integer 1` to store as a `def` type + value) +<3> declare `Integer I`; + call `valueOf` on `Integer` with arguments of (`int 0`) -> `Integer 0`; + assign `Integer 0` to `I`; +<4> declare `int i`; + access `I` -> `Integer 0`; + unbox `Integer 0` -> `int 0`; + access `l` -> `List reference`; + call `get` on `List reference` with arguments (`int 0`) -> `def`; + implicit cast `def` to `int 1` -> `int 1`; + assign `int 1` to `i`; + (note internally `int 1` is unboxed from `Integer 1` when loaded from a + `def` type value) ++ +* Uses of invalid boxing/unboxing resulting in errors. ++ +[source,Painless] +---- +<1> Integer x = 1; // error +<2> Integer y = (Integer)1; // error +<3> int a = Integer.valueOf(1); // error +<4> int b = (int)Integer.valueOf(1); // error +---- ++ +<1> declare `Integer x`; + *error* -> cannot implicit box `int 1` to `Integer 1` during assignment +<2> declare `Integer y`; + *error* -> cannot explicit box `int 1` to `Integer 1` during assignment +<3> declare `int a`; + call `valueOf` on `Integer` with arguments of (`int 1`) -> `Integer 1`; + *error* -> cannot implicit unbox `Integer 1` to `int 1` during assignment +<4> declare `int b`; + call `valueOf` on `Integer` with arguments of (`int 1`) -> `Integer 1`; + *error* -> cannot explicit unbox `Integer 1` to `int 1` during assignment [[promotion]] ==== Promotion -Promotion is where certain operations require types to be either a minimum -numerical type or for two (or more) types to be equivalent. -The documentation for each operation that has these requirements -includes promotion tables that describe how this is handled. +Promotion is when a single value is implicitly cast to a certain type or +multiple values are implicitly cast to the same type as required for evaluation +by certain operations. Each operation that requires promotion has a promotion +table that shows all required implicit casts based on the type(s) of value(s). A +value can be promoted to a `def` type at compile-time; however, the promoted +type value is derived from what the `def` type value represents at run-time. -When an operation promotes a type or types, the resultant type -of the operation is the promoted type. Types can be promoted to def -at compile-time; however, at run-time, the resultant type will be the -promotion of the types the `def` is representing. +*Errors* -*Examples:* -[source,Java] ----- -2 + 2.0 // Add the literal int 2 and the literal double 2.0. The literal - // 2 is promoted to a double and the resulting value is a double. +* If a specific operation cannot find an allowed promotion type for the type(s) + of value(s) given. -def x = 1; // Declare def variable x and set it to the literal int 1 through - // an implicit cast -x + 2.0F // Add def variable x and the literal float 2.0. - // At compile-time the types are promoted to def. 
- // At run-time the types are promoted to float. +*Examples* + +* Uses of promotion. ++ +[source,Painless] +---- +<1> double d = 2 + 2.0; +<2> def x = 1; +<3> float f = x + 2.0F; ---- +<1> declare `double d`; + promote `int 2` and `double 2.0 @0` -> `double 2.0 @0`; + implicit cast `int 2` to `double 2.0 @1` -> `double 2.0 @1`; + add `double 2.0 @1` and `double 2.0 @0` -> `double 4.0`; + assign `double 4.0` to `d` +<2> declare `def x`; + implicit cast `int 1` to `def` -> `def`; + assign `def` to `x`; +<3> declare `float f`; + access `x` -> `def`; + implicit cast `def` to `int 1` -> `int 1`; + promote `int 1` and `float 2.0` -> `float 2.0`; + implicit cast `int 1` to `float 1.0` -> `float 1.0`; + add `float 1.0` and `float 2.0` -> `float 3.0`; + assign `float 3.0` to `f`; + (note this example illustrates promotion done at run-time as promotion + done at compile-time would have resolved to a `def` type value) diff --git a/docs/painless/painless-comments.asciidoc b/docs/painless/painless-comments.asciidoc index 588e464d97f78..bde30e37e04e1 100644 --- a/docs/painless/painless-comments.asciidoc +++ b/docs/painless/painless-comments.asciidoc @@ -1,12 +1,12 @@ [[painless-comments]] === Comments -Use the `//` token anywhere on a line to specify a single-line comment. All -characters from the `//` token to the end of the line are ignored. Use an -opening `/*` token and a closing `*/` token to specify a multi-line comment. -Multi-line comments can start anywhere on a line, and all characters in between -the `/*` token and `*/` token are ignored. Comments can be included anywhere -within a script. +Use a comment to annotate or explain code within a script. Use the `//` token +anywhere on a line to specify a single-line comment. All characters from the +`//` token to the end of the line are ignored. Use an opening `/*` token and a +closing `*/` token to specify a multi-line comment. Multi-line comments can +start anywhere on a line, and all characters in between the `/*` token and `*/` +token are ignored. Comments can be included anywhere within a script. *Grammar* [source,ANTLR4] diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-execute-script.asciidoc index 3344bd9f75132..a3ac5b578d781 100644 --- a/docs/painless/painless-execute-script.asciidoc +++ b/docs/painless/painless-execute-script.asciidoc @@ -11,7 +11,7 @@ The Painless execute API allows an arbitrary script to be executed and a result |====== | Name | Required | Default | Description | `script` | yes | - | The script to execute -| `context` | no | `execute_api_script` | The context the script should be executed in. +| `context` | no | `painless_test` | The context the script should be executed in. |====== ==== Contexts diff --git a/docs/painless/painless-identifiers.asciidoc b/docs/painless/painless-identifiers.asciidoc index 17073e3d4c415..7762f56cb7b23 100644 --- a/docs/painless/painless-identifiers.asciidoc +++ b/docs/painless/painless-identifiers.asciidoc @@ -1,10 +1,10 @@ [[painless-identifiers]] === Identifiers -Specify identifiers to <>, <>, and -<> variables, <>, and -<>. <> and -<> cannot be used as identifiers. +Use an identifier as a named token to specify a +<>, <>, +<>, <>, or function. +<> cannot be used as identifiers. 
*Grammar* [source,ANTLR4] diff --git a/docs/painless/painless-keywords.asciidoc b/docs/painless/painless-keywords.asciidoc index cb3bafbd20f13..39a2201fd2b4b 100644 --- a/docs/painless/painless-keywords.asciidoc +++ b/docs/painless/painless-keywords.asciidoc @@ -1,9 +1,9 @@ [[painless-keywords]] === Keywords -The keywords in the table below are reserved for built-in language -features. These keywords cannot be used as -<> or <>. +Keywords are reserved tokens for built-in language features and cannot be used +as <> within a script. The following are +keywords: [cols="^1,^1,^1,^1,^1"] |==== diff --git a/docs/painless/painless-lang-spec.asciidoc b/docs/painless/painless-lang-spec.asciidoc index ba6595000ae2f..5e6b84d8c57d1 100644 --- a/docs/painless/painless-lang-spec.asciidoc +++ b/docs/painless/painless-lang-spec.asciidoc @@ -6,7 +6,7 @@ Painless syntax is similar to Java syntax along with some additional features such as dynamic typing, Map and List accessor shortcuts, and array initializers. As a direct comparison to Java, there are some important differences, especially related to the casting model. For more detailed -conceptual information about the basic constructs that Java and Painless share, +conceptual information about the basic constructs that Painless and Java share, refer to the corresponding topics in the https://docs.oracle.com/javase/specs/jls/se8/html/index.html[Java Language Specification]. diff --git a/docs/painless/painless-literals.asciidoc b/docs/painless/painless-literals.asciidoc index 441cb264f1e15..ebf7eaa07b657 100644 --- a/docs/painless/painless-literals.asciidoc +++ b/docs/painless/painless-literals.asciidoc @@ -1,18 +1,19 @@ [[painless-literals]] === Literals -Use literals to specify different types of values directly in a script. +Use a literal to specify a value directly in an +<>. [[integers]] ==== Integers -Use integer literals to specify an integer value in decimal, octal, or hex -notation of the <> `int`, `long`, `float`, +Use an integer literal to specify an integer type value in decimal, octal, or +hex notation of a <> `int`, `long`, `float`, or `double`. Use the following single letter designations to specify the -<>: `l` or `L` for `long`, `f` or `F` for -`float`, and `d` or `D` for `double`. If not specified, the type defaults to -`int`. Use `0` as a prefix to specify an integer literal as octal, and use -`0x` or `0X` as a prefix to specify an integer literal as hex. +primitive type: `l` or `L` for `long`, `f` or `F` for `float`, and `d` or `D` +for `double`. If not specified, the type defaults to `int`. Use `0` as a prefix +to specify an integer literal as octal, and use `0x` or `0X` as a prefix to +specify an integer literal as hex. *Grammar* [source,ANTLR4] @@ -46,11 +47,10 @@ HEX: '-'? '0' [xX] [0-9a-fA-F]+ [lL]?; [[floats]] ==== Floats -Use floating point literals to specify a floating point value of the -<> `float` or `double`. Use the following -single letter designations to specify the <>: -`f` or `F` for `float` and `d` or `D` for `double`. If not specified, the type defaults -to `double`. +Use a floating point literal to specify a floating point type value of a +<> `float` or `double`. Use the following +single letter designations to specify the primitive type: `f` or `F` for `float` +and `d` or `D` for `double`. If not specified, the type defaults to `double`. *Grammar* [source,ANTLR4] @@ -81,7 +81,7 @@ EXPONENT: ( [eE] [+\-]? 
[0-9]+ ); [[strings]] ==== Strings -Use string literals to specify <> values with +Use a string literal to specify a <> value with either single-quotes or double-quotes. Use a `\"` token to include a double-quote as part of a double-quoted string literal. Use a `\'` token to include a single-quote as part of a single-quoted string literal. Use a `\\` @@ -117,26 +117,6 @@ STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' ) [[characters]] ==== Characters -Use the <> to convert string literals or -<> values into <> values. -<> values converted into -<> values must be exactly one character in length -or an error will occur. - -*Examples* - -* Casting string literals into <> values. -+ -[source,Painless] ----- -(char)"C" -(char)'c' ----- -+ -* Casting a <> value into a <> value. -+ -[source,Painless] ----- -String s = "s"; -char c = (char)s; ----- +A character literal cannot be specified directly. Instead, use the +<> to convert a `String` type value +into a `char` type value. diff --git a/docs/painless/painless-operators.asciidoc b/docs/painless/painless-operators.asciidoc index 915d811fa441b..8329686f663af 100644 --- a/docs/painless/painless-operators.asciidoc +++ b/docs/painless/painless-operators.asciidoc @@ -240,6 +240,7 @@ operator. See Function Calls [MARK] for more information. The brackets operator `[]` is used to create and access arrays, lists, and maps. The braces operator `{}` is used to initialize arrays. +[[array-initialization]] ===== Creating and Initializing Arrays You create and initialize arrays using the brackets `[]` and braces `{}` @@ -248,9 +249,49 @@ initialize each dimension with are specified as a comma-separated list enclosed in braces. For example, `new int[] {1, 2, 3}` creates a one dimensional `int` array with a size of 3 and the values 1, 2, and 3. -For more information about allocating and initializing arrays, see <>. +To allocate an array, you use the `new` keyword followed by the type and a +set of brackets for each dimension. You can explicitly define the size of each dimension by specifying an expression within the brackets, or initialize each +dimension with the desired number of values. The allocated size of each +dimension is its permanent size. +To initialize an array, specify the values you want to initialize +each dimension with as a comma-separated list of expressions enclosed in braces. +For example, `new int[] {1, 2, 3}` creates a one-dimensional `int` array with a +size of 3 and the values 1, 2, and 3. + +When you initialize an array, the order of the expressions is maintained. Each expression used as part of the initialization is converted to the +array's type. An error occurs if the types do not match. 
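As a quick illustration of that conversion rule, here is a minimal sketch (the variable names are arbitrary) showing one valid initializer and one that fails under the casting rules:

[source,Painless]
----
int[] a = new int[] {1, (int)2L, 3}; // ok: every expression converts to int;
                                     // the long literal needs an explicit cast
//int[] b = new int[] {1.0, 2};      // error: double 1.0 cannot be implicitly
                                     // cast to the array's type int
----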
+ +*Grammar:* +[source,ANTLR4] +---- +declare_array: TYPE ('[' ']')+; + +array_initialization: 'new' TYPE '[' ']' '{' expression (',' expression)* '}' + | 'new' TYPE '[' ']' '{' '}'; +---- + +*Examples:* +[source,Java] +---- +int[] x = new int[5]; // Declare int array x and assign it a newly + // allocated int array with a size of 5 +def[][] y = new def[5][5]; // Declare the 2-dimensional def array y and + // assign it a newly allocated 2-dimensional + // array where both dimensions have a size of 5 +int[] x = new int[] {1, 2, 3}; // Declare int array x and set it to an int + // array with values 1, 2, 3 and a size of 3 +int i = 1; +long l = 2L; +float f = 3.0F; +double d = 4.0; +String s = "5"; +def[] da = new def[] {i, l, f*d, s}; // Declare def array da and set it to + // a def array with a size of 4 and the + // values i, l, f*d, and s +---- + +[[array-access]] ===== Accessing Array Elements Elements in an array are stored and accessed using the brackets `[]` operator. @@ -298,6 +339,7 @@ return d[z]; // Access the 1st element of array d using the NOTE: The use of the `def` type in the second example means that the types cannot be resolved until runtime. +[[array-length]] ===== Array Length Arrays contain a special member known as 'length' that is a read-only value that contains the size of the array. This member can be accessed from an array using the dot operator. @@ -727,6 +769,7 @@ def e; // declares the def variable e e = new HashMap(m); // sets e to a newly allocated HashMap using the constructor with a single argument m ---- +[[new-array]] ==== New Array An array type instance can be allocated using the new operator. The format starts with the new operator followed by the type followed by a series of opening and closing braces each containing an expression for the size of the dimension. diff --git a/docs/painless/painless-types.asciidoc b/docs/painless/painless-types.asciidoc index 9d575a2069ae3..a897b8e8a04f0 100644 --- a/docs/painless/painless-types.asciidoc +++ b/docs/painless/painless-types.asciidoc @@ -1,269 +1,466 @@ [[painless-types]] === Types -Painless supports both dynamic and static types. Static types are split into -_primitive types_ and _reference types_. - -[[dynamic-types]] -==== Dynamic Types - -Painless supports one dynamic type: `def`. The `def` type can represent any -primitive or reference type. When you use the `def` type, it mimics the exact -behavior of whatever type it represents at runtime. The default value for the -def type is `null.` - -Internally, if the `def` type represents a primitive type, it is converted to the -corresponding reference type. It still behaves like the primitive type, however, -including within the casting model. The `def` type can be assigned to different -types during the course of script execution. - -IMPORTANT: Because a `def` type variable can be assigned to different types -during execution, type conversion errors that occur when using the `def` type -happen at runtime. - -Using the `def` type can have a slight impact on performance. If performance is -critical, it's better to declare static types. - -*Examples:* -[source,Java] ----- -def x = 1; // Declare def variable x and set it to the - // literal int 1 -def l = new ArrayList(); // Declare def variable l and set it a newly - // allocated ArrayList ----- +A type is a classification of data used to define the properties of a value. +These properties specify what data a value represents and the rules for how a +value is evaluated during an <>. 
Each type +belongs to one of the following categories: <>, +<>, or <>. [[primitive-types]] ==== Primitive Types -Primitive types are allocated directly onto the stack according to the standard -Java memory model. - -Primitive types can behave as their corresponding (<>) -reference type. This means any piece of a reference type can be accessed or -called through the primitive type. Operations performed in this manner convert -the primitive type to its corresponding reference type at runtime and perform -the field access or method call without needing to perform any other -operations. - -Painless supports the following primitive types. - -byte:: -An 8-bit, signed, two's complement integer. -Range: [-128, 127]. -Default value: 0. -Reference type: Byte. - -short:: -A 16-bit, signed, two's complement integer. -Range: [-32768, 32767]. -Default value: 0. -Reference type: Short. - -char:: -A 16-bit Unicode character. -Range: [0, 65535]. -Default value: 0 or `\u0000`. -Reference type: Character. - -int:: -A 32-bit, signed, two's complement integer. -Range: [-2^32, 2^32-1]. -Default value: 0. -Reference type: Integer. - -long:: -A 64-bit, signed, two's complement integer. -Range: [-2^64, 2^64-1]. -Default value: 0. -Reference type: Long. - -float:: -A 32-bit, single-precision, IEEE 754 floating point number. -Range: Depends on multiple factors. -Default value: 0.0. -Reference type: Float. - -double:: -A 64-bit, double-precision, IEEE 754 floating point number. -Range: Depends on multiple factors. -Default value: 0.0. -Reference type: Double. - -boolean:: -A logical quanity with two possible values: true and false. -Range: true/false. -Default value: false. -Reference type: Boolean. - - -*Examples:* -[source,Java] +A primitive type represents basic data built natively into the JVM and is +allocated to non-heap memory. Declare a primitive type +<>, and assign it a primitive type value for +evaluation during later operations. The default value for a newly-declared +primitive type variable is listed as part of the definitions below. A primitive +type value is copied during an assignment or as an argument for a +method/function call. + +A primitive type has a corresponding reference type (also known as a boxed +type). Use the <> or +<> on a primitive type value to force +evaluation as its corresponding reference type value. 
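A minimal sketch (the variable names are arbitrary) of the copy behavior described above: assigning a primitive copies its value, so a later change to one variable does not affect the other.

[source,Painless]
----
int a = 1;
int b = a; // b receives a copy of the value held by a
b = 2;     // only b changes; a still evaluates to int 1
----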
+ +The following primitive types are available: + +[horizontal] +`byte`:: +8-bit, signed, two's complement integer +* range: [`-128`, `127`] +* default value: `0` +* reference type: `Byte` + +`short`:: +16-bit, signed, two's complement integer +* range: [`-32768`, `32767`] +* default value: `0` +* reference type: `Short` + +`char`:: +16-bit, unsigned, Unicode character +* range: [`0`, `65535`] +* default value: `0` or `\u0000` +* reference type: `Character` + +`int`:: +32-bit, signed, two's complement integer +* range: [`-2^31`, `2^31-1`] +* default value: `0` +* reference type: `Integer` + +`long`:: +64-bit, signed, two's complement integer +* range: [`-2^63`, `2^63-1`] +* default value: `0` +* reference type: `Long` + +`float`:: +32-bit, signed, single-precision, IEEE 754 floating point number +* default value: `0.0` +* reference type: `Float` + +`double`:: +64-bit, signed, double-precision, IEEE 754 floating point number +* default value: `0.0` +* reference type: `Double` + +`boolean`:: +logical quantity with two possible values of `true` and `false` +* default value: `false` +* reference type: `Boolean` + +*Examples* + +* Primitive types used in declaration, declaration and assignment. ++ +[source,Painless] ---- -int i = 1; // Declare variable i as an int and set it to the - // literal 1 -double d; // Declare variable d as a double and set it to the - // default value of 0.0 -boolean b = true; // Declare variable b as a boolean and set it to true +<1> int i = 1; +<2> double d; +<3> boolean b = true; ---- - -Using methods from the corresponding reference type on a primitive type. - -[source,Java] ++ +<1> declare `int i`; + assign `int 1` to `i` +<2> declare `double d`; + assign default `double 0.0` to `d` +<3> declare `boolean b`; + assign `boolean true` to `b` ++ +* Method call on a primitive type using the corresponding reference type. ++ +[source,Painless] ---- -int i = 1; // Declare variable i as an int and set it to the - // literal 1 -i.toString(); // Invokes the Integer method toString on variable i +<1> int i = 1; +<2> i.toString(); ---- ++ +<1> declare `int i`; + assign `int 1` to `i` +<2> access `i` -> `int 1`; + box `int 1` -> `Integer 1 reference`; + call `toString` on `Integer 1 reference` -> `String '1'` [[reference-types]] ==== Reference Types -Reference types are similar to Java classes and can contain multiple pieces -known as _members_. However, reference types do not support access modifiers. -You allocate reference type instances on the heap using the `new` operator. - -Reference types can have both static and non-static members: - -* Static members are shared by all instances of the same reference type and -can be accessed without allocating an instance of the reference type. For -example `Integer.MAX_VALUE`. -* Non-static members are specific to an instance of the reference type -and can only be accessed through the allocated instance. - -The default value for a reference type is `null`, indicating that no memory has -been allocated for it. When you assign `null` to a reference type, its previous -value is discarded and garbage collected in accordance with the Java memory -model as long as there are no other references to that value. - -A reference type can contain: - -* Zero to many primitive types. Primitive type members can be static or -non-static and read-only or read-write. -* Zero to many reference types. Reference type members can be static or -non-static and read-only or read-write. 
-* Methods that call an internal function to return a value and/or manipulate -the primitive or reference type members. Method members can be static or -non-static. -* Constructors that call an internal function to return a newly-allocated -reference type instance. Constructors are non-static methods that can -optionally manipulate the primitive and reference type members. - -Reference types support a Java-style inheritance model. Consider types A and B. -Type A is considered to be a parent of B, and B a child of A, if B inherits -(is able to access as its own) all of A's fields and methods. Type B is +A reference type is a named construct (object), potentially representing +multiple pieces of data (member fields) and logic to manipulate that data +(member methods), defined as part of the application programming interface +(API) for scripts. + +A reference type instance is a single set of data for one reference type +object allocated to the heap. Use the +<> to allocate a reference type +instance. Use a reference type instance to load from, store to, and manipulate +complex data. + +A reference type value refers to a reference type instance, and multiple +reference type values may refer to the same reference type instance. A change to +a reference type instance will affect all reference type values referring to +that specific instance. + +Declare a reference type <>, and assign it a +reference type value for evaluation during later operations. The default value +for a newly-declared reference type variable is `null`. A reference type value +is shallow-copied during an assignment or as an argument for a method/function +call. Assign `null` to a reference type variable to indicate the reference type +value refers to no reference type instance. The JVM will garbage collect a +reference type instance when it is no longer referred to by any reference type +values. Pass `null` as an argument to a method/function call to indicate the +argument refers to no reference type instance. + +A reference type object defines zero-to-many of each of the following: + +static member field:: + +A static member field is a named and typed piece of data. Each reference type +*object* contains one set of data representative of its static member fields. +Use the <> in correspondence with the +reference type object name to access a static member field for loading and +storing to a specific reference type *object*. No reference type instance +allocation is necessary to use a static member field. + +non-static member field:: + +A non-static member field is a named and typed piece of data. Each reference +type *instance* contains one set of data representative of its reference type +object's non-static member fields. Use the +<> for loading and storing to a non-static +member field of a specific reference type *instance*. An allocated reference +type instance is required to use a non-static member field. + +static member method:: + +A static member method is a function called on a reference type *object*. Use +the <> in correspondence with the reference +type object name to call a static member method. No reference type instance +allocation is necessary to use a static member method. + +non-static member method:: + +A non-static member method is a function called on a reference type *instance*. +A non-static member method called on a reference type instance can load from and +store to non-static member fields of that specific reference type instance. 
Use +the <> in correspondence with a specific +reference type instance to call a non-static member method. An allocated +reference type instance is required to use a non-static member method. + +constructor:: + +A constructor is a special type of function used to allocate a reference type +*instance* defined by a specific reference type *object*. Use the +<> to allocate a reference type +instance. + +A reference type object follows a basic inheritance model. Consider types A and +B. Type A is considered to be a parent of B, and B a child of A, if B inherits +(is able to access as its own) all of A's non-static members. Type B is considered a descendant of A if there exists a recursive parent-child relationship from B to A with none to many types in between. In this case, B -inherits all of A's fields and methods along with all of the fields and -methods of the types in between. Type B is also considered to be a type A -in both relationships. - -For the complete list of Painless reference types and their supported methods, -see the https://www.elastic.co/guide/en/elasticsearch/reference/current/painless-api-reference.html[Painless API Reference]. +inherits all of A's non-static members along with all of the non-static members +of the types in between. Type B is also considered to be a type A in both +relationships. -For more information about working with reference types, see -<> and <>. +*Examples* -*Examples:* -[source,Java] +* Reference types evaluated in several different operations. ++ +[source,Painless] +---- +<1> List l = new ArrayList(); +<2> l.add(1); +<3> int i = l.get(0) + 2; +---- ++ +<1> declare `List l`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + assign `List reference` to `l` +<2> access `l` -> `List reference`; + implicit cast `int 1` to `def` -> `def` + call `add` on `List reference` with arguments (`def`) +<3> declare `int i`; + access `l` -> `List reference`; + call `get` on `List reference` with arguments (`int 0`) -> `def`; + implicit cast `def` to `int 1` -> `int 1`; + add `int 1` and `int 2` -> `int 3`; + assign `int 3` to `i` ++ +* Sharing a reference type instance. 
++ +[source,Painless] ---- -ArrayList al = new ArrayList(); // Declare variable al as an ArrayList and - // set it to a newly allocated ArrayList -List l = new ArrayList(); // Declare variable l as a List and set - // it to a newly allocated ArrayList, which is - // allowed because ArrayList inherits from List -Map m; // Declare variable m as a Map and set it - // to the default value of null +<1> List l0 = new ArrayList(); +<2> List l1 = l0; +<3> l0.add(1); +<4> l1.add(2); +<5> int i = l1.get(0) + l0.get(1); ---- ++ +<1> declare `List l0`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + assign `List reference` to `l0` +<2> declare `List l1`; + access `l0` -> `List reference`; + assign `List reference` to `l1` + (note `l0` and `l1` refer to the same instance known as a shallow-copy) +<3> access `l0` -> `List reference`; + implicit cast `int 1` to `def` -> `def` + call `add` on `List reference` with arguments (`def`) +<4> access `l1` -> `List reference`; + implicit cast `int 2` to `def` -> `def` + call `add` on `List reference` with arguments (`def`) +<5> declare `int i`; + access `l0` -> `List reference`; + call `get` on `List reference` with arguments (`int 0`) -> `def @0`; + implicit cast `def @0` to `int 1` -> `int 1`; + access `l1` -> `List reference`; + call `get` on `List reference` with arguments (`int 1`) -> `def @1`; + implicit cast `def @1` to `int 2` -> `int 2`; + add `int 1` and `int 2` -> `int 3`; + assign `int 3` to `i`; ++ +* Using the static members of a reference type. ++ +[source,Painless] +---- +<1> int i = Integer.MAX_VALUE; +<2> long l = Long.parseLong("123"); +---- ++ +<1> declare `int i`; + access `MAX_VALUE` on `Integer` -> `int 2147483647`; + assign `int 2147483647` to `i` +<2> declare `long l`; + call `parseLong` on `Long` with arguments (`String "123"`) -> `long 123`; + assign `long 123` to `l` + +[[dynamic-types]] +==== Dynamic Types + +A dynamic type value can represent the value of any primitive type or +reference type using a single type name `def`. A `def` type value mimics +the behavior of whatever value it represents at run-time and will always +represent the child-most descendant type value of any type value when evaluated +during operations. + +Declare a `def` type <>, and assign it +any type of value for evaluation during later operations. The default value +for a newly-declared `def` type variable is `null`. A `def` type variable or +method/function parameter can change the type it represents during the +compilation and evaluation of a script. + +Using the `def` type can have a slight impact on performance. Use only primitive +types and reference types directly when performance is critical. + +*Errors* -Directly accessing static pieces of a reference type. +* If a `def` type value represents an inappropriate type for evaluation of an + operation at run-time. -[source,Java] +*Examples* + +* General uses of the `def` type. ++ +[source,Painless] +---- +<1> def dp = 1; +<2> def dr = new ArrayList(); +<3> dr = dp; +---- ++ +<1> declare `def dp`; + implicit cast `int 1` to `def` -> `def`; + assign `def` to `dp` +<2> declare `def dr`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `def` -> `def`; + assign `def` to `dr` +<3> access `dp` -> `def`; + assign `def` to `dr`; + (note the switch in the type `dr` represents from `ArrayList` to `int`) ++ +* A `def` type value representing the child-most descendant of a value. 
++ +[source,Painless] ---- -Integer.MAX_VALUE // a static field access -Long.parseLong("123L") // a static function call +<1> Object l = new ArrayList(); +<2> def d = l; +<3> d.ensureCapacity(10); ---- ++ +<1> declare `Object l`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `Object reference` + -> `Object reference`; + assign `Object reference` to `l` +<2> declare `def d`; + access `l` -> `Object reference`; + implicit cast `Object reference` to `def` -> `def`; + assign `def` to `d` +<3> access `d` -> `def`; + implicit cast `def` to `ArrayList reference` -> `ArrayList reference`; + call `ensureCapacity` on `ArrayList reference` with arguments (`int 10`); + (note `def` was implicit cast to `ArrayList reference` + since `ArrayList` is the child-most descendant type value that the + `def` type value represents) [[string-type]] ==== String Type -A `String` is a specialized reference type that is immutable and does not have -to be explicitly allocated. You can directly assign to a `String` without first -allocating it with the `new` keyword. (Strings can be allocated with the `new` -keyword, but it's not required.) +The `String` type is a specialized reference type that does not require +explicit allocation. Use a <> to directly evaluate a +`String` type value. While not required, the +<> can allocate `String` type +instances. -When assigning a value to a `String`, you must enclose the text in single or -double quotes. Strings are allocated according to the standard Java Memory Model. -The default value for a `String` is `null.` +*Examples* -*Examples:* -[source,Java] +* General use of the `String` type. ++ +[source,Painless] ---- -String r = "some text"; // Declare String r and set it to the - // String "some text" -String s = 'some text'; // Declare String s and set it to the - // String 'some text' -String t = new String("some text"); // Declare String t and set it to the - // String "some text" -String u; // Declare String u and set it to the - // default value null +<1> String r = "some text"; +<2> String s = 'some text'; +<3> String t = new String("some text"); +<4> String u; ---- ++ +<1> declare `String r`; + assign `String "some text"` to `r` +<2> declare `String s`; + assign `String 'some text'` to `s` +<3> declare `String t`; + allocate `String` instance with arguments (`String "some text"`) + -> `String "some text"`; + assign `String "some text"` to `t` +<4> declare `String u`; + assign default `null` to `u` [[void-type]] ==== void Type -The `void` type represents the concept of no type. In Painless, `void` declares -that a function has no return value. +The `void` type represents the concept of a lack of type. Use the `void` type to +indicate a function returns no value. -[[array-type]] -==== Array Type +*Examples* -Arrays contain a series of elements of the same type that can be allocated -simultaneously. Painless supports both single and multi-dimensional arrays for -all types except void (including `def`). - -You declare an array by specifying a type followed by a series of empty brackets, -where each set of brackets represents a dimension. Declared arrays have a default -value of `null` and are themselves a reference type. - -To allocate an array, you use the `new` keyword followed by the type and a -set of brackets for each dimension. You can explicitly define the size of each dimension by specifying an expression within the brackets, or initialize each -dimension with the desired number of values.
The allocated size of each -dimension is its permanent size. - -To initialize an array, specify the values you want to initialize -each dimension with as a comma-separated list of expressions enclosed in braces. -For example, `new int[] {1, 2, 3}` creates a one-dimensional `int` array with a -size of 3 and the values 1, 2, and 3. +* Use of the `void` type in a function. ++ +[source,Painless] +---- +void addToList(List l, def d) { + l.add(d); +} +---- -When you initialize an array, the order of the expressions is maintained. Each expression used as part of the initialization is converted to the -array's type. An error occurs if the types do not match. +[[array-type]] +==== Array Type -*Grammar:* -[source,ANTLR4] +An array type is a specialized reference type where an array type instance +represents a series of values allocated to the heap. All values in an array +type instance are of the same type. Each value is assigned an index from within +the range `[0, length)` where `length` is the total number of values allocated for +the array type instance. + +Use the <> or the +<> to allocate an array +type instance. Declare an array type <>, and +assign it an array type value for evaluation during later operations. The +default value for a newly-declared array type variable is `null`. An array type +value is shallow-copied during an assignment or as an argument for a +method/function call. Assign `null` to an array type variable to indicate the +array type value refers to no array type instance. The JVM will garbage collect +an array type instance when it is no longer referred to by any array type +values. Pass `null` as an argument to a method/function call to indicate the +argument refers to no array type instance. + +Use the <> to retrieve the length of an +array type value as an `int` type value. Use the +<> to load from and store to individual +values within an array type value. + +When an array type instance is allocated with multiple dimensions using the +range `[2, d]` where `d >= 2`, each dimension in the range `[1, d-1]` is also +an array type. The type held by each dimension, `n`, has a number of dimensions +equal to `d-n`. For example, consider `int[][][]` with 3 dimensions. The 3rd +dimension, where `d-n` is `0`, holds the primitive type `int`. The 2nd +dimension, where `d-n` is `1`, holds the array type `int[]`. And the 1st +dimension, where `d-n` is `2`, holds the array type `int[][]`. + +*Examples* + +* General use of single-dimensional arrays.
++ +[source,Painless] ---- -declare_array: TYPE ('[' ']')+; - -array_initialization: 'new' TYPE '[' ']' '{' expression (',' expression) '}' - | 'new' TYPE '[' ']' '{' '}'; +<1> int[] x; +<2> float[] y = new float[10]; +<3> def z = new float[5]; +<4> y[9] = 1.0F; +<5> z[0] = y[9]; ---- - -*Examples:* -[source,Java] ++ +<1> declare `int[] x`; + assign default `null` to `x` +<2> declare `float[] y`; + allocate `1-d float array` instance with `length [10]` + -> `1-d float array reference`; + assign `1-d float array reference` to `y` +<3> declare `def z`; + allocate `1-d float array` instance with `length [5]` + -> `1-d float array reference`; + implicit cast `1-d float array reference` to `def` -> `def`; + assign `def` to `z` +<4> access `y` -> `1-d float array reference`; + assign `float 1.0` to `index [9]` of `1-d float array reference` +<5> access `y` -> `1-d float array reference @0`; + access `index [9]` of `1-d float array reference @0` -> `float 1.0`; + access `z` -> `def`; + implicit cast `def` to `1-d float array reference @1` + -> `1-d float array reference @1`; + assign `float 1.0` to `index [0]` of `1-d float array reference @1` ++ +* Use of a multi-dimensional array. ++ +[source,Painless] ---- -int[] x = new int[5]; // Declare int array x and assign it a newly - // allocated int array with a size of 5 -def[][] y = new def[5][5]; // Declare the 2-dimensional def array y and - // assign it a newly allocated 2-dimensional - // array where both dimensions have a size of 5 -int[] x = new int[] {1, 2, 3}; // Declare int array x and set it to an int - // array with values 1, 2, 3 and a size of 3 -int i = 1; -long l = 2L; -float f = 3.0F; -double d = 4.0; -String s = "5"; -def[] da = new def[] {i, l, f*d, s}; // Declare def array da and set it to - // a def array with a size of 4 and the - // values i, l, f*d, and s +<1> int[][][] ia3 = new int[2][3][4]; +<2> ia3[1][2][3] = 99; +<3> int i = ia3[1][2][3]; ---- ++ +<1> declare `int[][][] ia3`; + allocate `3-d int array` instance with `length [2, 3, 4]` + -> `3-d int array reference`; + assign `3-d int array reference` to `ia3` +<2> access `ia3` -> `3-d int array reference`; + assign `int 99` to `index [1, 2, 3]` of `3-d int array reference` +<3> declare `int i`; + access `ia3` -> `3-d int array reference`; + access `index [1, 2, 3]` of `3-d int array reference` -> `int 99`; + assign `int 99` to `i` diff --git a/docs/painless/painless-variables.asciidoc b/docs/painless/painless-variables.asciidoc index 9756676a08b5b..8b8782b151132 100644 --- a/docs/painless/painless-variables.asciidoc +++ b/docs/painless/painless-variables.asciidoc @@ -1,29 +1,31 @@ [[painless-variables]] === Variables -<> variables to <> values for -<> in expressions. Specify variables as a -<>, <>, or -<>. Variable operations follow the structure of a -standard JVM in relation to instruction execution and memory usage. +A variable loads and stores a value for evaluation during +<>. [[declaration]] ==== Declaration -Declare variables before use with the format of <> -<>. Specify a comma-separated list of -<> following the <> -to declare multiple variables in a single statement. Use an -<> statement combined with a declaration statement to -immediately assign a value to a variable. Variables not immediately assigned a -value will have a default value assigned implicitly based on the -<>. +Declare a variable before use with the format of <> +followed by <>.
Declare an +<> variable using an opening `[` token and a closing `]` +token for each dimension directly after the type. Specify a +comma-separated list of identifiers following the type to declare multiple +variables in a single statement. Use an <> +combined with a declaration to immediately assign a value to a variable. +A variable not immediately assigned a value will have a default value assigned +implicitly based on the type. + +*Errors* + +* If a variable is used prior to or without declaration. *Grammar* [source,ANTLR4] ---- declaration : type ID assignment? (',' ID assignment?)*; -type: ID ('[' ']')*; +type: ID ('.' ID)* ('[' ']')*; assignment: '=' expression; ---- @@ -35,27 +37,43 @@ assignment: '=' expression; ---- <1> int x; <2> List y; -<3> int x, y, z; -<4> def[] d; +<3> int x, y = 5, z; +<4> def d; <5> int i = 10; ----- -+ -<1> declare a variable of type `int` and identifier `x` -<2> declare a variable of type `List` and identifier `y` -<3> declare three variables of type `int` and identifiers `x`, `y`, `z` -<4> declare a variable of type `def[]` and identifier `d` -<5> declare a variable of type `int` and identifier `i`; - assign the integer literal `10` to `i` +<6> float[] f; +<7> Map[][] m; +---- ++ +<1> declare `int x`; + assign default `null` to `x` +<2> declare `List y`; + assign default `null` to `y` +<3> declare `int x`; + assign default `int 0` to `x`; + declare `int y`; + assign `int 5` to `y`; + declare `int z`; + assign default `int 0` to `z` +<4> declare `def d`; + assign default `null` to `d` +<5> declare `int i`; + assign `int 10` to `i` +<6> declare `float[] f`; + assign default `null` to `f` +<7> declare `Map[][] m`; + assign default `null` to `m` [[assignment]] ==== Assignment -Use the `equals` operator (`=`) to assign a value to a variable. Any expression +Use the *assignment operator* to store a value in a variable. Any operation that produces a value can be assigned to any variable as long as the -<> are the same or the resultant -<> can be implicitly <> to -the variable <>. Otherwise, an error will occur. -<> values are shallow-copied when assigned. +<> are the same or the resultant type can be +<> to the variable type. + +*Errors* + +* If the type of the value is unable to match the type of the variable. *Grammar* [source,ANTLR4] ---- assignment: ID '=' expression ---- @@ -65,7 +83,7 @@ assignment: ID '=' expression *Examples* -* Variable assignment with an <>. +* Variable assignment with an integer literal. + [source,Painless] ---- @@ -73,10 +91,11 @@ assignment: ID '=' expression <2> i = 10; ---- + -<1> declare `int i` -<2> assign `10` to `i` +<1> declare `int i`; + assign default `int 0` to `i` +<2> assign `int 10` to `i` + -* <> combined with immediate variable assignment. +* Declaration combined with immediate assignment. + [source,Painless] ---- @@ -84,11 +103,12 @@ assignment: ID '=' expression <2> double j = 2.0; ---- + -<1> declare `int i`; assign `10` to `i` -<2> declare `double j`; assign `2.0` to `j` +<1> declare `int i`; + assign `int 10` to `i` +<2> declare `double j`; + assign `double 2.0` to `j` + -* Assignment of one variable to another using -<>. +* Assignment of one variable to another using primitive types. + [source,Painless] ---- @@ -96,11 +116,13 @@ assignment: ID '=' expression <2> int j = i; ---- + -<1> declare `int i`; assign `10` to `i` -<2> declare `int j`; assign `j` to `i` +<1> declare `int i`; + assign `int 10` to `i` +<2> declare `int j`; + access `i` -> `int 10`; + assign `int 10` to `j` + -* Assignment with <> using the -<>.
+* Assignment with reference types using the *new instance operator*. + [source,Painless] ---- @@ -108,12 +130,15 @@ assignment: ID '=' expression <2> Map m = new HashMap(); ---- + -<1> declare `ArrayList l`; assign a newly-allocated `Arraylist` to `l` -<2> declare `Map m`; assign a newly-allocated `HashMap` to `m` - with an implicit cast to `Map` +<1> declare `ArrayList l`; + allocate `ArrayList` instance -> `ArrayList reference`; + assign `ArrayList reference` to `l` +<2> declare `Map m`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `Map reference` -> `Map reference`; + assign `Map reference` to `m` + -* Assignment of one variable to another using -<>. +* Assignment of one variable to another using reference types. + [source,Painless] ---- @@ -123,8 +148,52 @@ assignment: ID '=' expression <4> m = k; ---- + -<1> declare `List l`; assign a newly-allocated `Arraylist` to `l` - with an implicit cast to `List` -<2> declare `List k`; assign a shallow-copy of `l` to `k` +<1> declare `List l`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + assign `List reference` to `l` +<2> declare `List k`; + access `l` -> `List reference`; + assign `List reference` to `k`; + (note `l` and `k` refer to the same instance known as a shallow-copy) <3> declare `List m`; -<4> assign a shallow-copy of `k` to `m` + assign default `null` to `m` +<4> access `k` -> `List reference`; + assign `List reference` to `m`; + (note `l`, `k`, and `m` refer to the same instance) ++ +* Assignment with an array type variable using the *new array operator*. ++ +[source,Painless] +---- +<1> int[] ia1; +<2> ia1 = new int[2]; +<3> ia1[0] = 1; +<4> int[] ib1 = ia1; +<5> int[][] ic2 = new int[2][5]; +<6> ic2[1][3] = 2; +<7> ic2[0] = ia1; +---- ++ +<1> declare `int[] ia1`; + assign default `null` to `ia1` +<2> allocate `1-d int array` instance with `length [2]` + -> `1-d int array reference`; + assign `1-d int array reference` to `ia1` +<3> access `ia1` -> `1-d int array reference`; + assign `int 1` to `index [0]` of `1-d int array reference` +<4> declare `int[] ib1`; + access `ia1` -> `1-d int array reference`; + assign `1-d int array reference` to `ib1`; + (note `ia1` and `ib1` refer to the same instance known as a shallow-copy) +<5> declare `int[][] ic2`; + allocate `2-d int array` instance with `length [2, 5]` + -> `2-d int array reference`; + assign `2-d int array reference` to `ic2` +<6> access `ic2` -> `2-d int array reference`; + assign `int 2` to `index [1, 3]` of `2-d int array reference` +<7> access `ia1` -> `1-d int array reference`; + access `ic2` -> `2-d int array reference`; + assign `1-d int array reference` to + `index [0]` of `2-d int array reference`; + (note `ia1`, `ib1`, and `index [0]` of `ic2` refer to the same instance) diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index 9f80e4ab91c60..893acf05a4bad 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -84,7 +84,7 @@ When `proxy.type` is set to `http` or `socks`, `proxy.host` and `proxy.port` mus [[repository-azure-repository-settings]] -===== Repository settings +==== Repository settings The Azure repository supports following settings: @@ -178,7 +178,7 @@ client.admin().cluster().preparePutRepository("my_backup_java1") ---- [[repository-azure-validation]] -===== Repository validation rules +==== Repository validation rules According
to the http://msdn.microsoft.com/en-us/library/dd135715.aspx[containers naming guide], a container name must be a valid DNS name, conforming to the following naming rules: diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index d718a0b2da6ff..78f7607b1e443 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -378,7 +378,8 @@ PUT /catalan_example "filter": { "catalan_elision": { "type": "elision", - "articles": [ "d", "l", "m", "n", "s", "t"] + "articles": [ "d", "l", "m", "n", "s", "t"], + "articles_case": true }, "catalan_stop": { "type": "stop", @@ -1156,7 +1157,8 @@ PUT /italian_example "nell", "sull", "coll", "pell", "gl", "agl", "dagl", "degl", "negl", "sugl", "un", "m", "t", "s", "v", "d" - ] + ], + "articles_case": true }, "italian_stop": { "type": "stop", diff --git a/docs/reference/analysis/tokenizers.asciidoc b/docs/reference/analysis/tokenizers.asciidoc index add0abdec0123..d6f15ded05fab 100644 --- a/docs/reference/analysis/tokenizers.asciidoc +++ b/docs/reference/analysis/tokenizers.asciidoc @@ -103,6 +103,11 @@ The `simple_pattern` tokenizer uses a regular expression to capture matching text as terms. It uses a restricted subset of regular expression features and is generally faster than the `pattern` tokenizer. +<>:: + +The `char_group` tokenizer is configurable through sets of characters to split +on, which is usually less expensive than running regular expressions. + <>:: The `simple_pattern_split` tokenizer uses the same restricted regular expression @@ -143,6 +148,8 @@ include::tokenizers/keyword-tokenizer.asciidoc[] include::tokenizers/pattern-tokenizer.asciidoc[] +include::tokenizers/chargroup-tokenizer.asciidoc[] + include::tokenizers/simplepattern-tokenizer.asciidoc[] include::tokenizers/simplepatternsplit-tokenizer.asciidoc[] diff --git a/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc new file mode 100644 index 0000000000000..e6bf79b0e961f --- /dev/null +++ b/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc @@ -0,0 +1,80 @@ +[[analysis-chargroup-tokenizer]] +=== Char Group Tokenizer + +The `char_group` tokenizer breaks text into terms whenever it encounters a +character which is in a defined set. It is mostly useful for cases where a simple +custom tokenization is desired, and the overhead of the <> +is not acceptable. + +[float] +=== Configuration + +The `char_group` tokenizer accepts one parameter: + +[horizontal] +`tokenize_on_chars`:: + A list of characters to tokenize the string on. Whenever a character + from this list is encountered, a new token is started. This accepts either single + characters, e.g. `-`, or character groups: `whitespace`, `letter`, `digit`, + `punctuation`, `symbol`.
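+
+The tokenizer is typically referenced from a custom analyzer. As a minimal
+sketch (the `my_index`, `my_analyzer`, and `my_splitter` names below are
+illustrative, not part of the API):
+
+[source,js]
+---------------------------
+PUT my_index
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "my_analyzer": {
+          "tokenizer": "my_splitter" <1>
+        }
+      },
+      "tokenizer": {
+        "my_splitter": {
+          "type": "char_group",
+          "tokenize_on_chars": [
+            "whitespace",
+            "-"
+          ]
+        }
+      }
+    }
+  }
+}
+---------------------------
+// CONSOLE
+<1> `my_splitter` is an arbitrary name referring to the `char_group` tokenizer
+defined below it.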
+ + +[float] +=== Example output + +[source,js] +--------------------------- +POST _analyze +{ + "tokenizer": { + "type": "char_group", + "tokenize_on_chars": [ + "whitespace", + "-", + "\n" + ] + }, + "text": "The QUICK brown-fox" +} +--------------------------- +// CONSOLE + +returns + +[source,js] +--------------------------- +{ + "tokens": [ + { + "token": "The", + "start_offset": 0, + "end_offset": 3, + "type": "word", + "position": 0 + }, + { + "token": "QUICK", + "start_offset": 4, + "end_offset": 9, + "type": "word", + "position": 1 + }, + { + "token": "brown", + "start_offset": 10, + "end_offset": 15, + "type": "word", + "position": 2 + }, + { + "token": "fox", + "start_offset": 16, + "end_offset": 19, + "type": "word", + "position": 3 + } + ] +} +--------------------------- +// TESTRESPONSE + diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index 2cbc3a5bc54ad..ecb2e8dace23a 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -40,6 +40,8 @@ string:: <> and <> <>:: Defines parent/child relation for documents within the same index +<>:: Record numeric features to boost hits at query time. + [float] === Multi-fields @@ -86,6 +88,6 @@ include::types/percolator.asciidoc[] include::types/parent-join.asciidoc[] - +include::types/feature.asciidoc[] diff --git a/docs/reference/mapping/types/feature.asciidoc b/docs/reference/mapping/types/feature.asciidoc new file mode 100644 index 0000000000000..3b5e78d5fb46b --- /dev/null +++ b/docs/reference/mapping/types/feature.asciidoc @@ -0,0 +1,59 @@ +[[feature]] +=== Feature datatype + +A `feature` field can index numbers so that they can later be used to boost +documents in queries with a <> query. + +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "_doc": { + "properties": { + "pagerank": { + "type": "feature" <1> + }, + "url_length": { + "type": "feature", + "positive_score_impact": false <2> + } + } + } + } +} + +PUT my_index/_doc/1 +{ + "pagerank": 8, + "url_length": 22 +} + +GET my_index/_search +{ + "query": { + "feature": { + "field": "pagerank" + } + } +} +-------------------------------------------------- +// CONSOLE +<1> Feature fields must use the `feature` field type +<2> Features that correlate negatively with the score need to declare it + +NOTE: `feature` fields only support single-valued fields and strictly positive +values. Multi-valued fields and negative values will be rejected. + +NOTE: `feature` fields do not support querying, sorting or aggregating. They may +only be used within <> queries. + +NOTE: `feature` fields only preserve 9 significant bits for the precision, which +translates to a relative error of about 0.4%. + +Features that correlate negatively with the score should set +`positive_score_impact` to `false` (defaults to `true`). This will be used by +the <> query to modify the scoring formula +in such a way that the score decreases with the value of the feature instead of +increasing. For instance in web search, the url length is a commonly used +feature which correlates negatively with scores. diff --git a/docs/reference/migration/migrate_7_0/plugins.asciidoc b/docs/reference/migration/migrate_7_0/plugins.asciidoc index 365a2c5a39f1e..829a93573c905 100644 --- a/docs/reference/migration/migrate_7_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_7_0/plugins.asciidoc @@ -10,7 +10,7 @@ You need to use settings which are starting with `azure.client.` prefix instead. 
* Global timeout setting `cloud.azure.storage.timeout` has been removed. You must set it per azure client instead. Like `azure.client.default.timeout: 10s` for example. -See {plugins}/repository-azure-usage.html#repository-azure-repository-settings[Azure Repository settings]. +See {plugins}/repository-azure-repository-settings.html#repository-azure-repository-settings[Azure Repository settings]. ==== Google Cloud Storage Repository plugin diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index d62d7e6065de0..7826afc05fa59 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -29,6 +29,14 @@ [[remove-http-enabled]] ==== Http enabled setting removed -The setting `http.enabled` previously allowed disabling binding to HTTP, only allowing +* The setting `http.enabled` previously allowed disabling binding to HTTP, only allowing use of the transport client. This setting has been removed, as the transport client will be removed in the future, thus requiring HTTP to always be enabled. + +[[remove-http-pipelining-setting]] +==== Http pipelining setting removed + +* The setting `http.pipelining` previously allowed disabling HTTP pipelining support. +This setting has been removed, as disabling http pipelining support on the server +provided little value. The setting `http.pipelining.max_events` can still be used to +limit the number of pipelined requests in-flight. diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index 7f29a9db7f605..dab8e8136893e 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -96,8 +96,6 @@ and stack traces in response output. Note: When set to `false` and the `error_tr parameter is specified, an error will be returned; when `error_trace` is not specified, a simple message will be returned. Defaults to `true` -|`http.pipelining` |Enable or disable HTTP pipelining, defaults to `true`. - |`http.pipelining.max_events` |The maximum number of events to be queued up in memory before a HTTP connection is closed, defaults to `10000`. |`http.max_warning_header_count` |The maximum number of warning headers in diff --git a/docs/reference/modules/indices/circuit_breaker.asciidoc b/docs/reference/modules/indices/circuit_breaker.asciidoc index 857f54132cc0a..3df187086bb69 100644 --- a/docs/reference/modules/indices/circuit_breaker.asciidoc +++ b/docs/reference/modules/indices/circuit_breaker.asciidoc @@ -76,7 +76,7 @@ memory on a node. The memory usage is based on the content length of the request [float] ==== Accounting requests circuit breaker -The in flight requests circuit breaker allows Elasticsearch to limit the memory +The accounting circuit breaker allows Elasticsearch to limit the memory usage of things held in memory that are not released when a request is completed. This includes things like the Lucene segment memory. diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index f70857e66c86f..7efcf222f3ac0 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -124,8 +124,8 @@ the shared file system repository it is necessary to mount the same shared files master and data nodes. This location (or one of its parent directories) must be registered in the `path.repo` setting on all master and data nodes. 
-Assuming that the shared filesystem is mounted to `/mount/backups/my_backup`, the following setting should be added to -`elasticsearch.yml` file: +Assuming that the shared filesystem is mounted to `/mount/backups/my_fs_backup_location`, the following setting should +be added to the `elasticsearch.yml` file: [source,yaml] -------------- @@ -141,7 +141,7 @@ path.repo: ["\\\\MY_SERVER\\Snapshots"] -------------- After all nodes are restarted, the following command can be used to register the shared file system repository with -the name `my_backup`: +the name `my_fs_backup`: [source,js] ----------------------------------- @@ -419,7 +419,7 @@ A repository can be unregistered using the following command: [source,sh] ----------------------------------- -DELETE /_snapshot/my_fs_backup +DELETE /_snapshot/my_backup ----------------------------------- // CONSOLE // TEST[continued] diff --git a/docs/reference/query-dsl/feature-query.asciidoc b/docs/reference/query-dsl/feature-query.asciidoc new file mode 100644 index 0000000000000..19c29b1cf3ab8 --- /dev/null +++ b/docs/reference/query-dsl/feature-query.asciidoc @@ -0,0 +1,181 @@ +[[query-dsl-feature-query]] +=== Feature Query + +The `feature` query is a specialized query that only works on +<> fields. Its goal is to boost the score of documents based +on the values of numeric features. It is typically put in a `should` clause of +a <> query so that its score is added to the score +of the query. + +Compared to using <> or other +ways to modify the score, this query has the benefit of being able to +efficiently skip non-competitive hits when +<> is set to `false`. Speedups may be +spectacular. + +Here is an example: + +[source,js] +-------------------------------------------------- +PUT test +{ + "mappings": { + "_doc": { + "properties": { + "pagerank": { + "type": "feature" + }, + "url_length": { + "type": "feature", + "positive_score_impact": false + } + } + } + } +} + +PUT test/_doc/1 +{ + "pagerank": 10, + "url_length": 50 +} + +PUT test/_doc/2 +{ + "pagerank": 100, + "url_length": 20 +} + +POST test/_refresh + +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank" + } + } +} + +GET test/_search +{ + "query": { + "feature": { + "field": "url_length" + } + } +} +-------------------------------------------------- +// CONSOLE + +[float] +=== Supported functions + +The `feature` query supports 3 functions in order to boost scores using the +values of features. If you do not know where to start, we recommend that you +start with the `saturation` function, which is the default when no function is +provided. + +[float] +==== Saturation + +This function gives a score that is equal to `S / (S + pivot)` where `S` is the +value of the feature and `pivot` is a configurable pivot value so that the +result will be less than +0.5+ if `S` is less than `pivot` and greater than +0.5+ +otherwise. Scores are always in +(0, 1)+. + +If the feature has a negative score impact then the function will be computed as +`pivot / (S + pivot)`, which decreases when `S` increases. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "saturation": { + "pivot": 8 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If +pivot+ is not supplied then Elasticsearch will compute a default value that +will be approximately equal to the geometric mean of all feature values that +exist in the index.
We recommend this if you haven't had the opportunity to +train a good pivot value. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "saturation": {} + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[float] +==== Logarithm + +This function gives a score that is equal to `log(scaling_factor + S)` where +`S` is the value of the feature and `scaling_factor` is a configurable scaling +factor. Scores are unbounded. + +This function only supports features that have a positive score impact. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "log": { + "scaling_factor": 4 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[float] +==== Sigmoid + +This function is an extension of `saturation` which adds a configurable +exponent. Scores are computed as `S^exp^ / (S^exp^ + pivot^exp^)`. As with the +`saturation` function, `pivot` is the value of `S` that gives a score of +0.5+ +and scores are in +(0, 1)+. + +`exponent` must be positive, but is typically in +[0.5, 1]+. A good value should +be computed via training. If you don't have the opportunity to do so, we recommend +that you stick to the `saturation` function instead. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "sigmoid": { + "pivot": 7, + "exponent": 0.6 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index 08b504951e1bf..4e00a2f49b475 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -93,6 +93,7 @@ to 'shapes'. * `type` - Index type where the pre-indexed shape is. * `path` - The field specified as path containing the pre-indexed shape. Defaults to 'shape'. +* `routing` - The routing of the shape document if required. The following is an example of using the Filter with a pre-indexed shape: diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index a062fa7ddb1fb..4c69889040eb1 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -19,6 +19,11 @@ This query allows a script to act as a filter. Also see the This query finds queries that are stored as documents that match with the specified document. +<>:: + +A query that computes scores based on the values of numeric features and is +able to efficiently skip non-competitive hits. + <>:: A query that accepts other queries as json or yaml string.
@@ -29,4 +34,6 @@ include::script-query.asciidoc[] include::percolate-query.asciidoc[] +include::feature-query.asciidoc[] + include::wrapper-query.asciidoc[] diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc index b4d2493d8536d..9d917c27ab084 100644 --- a/docs/reference/search/request/docvalue-fields.asciidoc +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -11,13 +11,38 @@ GET /_search "query" : { "match_all": {} }, - "docvalue_fields" : ["test1", "test2"] + "docvalue_fields" : [ + { + "field": "my_ip_field", <1> + "format": "use_field_mapping" <2> + }, + { + "field": "my_date_field", + "format": "epoch_millis" <3> + } + ] } -------------------------------------------------- // CONSOLE +<1> the name of the field +<2> the special `use_field_mapping` format tells Elasticsearch to use the format from the mapping +<3> date fields may use a custom format Doc value fields can work on fields that are not stored. Note that if the fields parameter specifies fields without docvalues it will try to load the value from the fielddata cache causing the terms for that field to be loaded to memory (cached), which will result in more memory consumption. +[float] +==== Custom formats + +While most fields do not support custom formats, some of them do: + - <> fields can take any <>. + - <> fields accept a https://docs.oracle.com/javase/8/docs/api/java/text/DecimalFormat.html[DecimalFormat pattern]. + +All fields support the special `use_field_mapping` format, which tells +Elasticsearch to use the mappings to figure out a default format. + +NOTE: The default is currently to return the same output as +<>. However it will change in 7.0 +to behave as if the `use_field_mapping` format was provided. 
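+
+For instance, a numeric field could be formatted with a
+https://docs.oracle.com/javase/8/docs/api/java/text/DecimalFormat.html[DecimalFormat pattern].
+A minimal sketch (the `my_double_field` name below is illustrative):
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+    "query" : {
+        "match_all": {}
+    },
+    "docvalue_fields" : [
+        {
+            "field": "my_double_field",
+            "format": "#.00" <1>
+        }
+    ]
+}
+--------------------------------------------------
+// CONSOLE
+<1> `#.00` renders the value with two decimal places, for example `3.14`.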
diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index dce6bb2a2d8bc..887ae2bdf149e 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -242,7 +242,12 @@ POST test/_search }, "inner_hits": { "_source" : false, - "docvalue_fields" : ["comments.text.keyword"] + "docvalue_fields" : [ + { + "field": "comments.text.keyword", + "format": "use_field_mapping" + } + ] } } } diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index 55623faf2684c..da5868ea7d65e 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -15,13 +15,13 @@ GET /_search "test1" : { "script" : { "lang": "painless", - "source": "doc['my_field_name'].value * 2" + "source": "doc['price'].value * 2" } }, "test2" : { "script" : { "lang": "painless", - "source": "doc['my_field_name'].value * params.factor", + "source": "doc['price'].value * params.factor", "params" : { "factor" : 2.0 } @@ -31,7 +31,7 @@ GET /_search } -------------------------------------------------- // CONSOLE - +// TEST[setup:sales] -Script fields can work on fields that are not stored (`my_field_name` in -the above case), and allow to return custom values to be returned (the +Script fields can work on fields that are not stored (`price` in +the above case), and allow custom values to be returned (the diff --git a/docs/reference/search/request/search-type.asciidoc b/docs/reference/search/request/search-type.asciidoc index 622b01c453e0a..7cac034f29c25 100644 --- a/docs/reference/search/request/search-type.asciidoc +++ b/docs/reference/search/request/search-type.asciidoc @@ -7,7 +7,7 @@ scattered to all the relevant shards and then all the results are gathered back. When doing scatter/gather type execution, there are several ways to do that, specifically with search engines. -One of the questions when executing a distributed search is how much +One of the questions when executing a distributed search is how many results to retrieve from each shard. For example, if we have 10 shards, the 1st shard might hold the most relevant results from 0 till 10, with other shards results ranking below it.
For this reason, when executing a diff --git a/libs/elasticsearch-nio/build.gradle b/libs/elasticsearch-nio/build.gradle index a32a860a62848..018874adf7082 100644 --- a/libs/elasticsearch-nio/build.gradle +++ b/libs/elasticsearch-nio/build.gradle @@ -33,8 +33,6 @@ publishing { } dependencies { - compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" - testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" @@ -64,18 +62,3 @@ forbiddenApisMain { // es-all is not checked as we connect and accept sockets signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } - -//JarHell is part of es core, which we don't want to pull in -jarHell.enabled=false - -thirdPartyAudit.excludes = [ - 'org/osgi/framework/AdaptPermission', - 'org/osgi/framework/AdminPermission', - 'org/osgi/framework/Bundle', - 'org/osgi/framework/BundleActivator', - 'org/osgi/framework/BundleContext', - 'org/osgi/framework/BundleEvent', - 'org/osgi/framework/SynchronousBundleListener', - 'org/osgi/framework/wiring/BundleWire', - 'org/osgi/framework/wiring/BundleWiring' -] diff --git a/libs/elasticsearch-nio/licenses/log4j-api-2.9.1.jar.sha1 b/libs/elasticsearch-nio/licenses/log4j-api-2.9.1.jar.sha1 deleted file mode 100644 index e1a89fadfed95..0000000000000 --- a/libs/elasticsearch-nio/licenses/log4j-api-2.9.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a2999229464e7a324aa503c0a52ec0f05efe7bd \ No newline at end of file diff --git a/libs/elasticsearch-nio/licenses/log4j-api-LICENSE.txt b/libs/elasticsearch-nio/licenses/log4j-api-LICENSE.txt deleted file mode 100644 index 6279e5206de13..0000000000000 --- a/libs/elasticsearch-nio/licenses/log4j-api-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 1999-2005 The Apache Software Foundation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/libs/elasticsearch-nio/licenses/log4j-api-NOTICE.txt b/libs/elasticsearch-nio/licenses/log4j-api-NOTICE.txt deleted file mode 100644 index 0375732360047..0000000000000 --- a/libs/elasticsearch-nio/licenses/log4j-api-NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -Apache log4j -Copyright 2007 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). 
\ No newline at end of file diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java index 474efad3c77b9..f3aab9c9be125 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java @@ -19,11 +19,9 @@ package org.elasticsearch.nio; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; - import java.io.IOException; import java.nio.channels.SelectionKey; +import java.util.function.Consumer; import java.util.function.Supplier; /** @@ -33,8 +31,8 @@ public class AcceptorEventHandler extends EventHandler { private final Supplier selectorSupplier; - public AcceptorEventHandler(Logger logger, Supplier selectorSupplier) { - super(logger); + public AcceptorEventHandler(Supplier selectorSupplier, Consumer exceptionHandler) { + super(exceptionHandler); this.selectorSupplier = selectorSupplier; } @@ -58,7 +56,7 @@ protected void handleRegistration(ServerChannelContext context) throws IOExcepti * @param exception that occurred */ protected void registrationException(ServerChannelContext context, Exception exception) { - logger.error(new ParameterizedMessage("failed to register server channel: {}", context.getChannel()), exception); + context.handleException(exception); } /** @@ -78,7 +76,6 @@ protected void acceptChannel(ServerChannelContext context) throws IOException { * @param exception that occurred */ protected void acceptException(ServerChannelContext context, Exception exception) { - logger.debug(() -> new ParameterizedMessage("exception while accepting new channel from server channel: {}", - context.getChannel()), exception); + context.handleException(exception); } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java index e623e37f005da..c6cf97d10d38e 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java @@ -83,7 +83,7 @@ public void runLoop() { try { selector.close(); } catch (IOException e) { - eventHandler.closeSelectorException(e); + eventHandler.selectorException(e); } finally { runLock.unlock(); exitedLoop.countDown(); @@ -123,7 +123,7 @@ void singleLoop() { throw e; } } catch (IOException e) { - eventHandler.selectException(e); + eventHandler.selectorException(e); } catch (Exception e) { eventHandler.uncaughtException(e); } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java index d35b73c56b88d..cb4d43af4fdc3 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java @@ -19,37 +19,26 @@ package org.elasticsearch.nio; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; - import java.io.IOException; import java.nio.channels.Selector; +import java.util.function.Consumer; public abstract class EventHandler { - protected final Logger logger; - - EventHandler(Logger logger) { - this.logger = logger; - } + protected final Consumer exceptionHandler; - /** - * This method handles an IOException 
that was thrown during a call to {@link Selector#select(long)}. - * - * @param exception the exception - */ - protected void selectException(IOException exception) { - logger.warn(new ParameterizedMessage("io exception during select [thread={}]", Thread.currentThread().getName()), exception); + protected EventHandler(Consumer exceptionHandler) { + this.exceptionHandler = exceptionHandler; } /** - * This method handles an IOException that was thrown during a call to {@link Selector#close()}. + * This method handles an IOException that was thrown during a call to {@link Selector#select(long)} or + * {@link Selector#close()}. * * @param exception the exception */ - protected void closeSelectorException(IOException exception) { - logger.warn(new ParameterizedMessage("io exception while closing selector [thread={}]", Thread.currentThread().getName()), - exception); + protected void selectorException(IOException exception) { + exceptionHandler.accept(exception); } /** @@ -79,11 +68,11 @@ protected void handleClose(ChannelContext context) { /** * This method is called when an attempt to close a channel throws an exception. * - * @param context that was being closed + * @param channel that was being closed * @param exception that occurred */ - protected void closeException(ChannelContext context, Exception exception) { - logger.debug(() -> new ParameterizedMessage("exception while closing channel: {}", context.getChannel()), exception); + protected void closeException(ChannelContext channel, Exception exception) { + channel.handleException(exception); } /** @@ -95,6 +84,6 @@ protected void closeException(ChannelContext context, Exception exception) { * @param exception that was thrown */ protected void genericChannelException(ChannelContext channel, Exception exception) { - logger.debug(() -> new ParameterizedMessage("exception while handling event for channel: {}", channel.getChannel()), exception); + channel.handleException(exception); } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioGroup.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioGroup.java index b763765616275..3f2fd44259c64 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioGroup.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioGroup.java @@ -19,7 +19,6 @@ package org.elasticsearch.nio; -import org.apache.logging.log4j.Logger; import org.elasticsearch.nio.utils.ExceptionsHelper; import java.io.IOException; @@ -29,7 +28,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -56,16 +54,16 @@ public class NioGroup implements AutoCloseable { private final AtomicBoolean isOpen = new AtomicBoolean(true); - public NioGroup(Logger logger, ThreadFactory acceptorThreadFactory, int acceptorCount, - BiFunction, AcceptorEventHandler> acceptorEventHandlerFunction, + public NioGroup(ThreadFactory acceptorThreadFactory, int acceptorCount, + Function, AcceptorEventHandler> acceptorEventHandlerFunction, ThreadFactory socketSelectorThreadFactory, int socketSelectorCount, - Function socketEventHandlerFunction) throws IOException { + Supplier socketEventHandlerFunction) throws IOException { acceptors = new ArrayList<>(acceptorCount); socketSelectors = new ArrayList<>(socketSelectorCount); try { for (int i = 0; i < 
socketSelectorCount; ++i) { - SocketSelector selector = new SocketSelector(socketEventHandlerFunction.apply(logger)); + SocketSelector selector = new SocketSelector(socketEventHandlerFunction.get()); socketSelectors.add(selector); } startSelectors(socketSelectors, socketSelectorThreadFactory); @@ -73,7 +71,7 @@ public NioGroup(Logger logger, ThreadFactory acceptorThreadFactory, int acceptor for (int i = 0; i < acceptorCount; ++i) { SocketSelector[] childSelectors = this.socketSelectors.toArray(new SocketSelector[this.socketSelectors.size()]); Supplier selectorSupplier = new RoundRobinSupplier<>(childSelectors); - AcceptingSelector acceptor = new AcceptingSelector(acceptorEventHandlerFunction.apply(logger, selectorSupplier)); + AcceptingSelector acceptor = new AcceptingSelector(acceptorEventHandlerFunction.apply(selectorSupplier)); acceptors.add(acceptor); } startSelectors(acceptors, acceptorThreadFactory); diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java index cacee47e96196..b486243f2197d 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java @@ -19,23 +19,17 @@ package org.elasticsearch.nio; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; - import java.io.IOException; import java.nio.channels.SelectionKey; -import java.util.function.BiConsumer; +import java.util.function.Consumer; /** * Event handler designed to handle events from non-server sockets */ public class SocketEventHandler extends EventHandler { - private final Logger logger; - - public SocketEventHandler(Logger logger) { - super(logger); - this.logger = logger; + public SocketEventHandler(Consumer exceptionHandler) { + super(exceptionHandler); } /** @@ -62,7 +56,6 @@ protected void handleRegistration(SocketChannelContext context) throws IOExcepti * @param exception that occurred */ protected void registrationException(SocketChannelContext context, Exception exception) { - logger.debug(() -> new ParameterizedMessage("failed to register socket channel: {}", context.getChannel()), exception); context.handleException(exception); } @@ -85,7 +78,6 @@ protected void handleConnect(SocketChannelContext context) throws IOException { * @param exception that occurred */ protected void connectException(SocketChannelContext context, Exception exception) { - logger.debug(() -> new ParameterizedMessage("failed to connect to socket channel: {}", context.getChannel()), exception); context.handleException(exception); } @@ -106,7 +98,6 @@ protected void handleRead(SocketChannelContext context) throws IOException { * @param exception that occurred */ protected void readException(SocketChannelContext context, Exception exception) { - logger.debug(() -> new ParameterizedMessage("exception while reading from socket channel: {}", context.getChannel()), exception); context.handleException(exception); } @@ -127,18 +118,16 @@ protected void handleWrite(SocketChannelContext context) throws IOException { * @param exception that occurred */ protected void writeException(SocketChannelContext context, Exception exception) { - logger.debug(() -> new ParameterizedMessage("exception while writing to socket channel: {}", context.getChannel()), exception); context.handleException(exception); } /** * This method is called when a listener attached to a 
channel operation throws an exception. * - * @param listener that was called * @param exception that occurred */ - protected void listenerException(BiConsumer listener, Exception exception) { - logger.warn(new ParameterizedMessage("exception while executing listener: {}", listener), exception); + protected void listenerException(Exception exception) { + exceptionHandler.accept(exception); } /** diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java index b1a3a08f02ddf..88b3cef41cd01 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java @@ -143,7 +143,7 @@ public void executeListener(BiConsumer listener, V value) { try { listener.accept(value, null); } catch (Exception e) { - eventHandler.listenerException(listener, e); + eventHandler.listenerException(e); } } @@ -159,7 +159,7 @@ public void executeFailedListener(BiConsumer listener, Excepti try { listener.accept(null, exception); } catch (Exception e) { - eventHandler.listenerException(listener, e); + eventHandler.listenerException(e); } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java index 50469b30acde9..a162a8e234c21 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java @@ -50,7 +50,7 @@ public void setUpHandler() throws IOException { ArrayList selectors = new ArrayList<>(); selectors.add(mock(SocketSelector.class)); selectorSupplier = new RoundRobinSupplier<>(selectors.toArray(new SocketSelector[selectors.size()])); - handler = new AcceptorEventHandler(logger, selectorSupplier); + handler = new AcceptorEventHandler(selectorSupplier, mock(Consumer.class)); channel = new NioServerSocketChannel(mock(ServerSocketChannel.class)); context = new DoNotRegisterContext(channel, mock(AcceptingSelector.class), mock(Consumer.class)); @@ -99,6 +99,14 @@ public void testHandleAcceptCallsServerAcceptCallback() throws IOException { verify(serverChannelContext).acceptChannels(selectorSupplier); } + public void testAcceptExceptionCallsExceptionHandler() throws IOException { + ServerChannelContext serverChannelContext = mock(ServerChannelContext.class); + IOException exception = new IOException(); + handler.acceptException(serverChannelContext, exception); + + verify(serverChannelContext).handleException(exception); + } + private class DoNotRegisterContext extends ServerChannelContext { diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ESSelectorTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ESSelectorTests.java index cb8f0757fb924..05b84345f45c2 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ESSelectorTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ESSelectorTests.java @@ -27,7 +27,6 @@ import java.nio.channels.ClosedSelectorException; import java.nio.channels.SelectionKey; import java.nio.channels.Selector; -import java.nio.channels.SocketChannel; import static org.mockito.Matchers.anyInt; import static org.mockito.Mockito.mock; @@ -81,7 +80,7 @@ public void testIOExceptionWhileSelect() throws IOException { 
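        // illustrative note: both Selector#select(long) and Selector#close()
        // failures now funnel through the single selectorException(IOException)
        // callback, which simply delegates to the injected Consumer<Exception>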
this.selector.singleLoop(); - verify(handler).selectException(ioException); + verify(handler).selectorException(ioException); } public void testSelectorClosedIfOpenAndEventLoopNotRunning() throws IOException { diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java index 068527a525916..13ce2c136544e 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.util.function.Consumer; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.mockito.Mockito.mock; @@ -34,10 +35,12 @@ public class NioGroupTests extends ESTestCase { private NioGroup nioGroup; @Override + @SuppressWarnings("unchecked") public void setUp() throws Exception { super.setUp(); - nioGroup = new NioGroup(logger, daemonThreadFactory(Settings.EMPTY, "acceptor"), 1, AcceptorEventHandler::new, - daemonThreadFactory(Settings.EMPTY, "selector"), 1, SocketEventHandler::new); + nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "acceptor"), 1, + (s) -> new AcceptorEventHandler(s, mock(Consumer.class)), daemonThreadFactory(Settings.EMPTY, "selector"), 1, + () -> new SocketEventHandler(mock(Consumer.class))); } @Override @@ -69,10 +72,12 @@ public void testCanCloseTwice() throws IOException { nioGroup.close(); } + @SuppressWarnings("unchecked") public void testExceptionAtStartIsHandled() throws IOException { RuntimeException ex = new RuntimeException(); - CheckedRunnable ctor = () -> new NioGroup(logger, r -> {throw ex;}, 1, - AcceptorEventHandler::new, daemonThreadFactory(Settings.EMPTY, "selector"), 1, SocketEventHandler::new); + CheckedRunnable ctor = () -> new NioGroup(r -> {throw ex;}, 1, + (s) -> new AcceptorEventHandler(s, mock(Consumer.class)), daemonThreadFactory(Settings.EMPTY, "selector"), + 1, () -> new SocketEventHandler(mock(Consumer.class))); RuntimeException runtimeException = expectThrows(RuntimeException.class, ctor::run); assertSame(ex, runtimeException); // ctor starts threads. So we are testing that a failure to construct will stop threads. 
Our thread diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java index a80563f7d74db..c85d9c0c5a8f8 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java @@ -36,7 +36,8 @@ public class SocketEventHandlerTests extends ESTestCase { - private Consumer exceptionHandler; + private Consumer channelExceptionHandler; + private Consumer genericExceptionHandler; private ReadWriteHandler readWriteHandler; private SocketEventHandler handler; @@ -47,15 +48,16 @@ public class SocketEventHandlerTests extends ESTestCase { @Before @SuppressWarnings("unchecked") public void setUpHandler() throws IOException { - exceptionHandler = mock(Consumer.class); + channelExceptionHandler = mock(Consumer.class); + genericExceptionHandler = mock(Consumer.class); readWriteHandler = mock(ReadWriteHandler.class); SocketSelector selector = mock(SocketSelector.class); - handler = new SocketEventHandler(logger); + handler = new SocketEventHandler(genericExceptionHandler); rawChannel = mock(SocketChannel.class); channel = new NioSocketChannel(rawChannel); when(rawChannel.finishConnect()).thenReturn(true); - context = new DoNotRegisterContext(channel, selector, exceptionHandler, new TestSelectionKey(0), readWriteHandler); + context = new DoNotRegisterContext(channel, selector, channelExceptionHandler, new TestSelectionKey(0), readWriteHandler); channel.setContext(context); handler.handleRegistration(context); @@ -96,7 +98,7 @@ public void testRegisterWithPendingWritesAddsOP_CONNECTAndOP_READAndOP_WRITEInte public void testRegistrationExceptionCallsExceptionHandler() throws IOException { CancelledKeyException exception = new CancelledKeyException(); handler.registrationException(context, exception); - verify(exceptionHandler).accept(exception); + verify(channelExceptionHandler).accept(exception); } public void testConnectDoesNotRemoveOP_CONNECTInterestIfIncomplete() throws IOException { @@ -114,7 +116,7 @@ public void testConnectRemovesOP_CONNECTInterestIfComplete() throws IOException public void testConnectExceptionCallsExceptionHandler() throws IOException { IOException exception = new IOException(); handler.connectException(context, exception); - verify(exceptionHandler).accept(exception); + verify(channelExceptionHandler).accept(exception); } public void testHandleReadDelegatesToContext() throws IOException { @@ -130,13 +132,13 @@ public void testHandleReadDelegatesToContext() throws IOException { public void testReadExceptionCallsExceptionHandler() { IOException exception = new IOException(); handler.readException(context, exception); - verify(exceptionHandler).accept(exception); + verify(channelExceptionHandler).accept(exception); } public void testWriteExceptionCallsExceptionHandler() { IOException exception = new IOException(); handler.writeException(context, exception); - verify(exceptionHandler).accept(exception); + verify(channelExceptionHandler).accept(exception); } public void testPostHandlingCallWillCloseTheChannelIfReady() throws IOException { @@ -192,6 +194,12 @@ public void testPostHandlingWillRemoveWriteIfNecessary() throws IOException { assertEquals(SelectionKey.OP_READ, key.interestOps()); } + public void testListenerExceptionCallsGenericExceptionHandler() throws IOException { + RuntimeException listenerException = new 
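+        // illustrative note: the listener itself is no longer reported; the
+        // failure goes straight to the selector-wide generic exception handler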
RuntimeException(); + handler.listenerException(listenerException); + verify(genericExceptionHandler).accept(listenerException); + } + private class DoNotRegisterContext extends BytesChannelContext { private final TestSelectionKey selectionKey; diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java index a68f5c05dad5a..78911f2028953 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java @@ -297,7 +297,7 @@ public void testExecuteListenerWillHandleException() throws Exception { socketSelector.executeListener(listener, null); - verify(eventHandler).listenerException(listener, exception); + verify(eventHandler).listenerException(exception); } public void testExecuteFailedListenerWillHandleException() throws Exception { @@ -307,6 +307,6 @@ public void testExecuteFailedListenerWillHandleException() throws Exception { socketSelector.executeFailedListener(listener, ioException); - verify(eventHandler).listenerException(listener, exception); + verify(eventHandler).listenerException(exception); } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java new file mode 100644 index 0000000000000..d4e1e794a309b --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.analysis.common;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.util.CharTokenizer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
+
+import java.util.HashSet;
+import java.util.Set;
+
+public class CharGroupTokenizerFactory extends AbstractTokenizerFactory {
+
+    private final Set<Integer> tokenizeOnChars = new HashSet<>();
+    private boolean tokenizeOnSpace = false;
+    private boolean tokenizeOnLetter = false;
+    private boolean tokenizeOnDigit = false;
+    private boolean tokenizeOnPunctuation = false;
+    private boolean tokenizeOnSymbol = false;
+
+    public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+        super(indexSettings, name, settings);
+
+        for (final String c : settings.getAsList("tokenize_on_chars")) {
+            if (c == null || c.length() == 0) {
+                throw new RuntimeException("[tokenize_on_chars] cannot contain empty characters");
+            }
+
+            if (c.length() == 1) {
+                tokenizeOnChars.add((int) c.charAt(0));
+            }
+            else if (c.charAt(0) == '\\') {
+                tokenizeOnChars.add((int) parseEscapedChar(c));
+            } else {
+                switch (c) {
+                    case "letter":
+                        tokenizeOnLetter = true;
+                        break;
+                    case "digit":
+                        tokenizeOnDigit = true;
+                        break;
+                    case "whitespace":
+                        tokenizeOnSpace = true;
+                        break;
+                    case "punctuation":
+                        tokenizeOnPunctuation = true;
+                        break;
+                    case "symbol":
+                        tokenizeOnSymbol = true;
+                        break;
+                    default:
+                        throw new RuntimeException("Invalid escaped char in [" + c + "]");
+                }
+            }
+        }
+    }
+
+    private char parseEscapedChar(final String s) {
+        int len = s.length();
+        char c = s.charAt(0);
+        if (c == '\\') {
+            if (1 >= len) {
+                throw new RuntimeException("Invalid escaped char in [" + s + "]");
+            }
+            c = s.charAt(1);
+            switch (c) {
+                case '\\':
+                    return '\\';
+                case 'n':
+                    return '\n';
+                case 't':
+                    return '\t';
+                case 'r':
+                    return '\r';
+                case 'b':
+                    return '\b';
+                case 'f':
+                    return '\f';
+                case 'u':
+                    if (len > 6) {
+                        throw new RuntimeException("Invalid escaped char in [" + s + "]");
+                    }
+                    return (char) Integer.parseInt(s.substring(2), 16);
+                default:
+                    throw new RuntimeException("Invalid escaped char " + c + " in [" + s + "]");
+            }
+        } else {
+            throw new RuntimeException("Invalid escaped char [" + s + "]");
+        }
+    }
+
+    @Override
+    public Tokenizer create() {
+        return new CharTokenizer() {
+            @Override
+            protected boolean isTokenChar(int c) {
+                if (tokenizeOnSpace && Character.isWhitespace(c)) {
+                    return false;
+                }
+                if (tokenizeOnLetter && Character.isLetter(c)) {
+                    return false;
+                }
+                if (tokenizeOnDigit && Character.isDigit(c)) {
+                    return false;
+                }
+                if (tokenizeOnPunctuation && CharMatcher.Basic.PUNCTUATION.isTokenChar(c)) {
+                    return false;
+                }
+                if (tokenizeOnSymbol && CharMatcher.Basic.SYMBOL.isTokenChar(c)) {
+                    return false;
+                }
+                return !tokenizeOnChars.contains(c);
+            }
+        };
+    }
+}
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
index 624194092a02e..02a4197fba94a 100644
--- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
+++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
@@ -184,6 +184,7 @@ public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
         tokenizers.put("ngram",
NGramTokenizerFactory::new); tokenizers.put("edgeNGram", EdgeNGramTokenizerFactory::new); tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new); + tokenizers.put("char_group", CharGroupTokenizerFactory::new); tokenizers.put("classic", ClassicTokenizerFactory::new); tokenizers.put("letter", LetterTokenizerFactory::new); tokenizers.put("lowercase", LowerCaseTokenizerFactory::new); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactoryTests.java new file mode 100644 index 0000000000000..1447531aa8731 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactoryTests.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.test.ESTokenStreamTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.io.StringReader; +import java.util.Arrays; + + +public class CharGroupTokenizerFactoryTests extends ESTokenStreamTestCase { + public void testParseTokenChars() { + final Index index = new Index("test", "_na_"); + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + IndexSettings indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings); + final String name = "cg"; + for (String[] conf : Arrays.asList( + new String[] { "\\v" }, + new String[] { "\\u00245" }, + new String[] { "commas" }, + new String[] { "a", "b", "c", "\\$" })) { + final Settings settings = newAnalysisSettingsBuilder().putList("tokenize_on_chars", conf).build(); + expectThrows(RuntimeException.class, () -> new CharGroupTokenizerFactory(indexProperties, null, name, settings).create()); + } + + for (String[] conf : Arrays.asList( + new String[0], + new String[] { "\\n" }, + new String[] { "\\u0024" }, + new String[] { "whitespace" }, + new String[] { "a", "b", "c" }, + new String[] { "a", "b", "c", "\\r" }, + new String[] { "\\r" }, + new String[] { "f", "o", "o", "symbol" })) { + final Settings settings = newAnalysisSettingsBuilder().putList("tokenize_on_chars", Arrays.asList(conf)).build(); + new CharGroupTokenizerFactory(indexProperties, null, name, settings).create(); + // no exception + } + } + + public void testTokenization() throws IOException { + final Index index = new Index("test", "_na_"); + final String name = "cg"; + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + final Settings settings 
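+        // illustrative: these settings split on whitespace, on ':' and on '$'
+        // (escaped as \u0024), so "foo bar $34 test:test2" is expected to
+        // yield [foo, bar, 34, test, test2]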
= newAnalysisSettingsBuilder().putList("tokenize_on_chars", "whitespace", ":", "\\u0024").build(); + Tokenizer tokenizer = new CharGroupTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), + null, name, settings).create(); + tokenizer.setReader(new StringReader("foo bar $34 test:test2")); + assertTokenStreamContents(tokenizer, new String[] {"foo", "bar", "34", "test", "test2"}); + } +} diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 702782e1c5ed7..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3dba337d06e1f5930cb7ae638c1655b99ce0cb7 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..8222106897b18 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +1e28b448387ec05d655f8c81ee54e13ff2975a4d \ No newline at end of file diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java new file mode 100644 index 0000000000000..5b0158ff55b5f --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * A {@link FieldMapper} that exposes Lucene's {@link FeatureField}. 
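+ * <p>
+ * Illustrative mapping, not prescribed by this patch: a field declared as
+ * {@code "pagerank": {"type": "feature"}} accepts a single numeric value per
+ * document and indexes it as a term on the hidden {@code _feature} field,
+ * encoding the value in the term frequency. With
+ * {@code "positive_score_impact": false} the reciprocal of the value is
+ * indexed instead, so that smaller values produce stronger matches.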
+ */ +public class FeatureFieldMapper extends FieldMapper { + + public static final String CONTENT_TYPE = "feature"; + + public static class Defaults { + public static final MappedFieldType FIELD_TYPE = new FeatureFieldType(); + + static { + FIELD_TYPE.setTokenized(false); + FIELD_TYPE.setIndexOptions(IndexOptions.NONE); + FIELD_TYPE.setHasDocValues(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.freeze(); + } + } + + public static class Builder extends FieldMapper.Builder { + + public Builder(String name) { + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); + builder = this; + } + + @Override + public FeatureFieldType fieldType() { + return (FeatureFieldType) super.fieldType(); + } + + public Builder positiveScoreImpact(boolean v) { + fieldType().setPositiveScoreImpact(v); + return builder; + } + + @Override + public FeatureFieldMapper build(BuilderContext context) { + setupFieldType(context); + return new FeatureFieldMapper( + name, fieldType, defaultFieldType, + context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); + } + } + + public static class TypeParser implements Mapper.TypeParser { + @Override + public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + FeatureFieldMapper.Builder builder = new FeatureFieldMapper.Builder(name); + for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { + Map.Entry entry = iterator.next(); + String propName = entry.getKey(); + Object propNode = entry.getValue(); + if (propName.equals("positive_score_impact")) { + builder.positiveScoreImpact(XContentMapValues.nodeBooleanValue(propNode)); + iterator.remove(); + } + } + return builder; + } + } + + public static final class FeatureFieldType extends MappedFieldType { + + private boolean positiveScoreImpact = true; + + public FeatureFieldType() { + setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); + } + + protected FeatureFieldType(FeatureFieldType ref) { + super(ref); + this.positiveScoreImpact = ref.positiveScoreImpact; + } + + public FeatureFieldType clone() { + return new FeatureFieldType(this); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o) == false) { + return false; + } + FeatureFieldType other = (FeatureFieldType) o; + return Objects.equals(positiveScoreImpact, other.positiveScoreImpact); + } + + @Override + public int hashCode() { + int h = super.hashCode(); + h = 31 * h + Objects.hashCode(positiveScoreImpact); + return h; + } + + @Override + public void checkCompatibility(MappedFieldType other, List conflicts) { + super.checkCompatibility(other, conflicts); + if (positiveScoreImpact != ((FeatureFieldType) other).positiveScoreImpact()) { + conflicts.add("mapper [" + name() + "] has different [positive_score_impact] values"); + } + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + public boolean positiveScoreImpact() { + return positiveScoreImpact; + } + + public void setPositiveScoreImpact(boolean positiveScoreImpact) { + checkIfFrozen(); + this.positiveScoreImpact = positiveScoreImpact; + } + + @Override + public Query existsQuery(QueryShardContext context) { + return new TermQuery(new Term("_feature", name())); + } + + @Override + public Query nullValueQuery() { + if (nullValue() == null) { + return null; + } + return termQuery(nullValue(), null); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { + failIfNoDocValues(); + return new 
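+        // illustrative note: the default [feature] field type sets
+        // hasDocValues(false), so the failIfNoDocValues() call above rejects
+        // fielddata access unless doc values were explicitly enabled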
DocValuesIndexFieldData.Builder(); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + throw new UnsupportedOperationException("Queries on [feature] fields are not supported"); + } + } + + private FeatureFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); + assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0; + } + + @Override + protected FeatureFieldMapper clone() { + return (FeatureFieldMapper) super.clone(); + } + + @Override + public FeatureFieldType fieldType() { + return (FeatureFieldType) super.fieldType(); + } + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + float value; + if (context.externalValueSet()) { + Object v = context.externalValue(); + if (v instanceof Number) { + value = ((Number) v).floatValue(); + } else { + value = Float.parseFloat(v.toString()); + } + } else if (context.parser().currentToken() == Token.VALUE_NULL) { + // skip + return; + } else { + value = context.parser().floatValue(); + } + + if (context.doc().getByKey(name()) != null) { + throw new IllegalArgumentException("[feature] fields do not support indexing multiple values for the same field [" + name() + + "] in the same document"); + } + + if (fieldType().positiveScoreImpact() == false) { + value = 1 / value; + } + + context.doc().addWithKey(name(), new FeatureField("_feature", name(), value)); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { + super.doXContentBody(builder, includeDefaults, params); + + if (includeDefaults || fieldType().nullValue() != null) { + builder.field("null_value", fieldType().nullValue()); + } + + if (includeDefaults || fieldType().positiveScoreImpact() == false) { + builder.field("positive_score_impact", fieldType().positiveScoreImpact()); + } + } +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java new file mode 100644 index 0000000000000..2102a029a6ad6 --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * This meta field only exists because feature fields index everything into a + * common _feature field and Elasticsearch has a custom codec that complains + * when fields exist in the index and not in mappings. + */ +public class FeatureMetaFieldMapper extends MetadataFieldMapper { + + public static final String NAME = "_feature"; + + public static final String CONTENT_TYPE = "_feature"; + + public static class Defaults { + public static final MappedFieldType FIELD_TYPE = new FeatureMetaFieldType(); + + static { + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS); + FIELD_TYPE.setTokenized(true); + FIELD_TYPE.setStored(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + FIELD_TYPE.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); + FIELD_TYPE.setName(NAME); + FIELD_TYPE.freeze(); + } + } + + public static class Builder extends MetadataFieldMapper.Builder { + + public Builder(MappedFieldType existing) { + super(NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); + } + + @Override + public FeatureMetaFieldMapper build(BuilderContext context) { + setupFieldType(context); + return new FeatureMetaFieldMapper(fieldType, context.indexSettings()); + } + } + + public static class TypeParser implements MetadataFieldMapper.TypeParser { + @Override + public MetadataFieldMapper.Builder parse(String name, + Map node, ParserContext parserContext) throws MapperParsingException { + return new Builder(parserContext.mapperService().fullName(NAME)); + } + + @Override + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); + if (fieldType != null) { + return new FeatureMetaFieldMapper(indexSettings, fieldType); + } else { + return parse(NAME, Collections.emptyMap(), context) + .build(new BuilderContext(indexSettings, new ContentPath(1))); + } + } + } + + public static final class FeatureMetaFieldType extends MappedFieldType { + + public FeatureMetaFieldType() { + } + + protected FeatureMetaFieldType(FeatureMetaFieldType ref) { + super(ref); + } + + @Override + public FeatureMetaFieldType clone() { + return new FeatureMetaFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new UnsupportedOperationException("Cannot run exists query on [_feature]"); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + throw new UnsupportedOperationException("The [_feature] field may not be queried directly"); + } + } + + private FeatureMetaFieldMapper(Settings indexSettings, MappedFieldType existing) { + this(existing.clone(), indexSettings); + } + + private FeatureMetaFieldMapper(MappedFieldType fieldType, Settings indexSettings) { + super(NAME, fieldType, Defaults.FIELD_TYPE, indexSettings); + } + + @Override + public void preParse(ParseContext context) throws IOException 
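+    // illustrative note: _feature terms are produced by FeatureFieldMapper
+    // itself, so this metadata mapper deliberately contributes nothing while
+    // parsing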
{} + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + throw new AssertionError("Should never be called"); + } + + @Override + public void postParse(ParseContext context) throws IOException {} + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java index 2b249a5fe6e09..4a9aea21a8a53 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java @@ -19,21 +19,37 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.index.mapper.MetadataFieldMapper.TypeParser; +import org.elasticsearch.index.query.FeatureQueryBuilder; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; import java.util.Collections; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; -public class MapperExtrasPlugin extends Plugin implements MapperPlugin { +public class MapperExtrasPlugin extends Plugin implements MapperPlugin, SearchPlugin { @Override public Map getMappers() { Map mappers = new LinkedHashMap<>(); mappers.put(ScaledFloatFieldMapper.CONTENT_TYPE, new ScaledFloatFieldMapper.TypeParser()); mappers.put(TokenCountFieldMapper.CONTENT_TYPE, new TokenCountFieldMapper.TypeParser()); + mappers.put(FeatureFieldMapper.CONTENT_TYPE, new FeatureFieldMapper.TypeParser()); return Collections.unmodifiableMap(mappers); } + @Override + public Map getMetadataMappers() { + return Collections.singletonMap(FeatureMetaFieldMapper.CONTENT_TYPE, new FeatureMetaFieldMapper.TypeParser()); + } + + @Override + public List> getQueries() { + return Collections.singletonList( + new QuerySpec<>(FeatureQueryBuilder.NAME, FeatureQueryBuilder::new, p -> FeatureQueryBuilder.PARSER.parse(p, null))); + } + } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java new file mode 100644 index 0000000000000..761de46731dda --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java @@ -0,0 +1,354 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.mapper.FeatureFieldMapper.FeatureFieldType; +import org.elasticsearch.index.mapper.MappedFieldType; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Query to run on a [feature] field. + */ +public final class FeatureQueryBuilder extends AbstractQueryBuilder { + + /** + * Scoring function for a [feature] field. + */ + public abstract static class ScoreFunction { + + private ScoreFunction() {} // prevent extensions by users + + abstract void writeTo(StreamOutput out) throws IOException; + + abstract Query toQuery(String feature, boolean positiveScoreImpact) throws IOException; + + abstract void doXContent(XContentBuilder builder) throws IOException; + + /** + * A scoring function that scores documents as {@code Math.log(scalingFactor + S)} + * where S is the value of the static feature. + */ + public static class Log extends ScoreFunction { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "log", a -> new Log((Float) a[0])); + static { + PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("scaling_factor")); + } + + private final float scalingFactor; + + public Log(float scalingFactor) { + this.scalingFactor = scalingFactor; + } + + private Log(StreamInput in) throws IOException { + this(in.readFloat()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Log that = (Log) obj; + return scalingFactor == that.scalingFactor; + } + + @Override + public int hashCode() { + return Float.hashCode(scalingFactor); + } + + @Override + void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) 0); + out.writeFloat(scalingFactor); + } + + @Override + void doXContent(XContentBuilder builder) throws IOException { + builder.startObject("log"); + builder.field("scaling_factor", scalingFactor); + builder.endObject(); + } + + @Override + Query toQuery(String feature, boolean positiveScoreImpact) throws IOException { + if (positiveScoreImpact == false) { + throw new IllegalArgumentException("Cannot use the [log] function with a field that has a negative score impact as " + + "it would trigger negative scores"); + } + return FeatureField.newLogQuery("_feature", feature, DEFAULT_BOOST, scalingFactor); + } + } + + /** + * A scoring function that scores documents as {@code S / (S + pivot)} where S is + * the value of the static feature. + */ + public static class Saturation extends ScoreFunction { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "saturation", a -> new Saturation((Float) a[0])); + static { + PARSER.declareFloat(ConstructingObjectParser.optionalConstructorArg(), new ParseField("pivot")); + } + + private final Float pivot; + + /** Constructor with a default pivot, computed as the geometric average of + * all feature values in the index. 
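+         * Illustrative: {@code new Saturation()} lets the pivot be derived from
+         * index statistics, while {@code new Saturation(10f)} scores a document
+         * whose feature value is S as {@code S / (S + 10)}.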
*/ + public Saturation() { + this((Float) null); + } + + public Saturation(float pivot) { + this(Float.valueOf(pivot)); + } + + private Saturation(Float pivot) { + this.pivot = pivot; + } + + private Saturation(StreamInput in) throws IOException { + this(in.readOptionalFloat()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Saturation that = (Saturation) obj; + return Objects.equals(pivot, that.pivot); + } + + @Override + public int hashCode() { + return Objects.hashCode(pivot); + } + + @Override + void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) 1); + out.writeOptionalFloat(pivot); + } + + @Override + void doXContent(XContentBuilder builder) throws IOException { + builder.startObject("saturation"); + if (pivot != null) { + builder.field("pivot", pivot); + } + builder.endObject(); + } + + @Override + Query toQuery(String feature, boolean positiveScoreImpact) throws IOException { + if (pivot == null) { + return FeatureField.newSaturationQuery("_feature", feature); + } else { + return FeatureField.newSaturationQuery("_feature", feature, DEFAULT_BOOST, pivot); + } + } + } + + /** + * A scoring function that scores documents as {@code S^exp / (S^exp + pivot^exp)} + * where S is the value of the static feature. + */ + public static class Sigmoid extends ScoreFunction { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "sigmoid", a -> new Sigmoid((Float) a[0], ((Float) a[1]).floatValue())); + static { + PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("pivot")); + PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("exponent")); + } + + private final float pivot; + private final float exp; + + public Sigmoid(float pivot, float exp) { + this.pivot = pivot; + this.exp = exp; + } + + private Sigmoid(StreamInput in) throws IOException { + this(in.readFloat(), in.readFloat()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Sigmoid that = (Sigmoid) obj; + return pivot == that.pivot + && exp == that.exp; + } + + @Override + public int hashCode() { + return Objects.hash(pivot, exp); + } + + @Override + void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) 2); + out.writeFloat(pivot); + out.writeFloat(exp); + } + + @Override + void doXContent(XContentBuilder builder) throws IOException { + builder.startObject("sigmoid"); + builder.field("pivot", pivot); + builder.field("exponent", exp); + builder.endObject(); + } + + @Override + Query toQuery(String feature, boolean positiveScoreImpact) throws IOException { + return FeatureField.newSigmoidQuery("_feature", feature, DEFAULT_BOOST, pivot, exp); + } + } + } + + private static ScoreFunction readScoreFunction(StreamInput in) throws IOException { + byte b = in.readByte(); + switch (b) { + case 0: + return new ScoreFunction.Log(in); + case 1: + return new ScoreFunction.Saturation(in); + case 2: + return new ScoreFunction.Sigmoid(in); + default: + throw new IOException("Illegal score function id: " + b); + } + } + + public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "feature", args -> { + final String field = (String) args[0]; + final float boost = args[1] == null ? 
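+            // illustrative note: args[3..] hold the optional [log], [saturation]
+            // and [sigmoid] objects; the checks below enforce that at most one is
+            // supplied and fall back to a pivot-less saturation function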
DEFAULT_BOOST : (Float) args[1]; + final String queryName = (String) args[2]; + long numNonNulls = Arrays.stream(args, 3, args.length).filter(Objects::nonNull).count(); + final FeatureQueryBuilder query; + if (numNonNulls > 1) { + throw new IllegalArgumentException("Can only specify one of [log], [saturation] and [sigmoid]"); + } else if (numNonNulls == 0) { + query = new FeatureQueryBuilder(field, new ScoreFunction.Saturation()); + } else { + ScoreFunction scoreFunction = (ScoreFunction) Arrays.stream(args, 3, args.length) + .filter(Objects::nonNull) + .findAny() + .get(); + query = new FeatureQueryBuilder(field, scoreFunction); + } + query.boost(boost); + query.queryName(queryName); + return query; + }); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("field")); + PARSER.declareFloat(ConstructingObjectParser.optionalConstructorArg(), BOOST_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), NAME_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + ScoreFunction.Log.PARSER, new ParseField("log")); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + ScoreFunction.Saturation.PARSER, new ParseField("saturation")); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + ScoreFunction.Sigmoid.PARSER, new ParseField("sigmoid")); + } + + public static final String NAME = "feature"; + + private final String field; + private final ScoreFunction scoreFunction; + + public FeatureQueryBuilder(String field, ScoreFunction scoreFunction) { + this.field = Objects.requireNonNull(field); + this.scoreFunction = Objects.requireNonNull(scoreFunction); + } + + public FeatureQueryBuilder(StreamInput in) throws IOException { + super(in); + this.field = in.readString(); + this.scoreFunction = readScoreFunction(in); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeString(field); + scoreFunction.writeTo(out); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(getName()); + builder.field("field", field); + scoreFunction.doXContent(builder); + printBoostAndQueryName(builder); + builder.endObject(); + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + final MappedFieldType ft = context.fieldMapper(field); + if (ft == null) { + return new MatchNoDocsQuery(); + } + if (ft instanceof FeatureFieldType == false) { + throw new IllegalArgumentException("[feature] query only works on [feature] fields, not [" + ft.typeName() + "]"); + } + final FeatureFieldType fft = (FeatureFieldType) ft; + return scoreFunction.toQuery(field, fft.positiveScoreImpact()); + } + + @Override + protected boolean doEquals(FeatureQueryBuilder other) { + return Objects.equals(field, other.field) && Objects.equals(scoreFunction, other.scoreFunction); + } + + @Override + protected int doHashCode() { + return Objects.hash(field, scoreFunction); + } + +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java new file mode 100644 index 0000000000000..2e9fa98cbbe97 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java @@ -0,0 +1,173 @@ +/* + * Licensed to 
Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute; +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; + +public class FeatureFieldMapperTests extends ESSingleNodeTestCase { + + IndexService indexService; + DocumentMapperParser parser; + + @Before + public void setup() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + + private static int getFrequency(TokenStream tk) throws IOException { + TermFrequencyAttribute freqAttribute = tk.addAttribute(TermFrequencyAttribute.class); + tk.reset(); + assertTrue(tk.incrementToken()); + int freq = freqAttribute.getTermFrequency(); + assertFalse(tk.incrementToken()); + return freq; + } + + public void testDefaults() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc1 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 10) + .endObject()), + XContentType.JSON)); + + IndexableField[] fields = doc1.rootDoc().getFields("_feature"); + assertEquals(1, fields.length); + assertThat(fields[0], Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = (FeatureField) fields[0]; + + ParsedDocument doc2 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 12) + .endObject()), + XContentType.JSON)); + + FeatureField featureField2 = (FeatureField) doc2.rootDoc().getFields("_feature")[0]; + + int freq1 = getFrequency(featureField1.tokenStream(null, null)); + int freq2 = 
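+        // illustrative note: FeatureField encodes the value in the term
+        // frequency of the single "_feature" term, so indexing 12 is expected
+        // to produce a larger frequency than indexing 10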
getFrequency(featureField2.tokenStream(null, null)); + assertTrue(freq1 < freq2); + } + + public void testNegativeScoreImpact() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature") + .field("positive_score_impact", false).endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc1 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 10) + .endObject()), + XContentType.JSON)); + + IndexableField[] fields = doc1.rootDoc().getFields("_feature"); + assertEquals(1, fields.length); + assertThat(fields[0], Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = (FeatureField) fields[0]; + + ParsedDocument doc2 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 12) + .endObject()), + XContentType.JSON)); + + FeatureField featureField2 = (FeatureField) doc2.rootDoc().getFields("_feature")[0]; + + int freq1 = getFrequency(featureField1.tokenStream(null, null)); + int freq2 = getFrequency(featureField2.tokenStream(null, null)); + assertTrue(freq1 > freq2); + } + + public void testRejectMultiValuedFields() throws MapperParsingException, IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature").endObject().startObject("foo") + .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .endObject().endObject().endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + MapperParsingException e = null;/*expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", Arrays.asList(10, 20)) + .endObject()), + XContentType.JSON))); + assertEquals("[feature] fields do not support indexing multiple values for the same field [field] in the same document", + e.getCause().getMessage());*/ + + e = expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .startArray("foo") + .startObject() + .field("field", 10) + .endObject() + .startObject() + .field("field", 20) + .endObject() + .endArray() + .endObject()), + XContentType.JSON))); + assertEquals("[feature] fields do not support indexing multiple values for the same field [foo.field] in the same document", + e.getCause().getMessage()); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java new file mode 100644 index 0000000000000..9debd0736602c --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.junit.Before; + +public class FeatureFieldTypeTests extends FieldTypeTestCase { + + @Override + protected MappedFieldType createDefaultFieldType() { + return new FeatureFieldMapper.FeatureFieldType(); + } + + @Before + public void setupProperties() { + addModifier(new Modifier("positive_score_impact", false) { + @Override + public void modify(MappedFieldType ft) { + FeatureFieldMapper.FeatureFieldType tft = (FeatureFieldMapper.FeatureFieldType)ft; + tft.setPositiveScoreImpact(tft.positiveScoreImpact() == false); + } + @Override + public void normalizeOther(MappedFieldType other) { + super.normalizeOther(other); + ((FeatureFieldMapper.FeatureFieldType) other).setPositiveScoreImpact(true); + } + }); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java new file mode 100644 index 0000000000000..99697b1abaf58 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
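// Hedged note on testNegativeScoreImpact and the positive_score_impact modifier above:
// the assertions show the frequency ordering flips when positive_score_impact is false.
// One straightforward way for a mapper to achieve that (an assumption for illustration,
// not a quote of FeatureFieldMapper) is to index the reciprocal of the value:
class NegativeScoreImpactSketch {
    // with 1/value, a larger original value encodes to a *smaller* term frequency,
    // which matches the ordering the test asserts (freq1 > freq2 for values 10 vs 12)
    static float valueToEncode(float value, boolean positiveScoreImpact) {
        return positiveScoreImpact ? value : 1 / value;
    }
}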
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.junit.Before;
+
+import java.util.Collection;
+
+public class FeatureMetaFieldMapperTests extends ESSingleNodeTestCase {
+
+    IndexService indexService;
+    DocumentMapperParser parser;
+
+    @Before
+    public void setup() {
+        indexService = createIndex("test");
+        parser = indexService.mapperService().documentMapperParser();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return pluginList(MapperExtrasPlugin.class);
+    }
+
+    public void testBasics() throws Exception {
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("field").field("type", "feature").endObject().endObject()
+                .endObject().endObject());
+
+        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
+
+        assertEquals(mapping, mapper.mappingSource().toString());
+        assertNotNull(mapper.metadataMapper(FeatureMetaFieldMapper.class));
+    }
+}
diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java
new file mode 100644
index 0000000000000..ef261573c9682
--- /dev/null
+++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+public class FeatureMetaFieldTypeTests extends FieldTypeTestCase {
+
+    @Override
+    protected MappedFieldType createDefaultFieldType() {
+        return new FeatureMetaFieldMapper.FeatureMetaFieldType();
+    }
+
+}
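// Hedged preview of the query DSL exercised by FeatureQueryBuilderTests, added just
// below: a "feature" query targets a feature field and picks one of three score
// functions (log, saturation, sigmoid). The JSON shape matches the query builder tests
// and the YAML rest tests in this patch; the field name is an arbitrary example:
class FeatureQueryJsonSketch {
    static final String SATURATION =
        "{\n" +
        "  \"feature\" : {\n" +
        "    \"field\": \"pagerank\",\n" +
        "    \"saturation\": {\n" +
        "      \"pivot\": 20\n" +
        "    }\n" +
        "  }\n" +
        "}";
}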
diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java
new file mode 100644
index 0000000000000..883dce5f3858c
--- /dev/null
+++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.document.FeatureField;
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.index.mapper.MapperExtrasPlugin;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.FeatureQueryBuilder.ScoreFunction;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.AbstractQueryTestCase;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.Matchers.either;
+
+public class FeatureQueryBuilderTests extends AbstractQueryTestCase<FeatureQueryBuilder> {
+
+    @Override
+    protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
+        for (String type : getCurrentTypes()) {
+            mapperService.merge(type, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(type,
+                    "my_feature_field", "type=feature",
+                    "my_negative_feature_field", "type=feature,positive_score_impact=false"))), MapperService.MergeReason.MAPPING_UPDATE);
+        }
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return Collections.singleton(MapperExtrasPlugin.class);
+    }
+
+    @Override
+    protected FeatureQueryBuilder doCreateTestQueryBuilder() {
+        ScoreFunction function;
+        switch (random().nextInt(3)) {
+            case 0:
+                function = new ScoreFunction.Log(1 + randomFloat());
+                break;
+            case 1:
+                if (randomBoolean()) {
+                    function = new ScoreFunction.Saturation();
+                } else {
+                    function = new ScoreFunction.Saturation(randomFloat());
+                }
+                break;
+            case 2:
+                function = new ScoreFunction.Sigmoid(randomFloat(), randomFloat());
+                break;
+            default:
+                throw new AssertionError();
+        }
+        return new FeatureQueryBuilder("my_feature_field", function);
+    }
+
+    @Override
+    protected void doAssertLuceneQuery(FeatureQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException {
+        Class<?> expectedClass = FeatureField.newSaturationQuery("", "", 1, 1).getClass();
+        assertThat(query, either(instanceOf(MatchNoDocsQuery.class)).or(instanceOf(expectedClass)));
+    }
+
+    @Override
+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/30605")
+    public void testUnknownField() {
+        super.testUnknownField();
+    }
+
+    public void testDefaultScoreFunction() throws IOException {
+        assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
+        String query = "{\n" +
+                "    \"feature\" : {\n" +
+                "        \"field\": \"my_feature_field\"\n" +
+                "    }\n" +
+                "}";
+        Query parsedQuery = parseQuery(query).toQuery(createShardContext());
+        assertEquals(FeatureField.newSaturationQuery("_feature", "my_feature_field"), parsedQuery);
+    }
+
+    public void testIllegalField() throws IOException {
+        assumeTrue("test runs only when at least a type is 
registered", getCurrentTypes().length > 0); + String query = "{\n" + + " \"feature\" : {\n" + + " \"field\": \"" + STRING_FIELD_NAME + "\"\n" + + " }\n" + + "}"; + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(query).toQuery(createShardContext())); + assertEquals("[feature] query only works on [feature] fields, not [text]", e.getMessage()); + } + + public void testIllegalCombination() throws IOException { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + String query = "{\n" + + " \"feature\" : {\n" + + " \"field\": \"my_negative_feature_field\",\n" + + " \"log\" : {\n" + + " \"scaling_factor\": 4.5\n" + + " }\n" + + " }\n" + + "}"; + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(query).toQuery(createShardContext())); + assertEquals( + "Cannot use the [log] function with a field that has a negative score impact as it would trigger negative scores", + e.getMessage()); + } +} diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml new file mode 100644 index 0000000000000..8318550876509 --- /dev/null +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml @@ -0,0 +1,160 @@ +setup: + - skip: + version: " - 6.99.99" + reason: "The feature field/query was introduced in 7.0.0" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + _doc: + properties: + pagerank: + type: feature + url_length: + type: feature + positive_score_impact: false + + - do: + index: + index: test + type: _doc + id: 1 + body: + pagerank: 10 + url_length: 50 + + - do: + index: + index: test + type: _doc + id: 2 + body: + pagerank: 100 + url_length: 20 + + - do: + indices.refresh: {} + +--- +"Positive log": + + - do: + search: + body: + query: + feature: + field: pagerank + log: + scaling_factor: 3 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Positive saturation": + + - do: + search: + body: + query: + feature: + field: pagerank + saturation: + pivot: 20 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Positive sigmoid": + + - do: + search: + body: + query: + feature: + field: pagerank + sigmoid: + pivot: 20 + exponent: 0.6 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Negative log": + + - do: + catch: bad_request + search: + body: + query: + feature: + field: url_length + log: + scaling_factor: 3 + +--- +"Negative saturation": + + - do: + search: + body: + query: + feature: + field: url_length + saturation: + pivot: 20 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Negative sigmoid": + + - do: + search: + body: + query: + feature: + field: url_length + sigmoid: + pivot: 20 + exponent: 0.6 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index 6e39a7f50d2cd..cb31d44454452 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ 
b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -42,7 +42,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; -import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; @@ -59,29 +58,24 @@ final class Netty4HttpChannel extends AbstractRestChannel { private final Netty4HttpServerTransport transport; private final Channel channel; private final FullHttpRequest nettyRequest; - private final HttpPipelinedRequest pipelinedRequest; + private final int sequence; private final ThreadContext threadContext; private final HttpHandlingSettings handlingSettings; /** - * @param transport The corresponding NettyHttpServerTransport where this channel belongs to. - * @param request The request that is handled by this channel. - * @param pipelinedRequest If HTTP pipelining is enabled provide the corresponding pipelined request. May be null if - * HTTP pipelining is disabled. - * @param handlingSettings true iff error messages should include stack traces. - * @param threadContext the thread context for the channel + * @param transport The corresponding NettyHttpServerTransport where this channel belongs to. + * @param request The request that is handled by this channel. + * @param sequence The pipelining sequence number for this request + * @param handlingSettings true if error messages should include stack traces. + * @param threadContext the thread context for the channel */ - Netty4HttpChannel( - final Netty4HttpServerTransport transport, - final Netty4HttpRequest request, - final HttpPipelinedRequest pipelinedRequest, - final HttpHandlingSettings handlingSettings, - final ThreadContext threadContext) { + Netty4HttpChannel(Netty4HttpServerTransport transport, Netty4HttpRequest request, int sequence, HttpHandlingSettings handlingSettings, + ThreadContext threadContext) { super(request, handlingSettings.getDetailedErrorsEnabled()); this.transport = transport; this.channel = request.getChannel(); this.nettyRequest = request.request(); - this.pipelinedRequest = pipelinedRequest; + this.sequence = sequence; this.threadContext = threadContext; this.handlingSettings = handlingSettings; } @@ -129,7 +123,7 @@ public void sendResponse(RestResponse response) { final ChannelPromise promise = channel.newPromise(); if (releaseContent) { - promise.addListener(f -> ((Releasable)content).close()); + promise.addListener(f -> ((Releasable) content).close()); } if (releaseBytesStreamOutput) { @@ -140,13 +134,9 @@ public void sendResponse(RestResponse response) { promise.addListener(ChannelFutureListener.CLOSE); } - final Object msg; - if (pipelinedRequest != null) { - msg = pipelinedRequest.createHttpResponse(resp, promise); - } else { - msg = resp; - } - channel.writeAndFlush(msg, promise); + Netty4HttpResponse newResponse = new Netty4HttpResponse(sequence, resp); + + channel.writeAndFlush(newResponse, promise); releaseContent = false; releaseBytesStreamOutput = false; } finally { @@ -156,9 +146,6 @@ public void sendResponse(RestResponse response) { if (releaseBytesStreamOutput) { bytesOutputOrNull().close(); } - if (pipelinedRequest != null) { - pipelinedRequest.release(); - } } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java 
b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java
new file mode 100644
index 0000000000000..52dfbff6d3e03
--- /dev/null
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.netty4;
+
+import io.netty.channel.ChannelDuplexHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import io.netty.handler.codec.http.LastHttpContent;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.http.HttpPipelinedRequest;
+import org.elasticsearch.http.HttpPipeliningAggregator;
+import org.elasticsearch.transport.netty4.Netty4Utils;
+
+import java.nio.channels.ClosedChannelException;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests.
+ */
+public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
+
+    private final Logger logger;
+    private final HttpPipeliningAggregator<Netty4HttpResponse, ChannelPromise> aggregator;
+
+    /**
+     * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation.
+     *
+     * @param logger        for logging unexpected errors
+     * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is
+     *                      required as events cannot queue up indefinitely
+     */
+    public Netty4HttpPipeliningHandler(Logger logger, final int maxEventsHeld) {
+        this.logger = logger;
+        this.aggregator = new HttpPipeliningAggregator<>(maxEventsHeld);
+    }
+
+    @Override
+    public void channelRead(final ChannelHandlerContext ctx, final Object msg) {
+        if (msg instanceof LastHttpContent) {
+            HttpPipelinedRequest<LastHttpContent> pipelinedRequest = aggregator.read(((LastHttpContent) msg));
+            ctx.fireChannelRead(pipelinedRequest);
+        } else {
+            ctx.fireChannelRead(msg);
+        }
+    }
+
+    @Override
+    public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) {
+        assert msg instanceof Netty4HttpResponse : "Message must be type: " + Netty4HttpResponse.class;
+        Netty4HttpResponse response = (Netty4HttpResponse) msg;
+        boolean success = false;
+        try {
+            List<Tuple<Netty4HttpResponse, ChannelPromise>> readyResponses = aggregator.write(response, promise);
+            for (Tuple<Netty4HttpResponse, ChannelPromise> readyResponse : readyResponses) {
+                ctx.write(readyResponse.v1().getResponse(), readyResponse.v2());
+            }
+            success = true;
+        } catch (IllegalStateException e) {
+            ctx.channel().close();
+        } finally {
+            if (success == false) {
+                promise.setFailure(new ClosedChannelException());
+            }
+        }
+    }
+
+    @Override
+    public void close(ChannelHandlerContext ctx, ChannelPromise promise) {
+        List<Tuple<Netty4HttpResponse, ChannelPromise>> inflightResponses = aggregator.removeAllInflightResponses();
+
+        if (inflightResponses.isEmpty() == false) {
+            ClosedChannelException closedChannelException = new ClosedChannelException();
+            for (Tuple<Netty4HttpResponse, ChannelPromise> inflightResponse : inflightResponses) {
+                try {
+                    inflightResponse.v2().setFailure(closedChannelException);
+                } catch (RuntimeException e) {
+                    logger.error("unexpected error while releasing pipelined http responses", e);
+                }
+            }
+        }
+        ctx.close(promise);
+    }
+}
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java
index 74429c8dda9b7..c3a010226a408 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java
@@ -30,41 +30,30 @@ import io.netty.handler.codec.http.HttpHeaders;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.http.HttpHandlingSettings;
-import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest;
+import org.elasticsearch.http.HttpPipelinedRequest;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.transport.netty4.Netty4Utils;
 
 import java.util.Collections;
 
 @ChannelHandler.Sharable
-class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<Object> {
+class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> {
 
     private final Netty4HttpServerTransport serverTransport;
     private final HttpHandlingSettings handlingSettings;
-    private final boolean httpPipeliningEnabled;
     private final ThreadContext threadContext;
 
     Netty4HttpRequestHandler(Netty4HttpServerTransport serverTransport, HttpHandlingSettings handlingSettings,
                              ThreadContext threadContext) {
         this.serverTransport = serverTransport;
-        this.httpPipeliningEnabled = serverTransport.pipelining;
         this.handlingSettings = handlingSettings;
         this.threadContext = threadContext;
     }
 
     @Override
-    protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
-        final FullHttpRequest request;
-        final HttpPipelinedRequest pipelinedRequest;
-        if (this.httpPipeliningEnabled && msg instanceof HttpPipelinedRequest) {
-            pipelinedRequest = (HttpPipelinedRequest) msg;
-            request = (FullHttpRequest) pipelinedRequest.last();
-        } else {
-            pipelinedRequest = null;
-            request = (FullHttpRequest) msg;
-        }
+    protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) throws Exception {
+        final FullHttpRequest request = msg.getRequest();
 
-        boolean success = false;
         try {
             final FullHttpRequest copy =
@@ -111,7 +100,7 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except
             Netty4HttpChannel innerChannel;
             try {
                 innerChannel =
-                    new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, handlingSettings, threadContext);
+                    new Netty4HttpChannel(serverTransport, httpRequest, msg.getSequence(), handlingSettings, threadContext);
             } catch (final IllegalArgumentException e) {
                 if (badRequestCause == null) {
                     badRequestCause = e;
@@ -126,7 +115,7 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except
                         copy, ctx.channel());
                 innerChannel =
-                    new Netty4HttpChannel(serverTransport, innerRequest, pipelinedRequest, handlingSettings, threadContext);
+                    new Netty4HttpChannel(serverTransport, innerRequest, msg.getSequence(), handlingSettings, threadContext);
             }
             channel = innerChannel;
         }
@@ -138,12 +127,9 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except
             } else {
                 serverTransport.dispatchRequest(httpRequest, channel);
             }
-            success = true;
         } finally {
-            // the request is otherwise released in case of dispatch
-            if (success == false && pipelinedRequest != null) {
-                pipelinedRequest.release();
-            }
+            // As we have copied the buffer, we can release the request
+            request.release();
         }
     }
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java
new file mode 100644
index 0000000000000..779c9125a2e42
--- /dev/null
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License. 
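// Hedged aside on the new finally block above ("As we have copied the buffer, we can
// release the request"): Netty reference-counts request payloads, so once the bytes
// have been copied the inbound buffer can be released immediately instead of being
// carried around until the response is written. Netty-only sketch with illustrative
// names, not code from this patch:
class ReleaseAfterCopySketch {
    static io.netty.buffer.ByteBuf copyThenRelease(io.netty.buffer.ByteBuf inbound) {
        io.netty.buffer.ByteBuf copy = inbound.copy(); // deep copy with its own ref count
        inbound.release();                             // safe: the copy owns its memory
        return copy;                                   // hand the copy off for dispatch
    }
}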
+ */ + +package org.elasticsearch.http.netty4; + +import io.netty.handler.codec.http.FullHttpResponse; +import org.elasticsearch.http.HttpPipelinedMessage; + +public class Netty4HttpResponse extends HttpPipelinedMessage { + + private final FullHttpResponse response; + + public Netty4HttpResponse(int sequence, FullHttpResponse response) { + super(sequence); + this.response = response; + } + + public FullHttpResponse getResponse() { + return response; + } +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 8e5bace46aa7e..45e889797bde4 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -62,7 +62,6 @@ import org.elasticsearch.http.netty4.cors.Netty4CorsConfig; import org.elasticsearch.http.netty4.cors.Netty4CorsConfigBuilder; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; -import org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.netty4.Netty4OpenChannelsHandler; @@ -99,7 +98,6 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING; import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; import static org.elasticsearch.http.netty4.cors.Netty4CorsHandler.ANY_ORIGIN; @@ -162,8 +160,6 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { protected final int workerCount; - protected final boolean pipelining; - protected final int pipeliningMaxEvents; /** @@ -204,6 +200,7 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); + this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()), Math.toIntExact(maxChunkSize.getBytes()), Math.toIntExact(maxHeaderSize.getBytes()), @@ -211,7 +208,8 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic SETTING_HTTP_RESET_COOKIES.get(settings), SETTING_HTTP_COMPRESSION.get(settings), SETTING_HTTP_COMPRESSION_LEVEL.get(settings), - SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings)); + SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), + pipeliningMaxEvents); this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings); @@ -226,14 +224,12 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic ByteSizeValue receivePredictor = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings); recvByteBufAllocator = new FixedRecvByteBufAllocator(receivePredictor.bytesAsInt()); - this.pipelining = SETTING_PIPELINING.get(settings); - 
this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); this.corsConfig = buildCorsConfig(settings); logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " + - "receive_predictor[{}], max_composite_buffer_components[{}], pipelining[{}], pipelining_max_events[{}]", - maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictor, maxCompositeBufferComponents, - pipelining, pipeliningMaxEvents); + "receive_predictor[{}], max_composite_buffer_components[{}], pipelining_max_events[{}]", + maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength, receivePredictor, maxCompositeBufferComponents, + pipeliningMaxEvents); } public Settings settings() { @@ -452,9 +448,7 @@ protected void initChannel(Channel ch) throws Exception { if (SETTING_CORS_ENABLED.get(transport.settings())) { ch.pipeline().addLast("cors", new Netty4CorsHandler(transport.getCorsConfig())); } - if (transport.pipelining) { - ch.pipeline().addLast("pipelining", new HttpPipeliningHandler(transport.logger, transport.pipeliningMaxEvents)); - } + ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(transport.logger, transport.pipeliningMaxEvents)); ch.pipeline().addLast("handler", requestHandler); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java deleted file mode 100644 index be1669c60c297..0000000000000 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.http.netty4.pipelining; - -import io.netty.channel.ChannelPromise; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.LastHttpContent; -import io.netty.util.ReferenceCounted; - -/** - * Permits downstream channel events to be ordered and signalled as to whether more are to come for - * a given sequence. 
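// Hedged sketch of where the new Netty4HttpPipeliningHandler sits, per the initChannel
// change above and the handler's javadoc (downstream of HTTP decoding/aggregation).
// The codec classes and the 1 MB aggregation limit are illustrative, not the
// transport's actual configuration:
class PipelineWiringSketch extends io.netty.channel.ChannelInitializer<io.netty.channel.Channel> {
    private final org.apache.logging.log4j.Logger logger;
    private final io.netty.channel.ChannelHandler requestHandler;
    private final int pipeliningMaxEvents;

    PipelineWiringSketch(org.apache.logging.log4j.Logger logger,
                         io.netty.channel.ChannelHandler requestHandler, int pipeliningMaxEvents) {
        this.logger = logger;
        this.requestHandler = requestHandler;
        this.pipeliningMaxEvents = pipeliningMaxEvents;
    }

    @Override
    protected void initChannel(io.netty.channel.Channel ch) {
        ch.pipeline().addLast("codec", new io.netty.handler.codec.http.HttpServerCodec());
        ch.pipeline().addLast("aggregator", new io.netty.handler.codec.http.HttpObjectAggregator(1024 * 1024));
        // pipelining is now unconditional: every request is stamped with a sequence here
        ch.pipeline().addLast("pipelining",
                new org.elasticsearch.http.netty4.Netty4HttpPipeliningHandler(logger, pipeliningMaxEvents));
        ch.pipeline().addLast("handler", requestHandler);
    }
}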
- */ -public class HttpPipelinedRequest implements ReferenceCounted { - - private final LastHttpContent last; - private final int sequence; - - public HttpPipelinedRequest(final LastHttpContent last, final int sequence) { - this.last = last; - this.sequence = sequence; - } - - public LastHttpContent last() { - return last; - } - - public HttpPipelinedResponse createHttpResponse(final FullHttpResponse response, final ChannelPromise promise) { - return new HttpPipelinedResponse(response, promise, sequence); - } - - @Override - public int refCnt() { - return last.refCnt(); - } - - @Override - public ReferenceCounted retain() { - last.retain(); - return this; - } - - @Override - public ReferenceCounted retain(int increment) { - last.retain(increment); - return this; - } - - @Override - public ReferenceCounted touch() { - last.touch(); - return this; - } - - @Override - public ReferenceCounted touch(Object hint) { - last.touch(hint); - return this; - } - - @Override - public boolean release() { - return last.release(); - } - - @Override - public boolean release(int decrement) { - return last.release(decrement); - } - -} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java deleted file mode 100644 index 6b6db94d69a59..0000000000000 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java +++ /dev/null @@ -1,94 +0,0 @@ -package org.elasticsearch.http.netty4.pipelining; - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-
-import io.netty.channel.ChannelPromise;
-import io.netty.handler.codec.http.FullHttpResponse;
-import io.netty.util.ReferenceCounted;
-
-class HttpPipelinedResponse implements Comparable<HttpPipelinedResponse>, ReferenceCounted {
-
-    private final FullHttpResponse response;
-    private final ChannelPromise promise;
-    private final int sequence;
-
-    HttpPipelinedResponse(FullHttpResponse response, ChannelPromise promise, int sequence) {
-        this.response = response;
-        this.promise = promise;
-        this.sequence = sequence;
-    }
-
-    public FullHttpResponse response() {
-        return response;
-    }
-
-    public ChannelPromise promise() {
-        return promise;
-    }
-
-    public int sequence() {
-        return sequence;
-    }
-
-    @Override
-    public int compareTo(HttpPipelinedResponse o) {
-        return Integer.compare(sequence, o.sequence);
-    }
-
-    @Override
-    public int refCnt() {
-        return response.refCnt();
-    }
-
-    @Override
-    public ReferenceCounted retain() {
-        response.retain();
-        return this;
-    }
-
-    @Override
-    public ReferenceCounted retain(int increment) {
-        response.retain(increment);
-        return this;
-    }
-
-    @Override
-    public ReferenceCounted touch() {
-        response.touch();
-        return this;
-    }
-
-    @Override
-    public ReferenceCounted touch(Object hint) {
-        response.touch(hint);
-        return this;
-    }
-
-    @Override
-    public boolean release() {
-        return response.release();
-    }
-
-    @Override
-    public boolean release(int decrement) {
-        return response.release(decrement);
-    }
-
-}
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java
deleted file mode 100644
index a90027c81482b..0000000000000
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.http.netty4.pipelining;
-
-import io.netty.channel.ChannelDuplexHandler;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelPromise;
-import io.netty.handler.codec.http.LastHttpContent;
-import org.apache.logging.log4j.Logger;
-import org.elasticsearch.transport.netty4.Netty4Utils;
-
-import java.nio.channels.ClosedChannelException;
-import java.util.Collections;
-import java.util.PriorityQueue;
-
-/**
- * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests.
- */
-public class HttpPipeliningHandler extends ChannelDuplexHandler {
-
-    // we use a priority queue so that responses are ordered by their sequence number
-    private final PriorityQueue<HttpPipelinedResponse> holdingQueue;
-
-    private final Logger logger;
-    private final int maxEventsHeld;
-
-    /*
-     * The current read and write sequence numbers. Read sequence numbers are attached to requests in the order they are read from the
-     * channel, and then transferred to responses. A response is not written to the channel context until its sequence number matches the
-     * current write sequence, implying that all preceding messages have been written.
-     */
-    private int readSequence;
-    private int writeSequence;
-
-    /**
-     * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation.
-     *
-     * @param logger        for logging unexpected errors
-     * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is
-     *                      required as events cannot queue up indefinitely
-     */
-    public HttpPipeliningHandler(Logger logger, final int maxEventsHeld) {
-        this.logger = logger;
-        this.maxEventsHeld = maxEventsHeld;
-        this.holdingQueue = new PriorityQueue<>(1);
-    }
-
-    @Override
-    public void channelRead(final ChannelHandlerContext ctx, final Object msg) throws Exception {
-        if (msg instanceof LastHttpContent) {
-            ctx.fireChannelRead(new HttpPipelinedRequest(((LastHttpContent) msg).retain(), readSequence++));
-        } else {
-            ctx.fireChannelRead(msg);
-        }
-    }
-
-    @Override
-    public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) throws Exception {
-        if (msg instanceof HttpPipelinedResponse) {
-            final HttpPipelinedResponse current = (HttpPipelinedResponse) msg;
-            /*
-             * We attach the promise to the response. When we invoke a write on the channel with the response, we must ensure that we invoke
-             * the write methods that accept the same promise that we have attached to the response otherwise as the response proceeds
-             * through the handler pipeline a different promise will be used until reaching this handler. Therefore, we assert here that the
-             * attached promise is identical to the provided promise as a safety mechanism that we are respecting this.
-             */
-            assert current.promise() == promise;
-
-            boolean channelShouldClose = false;
-
-            synchronized (holdingQueue) {
-                if (holdingQueue.size() < maxEventsHeld) {
-                    holdingQueue.add(current);
-
-                    while (!holdingQueue.isEmpty()) {
-                        /*
-                         * Since the response with the lowest sequence number is the top of the priority queue, we know if its sequence
-                         * number does not match the current write sequence number then we have not processed all preceding responses yet.
-                         */
-                        final HttpPipelinedResponse top = holdingQueue.peek();
-                        if (top.sequence() != writeSequence) {
-                            break;
-                        }
-                        holdingQueue.remove();
-                        /*
-                         * We must use the promise attached to the response; this is necessary since are going to hold a response until all
-                         * responses that precede it in the pipeline are written first. Note that the promise from the method invocation is
-                         * not ignored, it will already be attached to an existing response and consumed when that response is drained.
-                         */
-                        ctx.write(top.response(), top.promise());
-                        writeSequence++;
-                    }
-                } else {
-                    channelShouldClose = true;
-                }
-            }
-
-            if (channelShouldClose) {
-                try {
-                    Netty4Utils.closeChannels(Collections.singletonList(ctx.channel()));
-                } finally {
-                    current.release();
-                    promise.setSuccess();
-                }
-            }
-        } else {
-            ctx.write(msg, promise);
-        }
-    }
-
-    @Override
-    public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
-        if (holdingQueue.isEmpty() == false) {
-            ClosedChannelException closedChannelException = new ClosedChannelException();
-            HttpPipelinedResponse pipelinedResponse;
-            while ((pipelinedResponse = holdingQueue.poll()) != null) {
-                try {
-                    pipelinedResponse.release();
-                    pipelinedResponse.promise().setFailure(closedChannelException);
-                } catch (Exception e) {
-                    logger.error("unexpected error while releasing pipelined http responses", e);
-                }
-            }
-        }
-        ctx.close(promise);
-    }
-}
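// Hedged, JDK-only illustration of the ordering rule shared by the HttpPipeliningHandler
// deleted above and its Netty4HttpPipeliningHandler replacement earlier in this patch:
// out-of-order responses wait in a priority queue keyed by sequence number and are
// written only once every lower sequence has been written. Not the handler itself:
class SequenceOrderingSketch {
    public static void main(String[] args) {
        java.util.PriorityQueue<Integer> held = new java.util.PriorityQueue<>();
        int writeSequence = 0;
        for (int sequence : new int[] {2, 0, 1}) { // responses complete out of order
            held.add(sequence);
            while (held.isEmpty() == false && held.peek() == writeSequence) {
                System.out.println("write response " + held.poll()); // prints 0, 1, 2
                writeSequence++;
            }
        }
    }
}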
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java
index 0ef1ea585b11c..7c5b35a322996 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java
@@ -60,7 +60,6 @@ import org.elasticsearch.http.HttpTransportSettings;
 import org.elasticsearch.http.NullDispatcher;
 import org.elasticsearch.http.netty4.cors.Netty4CorsHandler;
-import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestResponse;
@@ -212,12 +211,12 @@ public void testHeadersSet() {
         final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
         httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote");
         final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel();
-        Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel);
+        final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel);
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
 
         // send a response
         Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         TestResponse resp = new TestResponse();
         final String customHeader = "custom-header";
         final String customHeaderValue = "xyz";
@@ -227,7 +226,7 @@ public void testHeadersSet() {
         // inspect what was written
         List<Object> writtenObjects = writeCapturingChannel.getWrittenObjects();
         assertThat(writtenObjects.size(), is(1));
-        HttpResponse response = (HttpResponse) writtenObjects.get(0);
+        HttpResponse response = ((Netty4HttpResponse) writtenObjects.get(0)).getResponse();
         assertThat(response.headers().get("non-existent-header"), nullValue());
         assertThat(response.headers().get(customHeader), equalTo(customHeaderValue));
         assertThat(response.headers().get(HttpHeaderNames.CONTENT_LENGTH), equalTo(Integer.toString(resp.content().length())));
@@ -243,10 +242,9 @@ public void testReleaseOnSendToClosedChannel() {
         final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
         final EmbeddedChannel embeddedChannel = new EmbeddedChannel();
         final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel);
-        final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null;
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
         final Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         final TestResponse response = new TestResponse(bigArrays);
         assertThat(response.content(), instanceOf(Releasable.class));
         embeddedChannel.close();
@@ -263,10 +261,9 @@ public void testReleaseOnSendToChannelAfterException() throws IOException {
         final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
         final EmbeddedChannel embeddedChannel = new EmbeddedChannel();
         final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel);
-        final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null;
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
         final Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR,
             JsonXContent.contentBuilder().startObject().endObject());
         assertThat(response.content(), not(instanceOf(Releasable.class)));
@@ -312,7 +309,7 @@ public void testConnectionClose() throws Exception {
             assertTrue(embeddedChannel.isOpen());
             HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
             final Netty4HttpChannel channel =
-                new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext());
+                new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
             final TestResponse resp = new TestResponse();
             channel.sendResponse(resp);
             assertThat(embeddedChannel.isOpen(), equalTo(!close));
@@ -340,13 +337,13 @@ private FullHttpResponse executeRequest(final Settings settings, final String or
             HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
             Netty4HttpChannel channel =
-                new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext());
+                new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
             channel.sendResponse(new TestResponse());
 
             // get the response
             List<Object> writtenObjects = writeCapturingChannel.getWrittenObjects();
             assertThat(writtenObjects.size(), is(1));
-            return (FullHttpResponse) writtenObjects.get(0);
+            return ((Netty4HttpResponse) writtenObjects.get(0)).getResponse();
         }
     }
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java
similarity index 83%
rename from modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java
rename to modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java
index ffb6c8fb3569d..21151304424c1 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.http.netty4.pipelining;
+package org.elasticsearch.http.netty4;
 
 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufUtil;
@@ -37,6 +37,7 @@ import io.netty.handler.codec.http.LastHttpContent;
 import io.netty.handler.codec.http.QueryStringDecoder;
 import org.elasticsearch.common.Randomness;
+import org.elasticsearch.http.HttpPipelinedRequest;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.After;
 
@@ -62,7 +63,8 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
 
-    private final ExecutorService executorService = Executors.newFixedThreadPool(randomIntBetween(4, 8));
+    private final ExecutorService handlerService = Executors.newFixedThreadPool(randomIntBetween(4, 8));
+    private final ExecutorService eventLoopService = Executors.newFixedThreadPool(1);
     private final Map<String, CountDownLatch> waitingRequests = new ConcurrentHashMap<>();
     private final Map<String, CountDownLatch> finishingRequests = new ConcurrentHashMap<>();
@@ -79,15 +81,19 @@ private CountDownLatch finishRequest(String url) {
     }
 
     private void shutdownExecutorService() throws InterruptedException {
-        if (!executorService.isShutdown()) {
-            executorService.shutdown();
-            executorService.awaitTermination(10, TimeUnit.SECONDS);
+        if (!handlerService.isShutdown()) {
+            handlerService.shutdown();
+            handlerService.awaitTermination(10, TimeUnit.SECONDS);
+        }
+        if (!eventLoopService.isShutdown()) {
+            eventLoopService.shutdown();
+            eventLoopService.awaitTermination(10, TimeUnit.SECONDS);
         }
     }
 
     public void testThatPipeliningWorksWithFastSerializedRequests() throws InterruptedException {
         final int numberOfRequests = randomIntBetween(2, 128);
-        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests),
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests),
             new WorkEmulatorHandler());
 
         for (int i = 0; i < numberOfRequests; i++) {
@@ -114,7 +120,7 @@ public void testThatPipeliningWorksWithFastSerializedRequests() throws Interrupt
 
     public void testThatPipeliningWorksWhenSlowRequestsInDifferentOrder() throws InterruptedException {
         final int numberOfRequests = randomIntBetween(2, 128);
-        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests),
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests),
             new WorkEmulatorHandler());
 
         for (int i = 0; i < numberOfRequests; i++) {
@@ -147,7 +153,7 @@ public void testThatPipeliningWorksWithChunkedRequests() throws InterruptedExcep
         final EmbeddedChannel embeddedChannel = new EmbeddedChannel(
             new AggregateUrisAndHeadersHandler(),
-            new HttpPipeliningHandler(logger, numberOfRequests),
+            new Netty4HttpPipeliningHandler(logger, numberOfRequests),
             new WorkEmulatorHandler());
 
         for (int i = 0; i < numberOfRequests; i++) {
@@ -176,7 +182,7 @@ public void testThatPipeliningWorksWithChunkedRequests() throws InterruptedExcep
 
     public void testThatPipeliningClosesConnectionWithTooManyEvents() throws InterruptedException {
         final int numberOfRequests = randomIntBetween(2, 128);
-        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests),
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests),
             new WorkEmulatorHandler());
 
         for (int i = 0; i < 1 + numberOfRequests + 1; i++) {
@@ -184,7 +190,7 @@ public void testThatPipeliningClosesConnectionWithTooManyEvents() throws Interru
         }
 
         final List<CountDownLatch> latches = new ArrayList<>();
-        final List<Integer> requests = IntStream.range(1, numberOfRequests + 1).mapToObj(r -> r).collect(Collectors.toList());
+        final List<Integer> requests = IntStream.range(1, numberOfRequests + 1).boxed().collect(Collectors.toList());
         Randomness.shuffle(requests);
 
         for (final Integer request : requests) {
@@ -205,25 +211,26 @@ public void testThatPipeliningClosesConnectionWithTooManyEvents() throws Interru
     public void testPipeliningRequestsAreReleased() throws InterruptedException {
         final int numberOfRequests = 10;
         final EmbeddedChannel embeddedChannel =
-            new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests + 1));
+            new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests + 1));
 
         for (int i = 0; i < numberOfRequests; i++) {
             embeddedChannel.writeInbound(createHttpRequest("/" + i));
         }
 
-        HttpPipelinedRequest inbound;
-        ArrayList<HttpPipelinedRequest> requests = new ArrayList<>();
+        HttpPipelinedRequest<LastHttpContent> inbound;
+        ArrayList<HttpPipelinedRequest<LastHttpContent>> requests = new ArrayList<>();
         while ((inbound = embeddedChannel.readInbound()) != null) {
             requests.add(inbound);
        }
 
         ArrayList<ChannelPromise> promises = new ArrayList<>();
         for (int i = 1; i < requests.size(); ++i) {
-            final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK);
+            final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK);
             ChannelPromise promise = embeddedChannel.newPromise();
             promises.add(promise);
-            HttpPipelinedResponse response = requests.get(i).createHttpResponse(httpResponse, promise);
-            embeddedChannel.writeAndFlush(response, promise);
+            int sequence = requests.get(i).getSequence();
+            Netty4HttpResponse resp = new Netty4HttpResponse(sequence, httpResponse);
+            embeddedChannel.writeAndFlush(resp, promise);
         }
 
         for (ChannelPromise promise : promises) {
@@ -260,14 +267,14 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpRequest request) thro
     }
 
-    private class WorkEmulatorHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest> {
+    private class WorkEmulatorHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<LastHttpContent>> {
 
         @Override
-        protected void channelRead0(final ChannelHandlerContext ctx, final HttpPipelinedRequest pipelinedRequest) throws Exception {
+        protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest<LastHttpContent> pipelinedRequest) {
+            LastHttpContent request = pipelinedRequest.getRequest();
             final QueryStringDecoder decoder;
-            if (pipelinedRequest.last() instanceof FullHttpRequest) {
-                final FullHttpRequest fullHttpRequest = (FullHttpRequest) pipelinedRequest.last();
-                decoder = new QueryStringDecoder(fullHttpRequest.uri());
+            if (request instanceof FullHttpRequest) {
+                decoder = new QueryStringDecoder(((FullHttpRequest)request).uri());
             } else {
                 decoder = new QueryStringDecoder(AggregateUrisAndHeadersHandler.QUEUE_URI.poll());
             }
@@ -282,12 +289,14 @@ protected void channelRead0(final ChannelHandlerContext ctx, final HttpPipelined
             final CountDownLatch finishingLatch = new CountDownLatch(1);
             finishingRequests.put(uri, finishingLatch);
 
-            executorService.submit(() -> {
+            handlerService.submit(() -> {
                 try {
                     waitingLatch.await(1000, TimeUnit.SECONDS);
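                    // (hedged note) the single-threaded eventLoopService used just below
                    // stands in for a Netty event loop: EmbeddedChannel is not thread-safe,
                    // so the emulated worker threads hand the actual write back to one
                    // dedicated writer thread, mirroring production where all writes for a
                    // given channel happen on that channel's event loop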
final ChannelPromise promise = ctx.newPromise(); - ctx.write(pipelinedRequest.createHttpResponse(httpResponse, promise), promise); - finishingLatch.countDown(); + eventLoopService.submit(() -> { + ctx.write(new Netty4HttpResponse(pipelinedRequest.getSequence(), httpResponse), promise); + finishingLatch.countDown(); + }); } catch (InterruptedException e) { fail(e.toString()); }
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index 0eb14a8a76e9b..f2b28b909187b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -38,9 +38,9 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpPipelinedRequest; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.NullDispatcher; -import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -52,16 +52,11 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; -import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; -import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.hasSize; /** * This test just tests that pipelining works in general without any connection to the Elasticsearch handler */ @@ -85,9 +80,8 @@ public void shutdown() throws Exception { } } - public void testThatHttpPipeliningWorksWhenEnabled() throws Exception { + public void testThatHttpPipeliningWorks() throws Exception { final Settings settings = Settings.builder() - .put("http.pipelining", true) .put("http.port", "0") .build(); try (HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) { @@ -112,48 +106,6 @@ public void testThatHttpPipeliningWorksWhenEnabled() throws Exception { } } - public void testThatHttpPipeliningCanBeDisabled() throws Exception { - final Settings settings = Settings.builder() - .put("http.pipelining", false) - .put("http.port", "0") - .build(); - try (HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) { - httpServerTransport.start(); - final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - - final int numberOfRequests = randomIntBetween(4, 16); - final Set<Integer> slowIds = new HashSet<>(); - final List<String> requests = new ArrayList<>(numberOfRequests); - for (int i = 0; i < numberOfRequests; i++) { - if (rarely()) { - requests.add("/slow/" + i); - slowIds.add(i); - } else { - requests.add("/" + i); - } - } - - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { - Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[]{})); - List<String> responseBodies = new ArrayList<>(Netty4HttpClient.returnHttpResponseBodies(responses)); - // we can not be sure about the order of the responses, but the slow ones should come last - assertThat(responseBodies, hasSize(numberOfRequests)); - for (int i = 0; i < numberOfRequests - slowIds.size(); i++) { - assertThat(responseBodies.get(i), matches("/\\d+")); - } - - final Set<Integer> ids = new HashSet<>(); - for (int i = 0; i < slowIds.size(); i++) { - final String response = responseBodies.get(numberOfRequests - slowIds.size() + i); - assertThat(response, matches("/slow/\\d+" )); - assertTrue(ids.add(Integer.parseInt(response.split("/")[2]))); - } - - assertThat(slowIds, equalTo(ids)); - } - } - } - class CustomNettyHttpServerTransport extends Netty4HttpServerTransport { private final ExecutorService executorService = Executors.newCachedThreadPool(); @@ -196,7 +148,7 @@ protected void initChannel(Channel ch) throws Exception { } - class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler<Object> { + class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> { private final ExecutorService executorService; @@ -205,7 +157,7 @@ class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler<Object> { } @Override - protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception { + protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) throws Exception { executorService.submit(new PossiblySlowRunnable(ctx, msg)); } @@ -220,26 +172,18 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E class PossiblySlowRunnable implements Runnable { private ChannelHandlerContext ctx; - private HttpPipelinedRequest pipelinedRequest; + private HttpPipelinedRequest<FullHttpRequest> pipelinedRequest; private FullHttpRequest fullHttpRequest; - PossiblySlowRunnable(ChannelHandlerContext ctx, Object msg) { + PossiblySlowRunnable(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) { this.ctx = ctx; - if (msg instanceof HttpPipelinedRequest) { - this.pipelinedRequest = (HttpPipelinedRequest) msg; - } else if (msg instanceof FullHttpRequest) { - this.fullHttpRequest = (FullHttpRequest) msg; - } + this.pipelinedRequest = msg; + this.fullHttpRequest = pipelinedRequest.getRequest(); } @Override public void run() { - final String uri; - if (pipelinedRequest != null && pipelinedRequest.last() instanceof FullHttpRequest) { - uri = ((FullHttpRequest) pipelinedRequest.last()).uri(); - } else { - uri = fullHttpRequest.uri(); - } + final String uri = fullHttpRequest.uri(); final ByteBuf buffer = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8); @@ -258,13 +202,7 @@ public void run() { } final ChannelPromise promise = ctx.newPromise(); - final Object msg; - if (pipelinedRequest != null) { - msg = pipelinedRequest.createHttpResponse(httpResponse, promise); - } else { - msg = httpResponse; - } - ctx.writeAndFlush(msg, promise); + ctx.writeAndFlush(new Netty4HttpResponse(pipelinedRequest.getSequence(), httpResponse), promise); } }
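For context on what these pipelining tests assert: with HTTP pipelining a client may write several requests on one connection before reading any response, and the server must return the responses in request order even when it finishes the work out of order. The following standalone Java sketch illustrates that wire-level contract only; it is not part of this change, and the host, port, and paths are placeholder assumptions.

import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

public class PipeliningWireSketch {
    public static void main(String[] args) throws Exception {
        try (Socket socket = new Socket("localhost", 9200)) { // placeholder endpoint
            OutputStream out = socket.getOutputStream();
            // write two requests back to back, without waiting for the first response
            String request = "GET /%s HTTP/1.1\r\nHost: localhost\r\n\r\n";
            out.write(String.format(request, "0").getBytes(StandardCharsets.UTF_8));
            out.write(String.format(request, "1").getBytes(StandardCharsets.UTF_8));
            out.flush();
            // a pipelining-aware server must answer "/0" first and "/1" second,
            // no matter which handler thread finished its work first; a single
            // read() may return partial data, which is fine for this sketch
            InputStream in = socket.getInputStream();
            byte[] buffer = new byte[8192];
            int read = in.read(buffer);
            System.out.println(new String(buffer, 0, read, StandardCharsets.UTF_8));
        }
    }
}

diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java similarity index 87% rename from modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java rename to modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java index 9723ee93faf59..ebb91d9663ed5 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java +++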
b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java @@ -21,8 +21,6 @@ import io.netty.handler.codec.http.FullHttpResponse; import org.elasticsearch.ESNetty4IntegTestCase; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -35,21 +33,13 @@ import static org.hamcrest.Matchers.is; @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) -public class Netty4PipeliningEnabledIT extends ESNetty4IntegTestCase { +public class Netty4PipeliningIT extends ESNetty4IntegTestCase { @Override protected boolean addMockHttpTransport() { return false; // enable http } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("http.pipelining", true) - .build(); - } - public void testThatNettyHttpServerSupportsPipelining() throws Exception { String[] requests = new String[]{"/", "/_nodes/stats", "/", "/_cluster/state", "/"}; diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index f99b0177de590..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -473a7f4d955f132bb498482648266653f8da85bd \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..781b814c99e45 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +452c9a9f86b79b9b3eaa7d6aa782e189d5bcfe8f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 08269eed6360f..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5a72b9a790e2552248c8bbb36af47c4c399ba27 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..baba08978587f --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +48c76a922bdfc7f50b1b6fe22e9456c555f3f990 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 325fe16120428..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -14f680ab9b886c7c5224ff682a7fa70b6df44a05 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 
b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..da19e1c3857a5 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +4db5777df468b0867ff6539c9ab687e0ed6cab41 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 9e88119ed1d16..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e033c68c9ec1ba9cd8439758adf7eb5fee22acef \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..148b5425d64b1 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +0e09e6b011ab2b1a0e3e0e1df2ab2a91dca8ba23 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 74721c857571c..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -08df0a5029f11c109b22064dec78c05dfa25f9e3 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..bce84d16a9a3d --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +ceefa0f9789ab9ea5c8ab9f67ed7a601a3ae6aa9 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 1c257797c08e2..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a9d1819b2b13f134f6a605ab5a59ce3c602c0460 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..762c56f77001f --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +b013adc183e52a74795ad3d3032f4d0f9db30b73 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 117ac05c91fe1..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47bc91ccb0cdf0c1c404646ffe0d5fd6b020a4ab \ 
No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..7631bea25691f --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +95300f29418f60e57e022d934d3462be9e1e2225 \ No newline at end of file
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java index f1d18ddacbd13..e3481e3c254d2 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -25,20 +25,21 @@ import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.DefaultHttpHeaders; import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.HttpPipelinedRequest; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; -import org.elasticsearch.nio.ReadWriteHandler; import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.ReadWriteHandler; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.WriteOperation; import org.elasticsearch.rest.RestRequest; @@ -77,6 +78,7 @@ public class HttpReadWriteHandler implements ReadWriteHandler { if (settings.isCompression()) { handlers.add(new HttpContentCompressor(settings.getCompressionLevel())); } + handlers.add(new NioHttpPipeliningHandler(transport.getLogger(), settings.getPipeliningMaxEvents())); adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0])); adaptor.addCloseListener((v, e) -> nioChannel.close()); @@ -95,9 +97,9 @@ public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException { @Override public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer<Void, Throwable> listener) { - assert message instanceof FullHttpResponse : "This channel only supports messages that are of type: " + FullHttpResponse.class - + ". Found type: " + message.getClass() + "."; - return new HttpWriteOperation(context, (FullHttpResponse) message, listener); + assert message instanceof NioHttpResponse : "This channel only supports messages that are of type: " + + NioHttpResponse.class + ". Found type: " + message.getClass() + "."; + return new HttpWriteOperation(context, (NioHttpResponse) message, listener); } @Override @@ -125,76 +127,85 @@ public void close() throws IOException { } } + @SuppressWarnings("unchecked") private void handleRequest(Object msg) { - final FullHttpRequest request = (FullHttpRequest) msg; + final HttpPipelinedRequest<FullHttpRequest> pipelinedRequest = (HttpPipelinedRequest<FullHttpRequest>) msg; + FullHttpRequest request = pipelinedRequest.getRequest(); - final FullHttpRequest copiedRequest = - new DefaultFullHttpRequest( - request.protocolVersion(), - request.method(), - request.uri(), - Unpooled.copiedBuffer(request.content()), - request.headers(), - request.trailingHeaders()); - - Exception badRequestCause = null; - - /* - * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there - * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we - * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, - * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the - * underlying exception that caused us to treat the request as bad. - */ - final NioHttpRequest httpRequest; - { - NioHttpRequest innerHttpRequest; - try { - innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest); - } catch (final RestRequest.ContentTypeHeaderException e) { - badRequestCause = e; - innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause); - } catch (final RestRequest.BadParameterException e) { - badRequestCause = e; - innerHttpRequest = requestWithoutParameters(copiedRequest); + try { + final FullHttpRequest copiedRequest = + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + Unpooled.copiedBuffer(request.content()), + request.headers(), + request.trailingHeaders()); + + Exception badRequestCause = null; + + /* + * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there + * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we + * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, + * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the + * underlying exception that caused us to treat the request as bad. + */ + final NioHttpRequest httpRequest; + { + NioHttpRequest innerHttpRequest; + try { + innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest); + } catch (final RestRequest.ContentTypeHeaderException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause); + } catch (final RestRequest.BadParameterException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutParameters(copiedRequest); + } + httpRequest = innerHttpRequest; } - httpRequest = innerHttpRequest; - } - /* - * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid - * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an - * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these - * parameter values. - */ - final NioHttpChannel channel; - { - NioHttpChannel innerChannel; - try { - innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), httpRequest, settings, threadContext); - } catch (final IllegalArgumentException e) { - if (badRequestCause == null) { - badRequestCause = e; - } else { - badRequestCause.addSuppressed(e); + /* + * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid + * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an + * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of + * these parameter values. + */ + final NioHttpChannel channel; + { + NioHttpChannel innerChannel; + int sequence = pipelinedRequest.getSequence(); + BigArrays bigArrays = transport.getBigArrays(); + try { + innerChannel = new NioHttpChannel(nioChannel, bigArrays, httpRequest, sequence, settings, threadContext); + } catch (final IllegalArgumentException e) { + if (badRequestCause == null) { + badRequestCause = e; + } else { + badRequestCause.addSuppressed(e); + } + final NioHttpRequest innerRequest = + new NioHttpRequest( + xContentRegistry, + Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters + copiedRequest.uri(), + copiedRequest); + innerChannel = new NioHttpChannel(nioChannel, bigArrays, innerRequest, sequence, settings, threadContext); } - final NioHttpRequest innerRequest = - new NioHttpRequest( - xContentRegistry, - Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters - copiedRequest.uri(), - copiedRequest); - innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), innerRequest, settings, threadContext); + channel = innerChannel; } - channel = innerChannel; - } - if (request.decoderResult().isFailure()) { - transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); - } else if (badRequestCause != null) { - transport.dispatchBadRequest(httpRequest, channel, badRequestCause); - } else { - transport.dispatchRequest(httpRequest, channel); + if (request.decoderResult().isFailure()) { + transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); + } else if (badRequestCause != null) { + transport.dispatchBadRequest(httpRequest, channel, badRequestCause); + } else { + transport.dispatchRequest(httpRequest, channel); + } + } finally { + // As we have copied the buffer, we can release the request + request.release(); } }
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java index c838ae85e9d40..8ddce7a5b73b5 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java @@ -19,7 +19,6 @@ package org.elasticsearch.http.nio; -import io.netty.handler.codec.http.FullHttpResponse; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.WriteOperation; @@ -28,10 +27,10 @@ public class HttpWriteOperation implements WriteOperation { private final SocketChannelContext channelContext; - private final FullHttpResponse response; + private final NioHttpResponse response; private final BiConsumer<Void, Throwable> listener; - HttpWriteOperation(SocketChannelContext channelContext, FullHttpResponse response, BiConsumer<Void, Throwable> listener) { + HttpWriteOperation(SocketChannelContext channelContext, NioHttpResponse response, BiConsumer<Void, Throwable> listener) { this.channelContext = channelContext; this.response = response; this.listener = listener; @@ -48,7 +47,7 @@ public SocketChannelContext getChannel() { } @Override - public FullHttpResponse getObject() { + public NioHttpResponse getObject() { return response; } }
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java index 3344a31264121..cf8c92bff905c 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java @@ -53,12 +53,7 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) try { ByteBuf message = (ByteBuf) msg; promise.addListener((f) -> message.release()); - NettyListener listener; - if (promise instanceof NettyListener) { - listener = (NettyListener) promise; - } else { - listener = new NettyListener(promise); - } + NettyListener listener = NettyListener.fromChannelPromise(promise); flushOperations.add(new FlushOperation(message.nioBuffers(), listener)); } catch (Exception e) { promise.setFailure(e); } @@ -107,18 +102,7 @@ public Object pollInboundMessage() { } public void write(WriteOperation writeOperation) { - ChannelPromise channelPromise = nettyChannel.newPromise(); - channelPromise.addListener(f -> { - BiConsumer<Void, Throwable> consumer = writeOperation.getListener(); - if (f.cause() == null) { - consumer.accept(null, null); - } else { - ExceptionsHelper.dieOnError(f.cause()); - consumer.accept(null, f.cause()); - } - }); - - nettyChannel.writeAndFlush(writeOperation.getObject(), new NettyListener(channelPromise)); + nettyChannel.writeAndFlush(writeOperation.getObject(), NettyListener.fromBiConsumer(writeOperation.getListener(), nettyChannel)); } public FlushOperation pollOutboundOperation() {
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java index e806b0d23ce3a..b907c0f2bc6f6 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java @@ -23,7 +23,7 @@ import io.netty.channel.ChannelPromise; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GenericFutureListener; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.util.concurrent.FutureUtils; import java.util.concurrent.ExecutionException; @@ -40,7 +40,7 @@ public class NettyListener implements BiConsumer<Void, Throwable>, ChannelPromise private final ChannelPromise promise; - NettyListener(ChannelPromise promise) { + private NettyListener(ChannelPromise promise) { this.promise = promise; } @@ -211,4 +211,30 @@ public boolean isVoid() { public ChannelPromise unvoid() { return promise.unvoid(); } + + public static NettyListener fromBiConsumer(BiConsumer<Void, Throwable> biConsumer, Channel channel) { + if (biConsumer instanceof NettyListener) { + return (NettyListener) biConsumer; + } else { + ChannelPromise channelPromise = channel.newPromise(); + channelPromise.addListener(f -> { + if (f.cause() == null) { + biConsumer.accept(null, null); + } else { + ExceptionsHelper.dieOnError(f.cause()); + biConsumer.accept(null, f.cause()); + } + }); + + return new NettyListener(channelPromise); + } + } + + public static NettyListener fromChannelPromise(ChannelPromise channelPromise) { + if (channelPromise instanceof NettyListener) { + return (NettyListener) channelPromise; + } else { + return new NettyListener(channelPromise); + } + } }
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java index 672c6d5abad0e..97eba20a16f16 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java @@ -52,20 +52,23 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.BiConsumer; public class NioHttpChannel extends AbstractRestChannel { private final BigArrays bigArrays; + private final int sequence; private final ThreadContext threadContext; private final FullHttpRequest nettyRequest; private final NioSocketChannel nioChannel; private final boolean resetCookies; - NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, + NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, int sequence, HttpHandlingSettings settings, ThreadContext threadContext) { super(request, settings.getDetailedErrorsEnabled()); this.nioChannel = nioChannel; this.bigArrays = bigArrays; + this.sequence = sequence; this.threadContext = threadContext; this.nettyRequest = request.getRequest(); this.resetCookies = settings.isResetCookies(); @@ -117,9 +120,8 @@ public void sendResponse(RestResponse response) { toClose.add(nioChannel::close); } - nioChannel.getContext().sendMessage(resp, (aVoid, throwable) -> { - Releasables.close(toClose); - }); + BiConsumer<Void, Throwable> listener = (aVoid, throwable) -> Releasables.close(toClose); + nioChannel.getContext().sendMessage(new NioHttpResponse(sequence, resp), listener); success = true; } finally { if (success == false) {
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java new file mode 100644 index 0000000000000..2b702042ba7a8 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.handler.codec.http.LastHttpContent; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.http.HttpPipelinedRequest; +import org.elasticsearch.http.HttpPipeliningAggregator; +import org.elasticsearch.http.nio.NettyListener; +import org.elasticsearch.http.nio.NioHttpResponse; + +import java.nio.channels.ClosedChannelException; +import java.util.List; + +/** + * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests. + */ +public class NioHttpPipeliningHandler extends ChannelDuplexHandler { + + private final Logger logger; + private final HttpPipeliningAggregator<NioHttpResponse, NettyListener> aggregator; + + /** + * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation. + * + * @param logger for logging unexpected errors + * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is + * required as events cannot queue up indefinitely + */ + public NioHttpPipeliningHandler(Logger logger, final int maxEventsHeld) { + this.logger = logger; + this.aggregator = new HttpPipeliningAggregator<>(maxEventsHeld); + } + + @Override + public void channelRead(final ChannelHandlerContext ctx, final Object msg) { + if (msg instanceof LastHttpContent) { + HttpPipelinedRequest<LastHttpContent> pipelinedRequest = aggregator.read(((LastHttpContent) msg)); + ctx.fireChannelRead(pipelinedRequest); + } else { + ctx.fireChannelRead(msg); + } + } + + @Override + public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) { + assert msg instanceof NioHttpResponse : "Message must be type: " + NioHttpResponse.class; + NioHttpResponse response = (NioHttpResponse) msg; + boolean success = false; + try { + NettyListener listener = NettyListener.fromChannelPromise(promise); + List<Tuple<NioHttpResponse, NettyListener>> readyResponses = aggregator.write(response, listener); + success = true; + for (Tuple<NioHttpResponse, NettyListener> responseToWrite : readyResponses) { + ctx.write(responseToWrite.v1().getResponse(), responseToWrite.v2()); + } + } catch (IllegalStateException e) { + ctx.channel().close(); + } finally { + if (success == false) { + promise.setFailure(new ClosedChannelException()); + } + } + } + + @Override + public void close(ChannelHandlerContext ctx, ChannelPromise promise) { + List<Tuple<NioHttpResponse, NettyListener>> inflightResponses = aggregator.removeAllInflightResponses(); + + if (inflightResponses.isEmpty() == false) { + ClosedChannelException closedChannelException = new ClosedChannelException(); + for (Tuple<NioHttpResponse, NettyListener> inflightResponse : inflightResponses) { + try { + inflightResponse.v2().setFailure(closedChannelException); + } catch (RuntimeException e) { + logger.error("unexpected error while releasing pipelined http responses", e); + } + } + } + ctx.close(promise); + } +}
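As a mental model for what the HttpPipeliningAggregator used above does, here is a minimal, self-contained sketch of sequence-based response ordering: responses may arrive in any order, but one is only released once every lower sequence number has been released, and holding more than a bounded number of out-of-order responses is an error. This is an illustration only, not the real aggregator; the class and member names are invented, and the real implementation also tracks a per-response listener alongside each held response. Under this model, writing sequences 2, then 0, then 1 releases nothing, then [0], then [1, 2].

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

// Hypothetical sketch of sequence-based pipelined response ordering.
final class PipelineOrderingSketch<T> {

    private static final class Held<T> {
        final int sequence;
        final T response;
        Held(int sequence, T response) {
            this.sequence = sequence;
            this.response = response;
        }
    }

    // out-of-order responses parked until their turn on the wire comes up
    private final PriorityQueue<Held<T>> held = new PriorityQueue<>(Comparator.comparingInt((Held<T> h) -> h.sequence));
    private final int maxHeld;
    private int nextWriteSequence = 0; // the only sequence currently allowed on the wire

    PipelineOrderingSketch(int maxHeld) {
        this.maxHeld = maxHeld;
    }

    // Accepts a response answering the request with the given sequence and returns
    // every response that may now be written, in request order.
    List<T> write(int sequence, T response) {
        if (held.size() >= maxHeld) {
            // mirrors the handler above: refuse to buffer unbounded out-of-order responses
            throw new IllegalStateException("too many pipelined responses held: " + held.size());
        }
        held.add(new Held<>(sequence, response));
        List<T> ready = new ArrayList<>();
        while (held.isEmpty() == false && held.peek().sequence == nextWriteSequence) {
            ready.add(held.poll().response);
            nextWriteSequence++;
        }
        return ready;
    }
}

diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java new file mode 100644 index 0000000000000..4b634994b4557 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.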
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.handler.codec.http.FullHttpResponse; +import org.elasticsearch.http.HttpPipelinedMessage; + +public class NioHttpResponse extends HttpPipelinedMessage { + + private final FullHttpResponse response; + + public NioHttpResponse(int sequence, FullHttpResponse response) { + super(sequence); + this.response = response; + } + + public FullHttpResponse getResponse() { + return response; + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java index bdbee715bd0cf..825a023bd51bc 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -20,6 +20,7 @@ package org.elasticsearch.http.nio; import io.netty.handler.timeout.ReadTimeoutException; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; @@ -84,6 +85,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; public class NioHttpServerTransport extends AbstractHttpServerTransport { @@ -124,6 +126,7 @@ public NioHttpServerTransport(Settings settings, NetworkService networkService, ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); + int pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()), Math.toIntExact(maxChunkSize.getBytes()), Math.toIntExact(maxHeaderSize.getBytes()), @@ -131,7 +134,8 @@ public NioHttpServerTransport(Settings settings, NetworkService networkService, SETTING_HTTP_RESET_COOKIES.get(settings), SETTING_HTTP_COMPRESSION.get(settings), SETTING_HTTP_COMPRESSION_LEVEL.get(settings), - SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings)); + SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), + pipeliningMaxEvents); this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings); this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings); @@ -140,23 +144,29 @@ public NioHttpServerTransport(Settings settings, NetworkService networkService, 
this.tcpReceiveBufferSize = Math.toIntExact(SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings).getBytes()); - logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]", - maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength); + logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]," + + " pipelining_max_events[{}]", + maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength, pipeliningMaxEvents); } BigArrays getBigArrays() { return bigArrays; } + public Logger getLogger() { + return logger; + } + @Override protected void doStart() { boolean success = false; try { int acceptorCount = NIO_HTTP_ACCEPTOR_COUNT.get(settings); int workerCount = NIO_HTTP_WORKER_COUNT.get(settings); - nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, - AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), - workerCount, SocketEventHandler::new); + nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, + (s) -> new AcceptorEventHandler(s, this::nonChannelExceptionCaught), + daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), workerCount, + () -> new SocketEventHandler(this::nonChannelExceptionCaught)); channelFactory = new HttpChannelFactory(); this.boundAddress = createBoundHttpAddress(); @@ -265,6 +275,10 @@ protected void exceptionCaught(NioSocketChannel channel, Exception cause) { } } + protected void nonChannelExceptionCaught(Exception ex) { + logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), ex); + } + private void closeChannels(List<NioChannel> channels) { List<ActionFuture<Void>> futures = new ArrayList<>(channels.size()); @@ -312,8 +326,10 @@ public NioSocketChannel createChannel(SocketSelector selector, SocketChannel cha @Override public NioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { NioServerSocketChannel nioChannel = new NioServerSocketChannel(channel); - ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, NioHttpServerTransport.this::acceptChannel, - (e) -> {}); + Consumer<Exception> exceptionHandler = (e) -> logger.error(() -> + new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e); + Consumer<NioSocketChannel> acceptor = NioHttpServerTransport.this::acceptChannel; + ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, acceptor, exceptionHandler); nioChannel.setContext(context); return nioChannel; }
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 9d794f951c8d2..2ef49d7791234 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport.nio; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -105,9 +106,10 @@ protected void doStart() { if (useNetworkServer) { acceptorCount = NioTransport.NIO_ACCEPTOR_COUNT.get(settings); } - nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, - AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), - NioTransport.NIO_WORKER_COUNT.get(settings), SocketEventHandler::new); + nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, + (s) -> new AcceptorEventHandler(s, this::onNonChannelException), + daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), NioTransport.NIO_WORKER_COUNT.get(settings), + () -> new SocketEventHandler(this::onNonChannelException)); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); clientChannelFactory = channelFactory(clientProfileSettings, true); @@ -193,8 +195,10 @@ public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel @Override public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel); - ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, NioTransport.this::acceptChannel, - (e) -> {}); + Consumer<Exception> exceptionHandler = (e) -> logger.error(() -> + new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e); + Consumer<NioSocketChannel> acceptor = NioTransport.this::acceptChannel; + ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, acceptor, exceptionHandler); nioChannel.setContext(context); return nioChannel; }
diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java index e0c8bacca1d85..703f7acbf8257 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.http.nio.NioHttpServerTransport; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.nio.NioTransport; @@ -43,11 +44,13 @@ protected boolean addMockTransportService() { @Override protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); - // randomize netty settings + // randomize nio settings if (randomBoolean()) { builder.put(NioTransport.NIO_WORKER_COUNT.getKey(), random().nextInt(3) + 1); + builder.put(NioHttpServerTransport.NIO_HTTP_WORKER_COUNT.getKey(), random().nextInt(3) + 1); } builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME); + builder.put(NetworkModule.HTTP_TYPE_KEY, NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME); return builder.build(); }
diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java index dce8319d2fc82..cc8eeb77cc2f6 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java @@ -61,11 +61,11 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; public class HttpReadWriteHandlerTests extends ESTestCase { @@ -91,7 +91,8 @@ public void setMocks() { SETTING_HTTP_RESET_COOKIES.getDefault(settings), SETTING_HTTP_COMPRESSION.getDefault(settings), SETTING_HTTP_COMPRESSION_LEVEL.getDefault(settings), - SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings)); + SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings), + SETTING_PIPELINING_MAX_EVENTS.getDefault(settings)); ThreadContext threadContext = new ThreadContext(settings); nioSocketChannel = mock(NioSocketChannel.class); handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, threadContext); @@ -148,7 +149,8 @@ public void testDecodeHttpRequestContentLengthToLongGeneratesOutboundMessage() t handler.consumeReads(toChannelBuffer(buf)); - verifyZeroInteractions(transport); + verify(transport, times(0)).dispatchBadRequest(any(), any(), any()); + verify(transport, times(0)).dispatchRequest(any(), any()); List<FlushOperation> flushOperations = handler.pollFlushOperations(); assertFalse(flushOperations.isEmpty()); @@ -169,9 +171,10 @@ public void testEncodeHttpResponse() throws IOException { prepareHandlerForResponse(handler); FullHttpResponse fullHttpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + NioHttpResponse pipelinedResponse = new NioHttpResponse(0, fullHttpResponse); SocketChannelContext context = mock(SocketChannelContext.class); - HttpWriteOperation writeOperation = new HttpWriteOperation(context, fullHttpResponse, mock(BiConsumer.class)); + HttpWriteOperation writeOperation = new HttpWriteOperation(context, pipelinedResponse, mock(BiConsumer.class)); List<FlushOperation> flushOperations = handler.writeToBytes(writeOperation); HttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperations.get(0).getBuffersToWrite()));
diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java new file mode 100644 index 0000000000000..d12c608aeca2a --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java @@ -0,0 +1,304 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http.LastHttpContent; +import io.netty.handler.codec.http.QueryStringDecoder; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.http.HttpPipelinedRequest; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; + +import java.nio.channels.ClosedChannelException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedTransferQueue; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; +import static io.netty.handler.codec.http.HttpResponseStatus.OK; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; +import static org.hamcrest.core.Is.is; + +public class NioHttpPipeliningHandlerTests extends ESTestCase { + + private final ExecutorService handlerService = Executors.newFixedThreadPool(randomIntBetween(4, 8)); + private final ExecutorService eventLoopService = Executors.newFixedThreadPool(1); + private final Map<String, CountDownLatch> waitingRequests = new ConcurrentHashMap<>(); + private final Map<String, CountDownLatch> finishingRequests = new ConcurrentHashMap<>(); + + @After + public void cleanup() throws Exception { + waitingRequests.keySet().forEach(this::finishRequest); + shutdownExecutorService(); + } + + private CountDownLatch finishRequest(String url) { + waitingRequests.get(url).countDown(); + return finishingRequests.get(url); + } + + private void shutdownExecutorService() throws InterruptedException { + if (!handlerService.isShutdown()) { + handlerService.shutdown(); + handlerService.awaitTermination(10, TimeUnit.SECONDS); + } + if (!eventLoopService.isShutdown()) { + eventLoopService.shutdown(); + eventLoopService.awaitTermination(10, TimeUnit.SECONDS); + } + } + + public void testThatPipeliningWorksWithFastSerializedRequests() throws InterruptedException { + final int numberOfRequests = randomIntBetween(2, 128); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler()); + + for (int i = 0; i < numberOfRequests; i++) { + embeddedChannel.writeInbound(createHttpRequest("/" + String.valueOf(i))); + } + + final List<CountDownLatch> latches = new ArrayList<>(); + for (final String url : waitingRequests.keySet()) { + latches.add(finishRequest(url)); + } + + for (final CountDownLatch latch : latches) { + latch.await(); + } + + embeddedChannel.flush(); + + for (int i = 0; i < numberOfRequests; i++) { + assertReadHttpMessageHasContent(embeddedChannel, String.valueOf(i)); + } + + assertTrue(embeddedChannel.isOpen()); + } + + public void testThatPipeliningWorksWhenSlowRequestsInDifferentOrder() throws InterruptedException { + final int numberOfRequests = randomIntBetween(2, 128); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler()); + + for (int i = 0; i < numberOfRequests; i++) { + embeddedChannel.writeInbound(createHttpRequest("/" + String.valueOf(i))); + } + + // random order execution + final List<String> urls = new ArrayList<>(waitingRequests.keySet()); + Randomness.shuffle(urls); + final List<CountDownLatch> latches = new ArrayList<>(); + for (final String url : urls) { + latches.add(finishRequest(url)); + } + + for (final CountDownLatch latch : latches) { + latch.await(); + } + + embeddedChannel.flush(); + + for (int i = 0; i < numberOfRequests; i++) { + assertReadHttpMessageHasContent(embeddedChannel, String.valueOf(i)); + } + + assertTrue(embeddedChannel.isOpen()); + } + + public void testThatPipeliningWorksWithChunkedRequests() throws InterruptedException { + final int numberOfRequests = randomIntBetween(2, 128); + final EmbeddedChannel embeddedChannel = + new EmbeddedChannel( + new AggregateUrisAndHeadersHandler(), + new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler()); + + for (int i = 0; i < numberOfRequests; i++) { + final DefaultHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/" + i); + embeddedChannel.writeInbound(request); + embeddedChannel.writeInbound(LastHttpContent.EMPTY_LAST_CONTENT); + } + + final List<CountDownLatch> latches = new ArrayList<>(); + for (int i = numberOfRequests - 1; i >= 0; i--) { + latches.add(finishRequest(Integer.toString(i))); + } + + for (final CountDownLatch latch : latches) { + latch.await(); + } + + embeddedChannel.flush(); + + for (int i = 0; i < numberOfRequests; i++) { + assertReadHttpMessageHasContent(embeddedChannel, Integer.toString(i)); + } + + assertTrue(embeddedChannel.isOpen()); + } + + public void testThatPipeliningClosesConnectionWithTooManyEvents() throws InterruptedException { + final int numberOfRequests = randomIntBetween(2, 128); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler()); + + for (int i = 0; i < 1 + numberOfRequests + 1; i++) { + embeddedChannel.writeInbound(createHttpRequest("/" + Integer.toString(i))); + } + + final List<CountDownLatch> latches = new ArrayList<>(); + final List<Integer> requests = IntStream.range(1, numberOfRequests + 1).boxed().collect(Collectors.toList()); + Randomness.shuffle(requests); + + for (final Integer request : requests) { + latches.add(finishRequest(request.toString())); + } + + for (final CountDownLatch latch : latches) { + latch.await(); + } + + finishRequest(Integer.toString(numberOfRequests + 1)).await(); + + embeddedChannel.flush(); + + assertFalse(embeddedChannel.isOpen()); + } + + public void testPipeliningRequestsAreReleased() throws InterruptedException { + final int numberOfRequests = 10; + final EmbeddedChannel embeddedChannel = + new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests + 1)); + + for (int i = 0; i < numberOfRequests; i++) { + embeddedChannel.writeInbound(createHttpRequest("/" + i)); + } + + HttpPipelinedRequest<LastHttpContent> inbound; + ArrayList<HttpPipelinedRequest<LastHttpContent>> requests = new ArrayList<>(); + while ((inbound = embeddedChannel.readInbound()) != null) { + requests.add(inbound); + } + + ArrayList<ChannelPromise> promises = new ArrayList<>(); + for (int i = 1; i < requests.size(); ++i) { + final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK); + ChannelPromise promise = embeddedChannel.newPromise(); + promises.add(promise); + int sequence = requests.get(i).getSequence(); + NioHttpResponse resp = new NioHttpResponse(sequence, httpResponse); + embeddedChannel.writeAndFlush(resp, promise); + } + + for (ChannelPromise promise : promises) { + assertFalse(promise.isDone()); + } + embeddedChannel.close().syncUninterruptibly(); + for (ChannelPromise promise : promises) { + assertTrue(promise.isDone()); + assertTrue(promise.cause() instanceof ClosedChannelException); + } + } + + private void assertReadHttpMessageHasContent(EmbeddedChannel embeddedChannel, String expectedContent) { + FullHttpResponse response = (FullHttpResponse) embeddedChannel.outboundMessages().poll(); + assertNotNull("Expected response to exist, maybe you did not wait long enough?", response); + assertNotNull("Expected response to have content " + expectedContent, response.content()); + String data = new String(ByteBufUtil.getBytes(response.content()), StandardCharsets.UTF_8); + assertThat(data, is(expectedContent)); + } + + private FullHttpRequest createHttpRequest(String uri) { + return new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uri); + } + + private static class AggregateUrisAndHeadersHandler extends SimpleChannelInboundHandler<HttpRequest> { + + static final Queue<String> QUEUE_URI = new LinkedTransferQueue<>(); + + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpRequest request) throws Exception { + QUEUE_URI.add(request.uri()); + } + + } + + private class WorkEmulatorHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<LastHttpContent>> { + + @Override + protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest<LastHttpContent> pipelinedRequest) { + LastHttpContent request = pipelinedRequest.getRequest(); + final QueryStringDecoder decoder; + if (request instanceof FullHttpRequest) { + decoder = new QueryStringDecoder(((FullHttpRequest)request).uri()); + } else { + decoder = new QueryStringDecoder(AggregateUrisAndHeadersHandler.QUEUE_URI.poll()); + } + + final String uri = decoder.path().replace("/", ""); + final ByteBuf content = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8); + final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK, content); + httpResponse.headers().add(CONTENT_LENGTH, content.readableBytes()); + + final CountDownLatch waitingLatch = new CountDownLatch(1); + waitingRequests.put(uri, waitingLatch); + final CountDownLatch finishingLatch = new CountDownLatch(1); + finishingRequests.put(uri, finishingLatch); + + handlerService.submit(() -> { + try { + waitingLatch.await(1000, TimeUnit.SECONDS); + final ChannelPromise promise = ctx.newPromise(); + eventLoopService.submit(() -> { + ctx.write(new NioHttpResponse(pipelinedRequest.getSequence(), httpResponse), promise); + finishingLatch.countDown(); + }); + } catch (InterruptedException e) { + fail(e.toString()); + } + }); + } + } +}
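The tests above drive the pipelining handler through Netty's EmbeddedChannel, which pushes messages through a pipeline synchronously on the calling thread with no real sockets: writeInbound feeds the pipeline and readInbound/outboundMessages drain what comes out the other end. A minimal standalone usage, purely illustrative and unrelated to this change:

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.embedded.EmbeddedChannel;
import java.util.Locale;

public class EmbeddedChannelSketch {
    public static void main(String[] args) {
        // a trivial handler that upper-cases inbound strings
        EmbeddedChannel channel = new EmbeddedChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) {
                ctx.fireChannelRead(((String) msg).toUpperCase(Locale.ROOT));
            }
        });
        channel.writeInbound("hello");   // drive the pipeline synchronously, no real I/O
        String transformed = channel.readInbound();
        System.out.println(transformed); // prints HELLO
        channel.finish();                // marks the channel finished and releases buffers
    }
}

diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioPipeliningIT.java similarity index 53% rename from modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java rename to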
plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioPipeliningIT.java index af0e7c85a8f63..074aafd6eab4b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioPipeliningIT.java @@ -16,65 +16,53 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.http.netty4; + +package org.elasticsearch.http.nio; import io.netty.handler.codec.http.FullHttpResponse; -import org.elasticsearch.ESNetty4IntegTestCase; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.NioIntegTestCase; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import java.util.ArrayList; import java.util.Collection; -import java.util.List; import java.util.Locale; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) -public class Netty4PipeliningDisabledIT extends ESNetty4IntegTestCase { +public class NioPipeliningIT extends NioIntegTestCase { @Override protected boolean addMockHttpTransport() { return false; // enable http } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("http.pipelining", false) - .build(); - } - - public void testThatNettyHttpServerDoesNotSupportPipelining() throws Exception { - ensureGreen(); - String[] requests = new String[] {"/", "/_nodes/stats", "/", "/_cluster/state", "/", "/_nodes", "/"}; + public void testThatNioHttpServerSupportsPipelining() throws Exception { + String[] requests = new String[]{"/", "/_nodes/stats", "/", "/_cluster/state", "/"}; HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); - TransportAddress transportAddress = (TransportAddress) randomFrom(boundAddresses); + TransportAddress transportAddress = randomFrom(boundAddresses); try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { - Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests); - assertThat(responses, hasSize(requests.length)); - - List<String> opaqueIds = new ArrayList<>(Netty4HttpClient.returnOpaqueIds(responses)); + Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests); + assertThat(responses, hasSize(5)); - assertResponsesOutOfOrder(opaqueIds); + Collection<String> opaqueIds = Netty4HttpClient.returnOpaqueIds(responses); + assertOpaqueIdsInOrder(opaqueIds); } } - /** - * checks if all responses are there, but also tests that they are out of order because pipelining is disabled - */ - private void assertResponsesOutOfOrder(List<String> opaqueIds) { - String message = String.format(Locale.ROOT, "Expected returned http message ids to be in any order of: %s", opaqueIds); - assertThat(message, opaqueIds, containsInAnyOrder("0", "1", "2", "3", "4", "5", "6")); + private void assertOpaqueIdsInOrder(Collection<String> opaqueIds) { + // check if opaque ids are monotonically increasing + int i = 0; + String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds); + for (String 
opaqueId : opaqueIds) { + assertThat(msg, opaqueId, is(String.valueOf(i++))); + } } } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 6f4453aa06cc9..eb5517b7acb56 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -18,33 +18,8 @@ */ package org.elasticsearch.upgrades; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.Version; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Response; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.test.rest.yaml.ObjectPath; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Future; -import java.util.function.Predicate; - -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; -import static java.util.Collections.emptyMap; -import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING; -import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.notNullValue; public abstract class AbstractRollingTestCase extends ESRestTestCase { protected enum ClusterType { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 350636551d9ad..1351de16cf718 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ObjectPath; import java.io.IOException; diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java new file mode 100644 index 0000000000000..3ed98a5d1f772 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.common.Booleans; +import org.junit.Before; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assume.assumeThat; + +/** + * Basic tests for simple xpack functionality that are only run if the + * cluster is on the "zip" distribution. + */ +public class XPackIT extends AbstractRollingTestCase { + @Before + public void skipIfNotXPack() { + assumeThat("test is only supported if the distribution contains xpack", + System.getProperty("tests.distribution"), equalTo("zip")); + assumeThat("running this on the unupgraded cluster would change its state and it wouldn't work prior to 6.3 anyway", + CLUSTER_TYPE, equalTo(ClusterType.UPGRADED)); + /* + * *Mostly* we want this for when we're upgrading from pre-6.3's + * zip distribution which doesn't contain xpack to post 6.3's zip + * distribution which *does* contain xpack. But we'll also run it + * on all upgrades for completeness's sake. + */ + } + + /** + * Test a basic feature (SQL) which doesn't require any trial license. + * Note that the test methods on this class can run in any order so we + * might have already installed a trial license. + */ + public void testBasicFeature() throws IOException { + Request bulk = new Request("POST", "/sql_test/doc/_bulk"); + bulk.setJsonEntity( + "{\"index\":{}}\n" + + "{\"f\": \"1\"}\n" + + "{\"index\":{}}\n" + + "{\"f\": \"2\"}\n"); + bulk.addParameter("refresh", "true"); + client().performRequest(bulk); + + Request sql = new Request("POST", "/_xpack/sql"); + sql.setJsonEntity("{\"query\": \"SELECT * FROM sql_test WHERE f > 1 ORDER BY f ASC\"}"); + String response = EntityUtils.toString(client().performRequest(sql).getEntity()); + assertEquals("{\"columns\":[{\"name\":\"f\",\"type\":\"text\"}],\"rows\":[[\"2\"]]}", response); + } + + /** + * Test creating a trial license and using it. This is interesting because + * our other tests cover starting a new cluster with the default + * distribution and enabling the trial license but this test is the only + * one that can upgrade from the oss distribution to the default + * distribution with xpack and then create a trial license. We don't + * do a lot with the trial license because for the most + * part those things are tested elsewhere, off in xpack. But we do use the + * trial license a little bit to make sure that it works.
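+ * <p> + * As a purely illustrative sketch (nothing this test asserts), the result of starting the trial could be inspected through the license API, using the same 6.x {@code _xpack} endpoints as the requests below: + * <pre>{@code + * Request getLicense = new Request("GET", "/_xpack/license"); + * String license = EntityUtils.toString(client().performRequest(getLicense).getEntity()); + * // the response body should report "type" : "trial" once the trial has started + * }</pre>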
+ */ + public void testTrialLicense() throws IOException { + Request startTrial = new Request("POST", "/_xpack/license/start_trial"); + startTrial.addParameter("acknowledge", "true"); + client().performRequest(startTrial); + + String noJobs = EntityUtils.toString( + client().performRequest(new Request("GET", "/_xpack/ml/anomaly_detectors")).getEntity()); + assertEquals("{\"count\":0,\"jobs\":[]}", noJobs); + + Request createJob = new Request("PUT", "/_xpack/ml/anomaly_detectors/test_job"); + createJob.setJsonEntity( + "{\n" + + " \"analysis_config\" : {\n" + + " \"bucket_span\": \"10m\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"sum\",\n" + + " \"field_name\": \"total\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\n" + + " \"time_field\": \"timestamp\",\n" + + " \"time_format\": \"epoch_ms\"\n" + + " }\n" + + "}\n"); + client().performRequest(createJob); + } +} diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index bfa856e381b12..99132f0c89d5b 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -222,8 +221,8 @@ public void testThatMoreLikeThisQueryMultiTermVectorRequestContainsContextAndHea public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws IOException { final String IRRELEVANT_HEADER = "SomeIrrelevantHeader"; Request request = new Request("GET", "/" + queryIndex + "/_search"); - request.setHeaders(new BasicHeader(CUSTOM_HEADER, randomHeaderValue), - new BasicHeader(IRRELEVANT_HEADER, randomHeaderValue)); + request.addHeader(CUSTOM_HEADER, randomHeaderValue); + request.addHeader(IRRELEVANT_HEADER, randomHeaderValue); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); List searchRequests = getRequests(SearchRequest.class); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java index 4ab64abda453b..2d139e7955ea9 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -33,7 +32,8 @@ public class CorsNotSetIT extends HttpSmokeTestCase { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws IOException { String corsValue = "http://localhost:9200"; Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); diff --git 
a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java index da48e51b63bbe..e79e80315501b 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -53,25 +52,29 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testThatRegularExpressionWorksOnMatch() throws IOException { - String corsValue = "http://localhost:9200"; - Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue)); - Response response = getRestClient().performRequest(request); - assertResponseWithOriginheader(response, corsValue); - - corsValue = "https://localhost:9201"; - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue)); - response = getRestClient().performRequest(request); - assertResponseWithOriginheader(response, corsValue); - assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); + { + String corsValue = "http://localhost:9200"; + Request request = new Request("GET", "/"); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + Response response = getRestClient().performRequest(request); + assertResponseWithOriginHeader(response, corsValue); + } + { + String corsValue = "https://localhost:9201"; + Request request = new Request("GET", "/"); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + Response response = getRestClient().performRequest(request); + assertResponseWithOriginHeader(response, corsValue); + assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); + } } public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOException { Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", "http://evil-host:9200")); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", "http://evil-host:9200"); try { getRestClient().performRequest(request); fail("request should have failed"); @@ -85,7 +88,7 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOExcep public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws IOException { Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar")); + request.addHeader("User-Agent", "Mozilla Bar"); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); @@ -100,20 +103,20 @@ public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() public void testThatPreFlightRequestWorksOnMatch() throws IOException { String corsValue = "http://localhost:9200"; Request request = new Request("OPTIONS", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue), - new BasicHeader("Access-Control-Request-Method", "GET")); + request.addHeader("User-Agent", 
"Mozilla Bar"); + request.addHeader("Origin", corsValue); + request.addHeader("Access-Control-Request-Method", "GET"); Response response = getRestClient().performRequest(request); - assertResponseWithOriginheader(response, corsValue); + assertResponseWithOriginHeader(response, corsValue); assertNotNull(response.getHeader("Access-Control-Allow-Methods")); } public void testThatPreFlightRequestReturnsNullOnNonMatch() throws IOException { String corsValue = "http://evil-host:9200"; Request request = new Request("OPTIONS", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue), - new BasicHeader("Access-Control-Request-Method", "GET")); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + request.addHeader("Access-Control-Request-Method", "GET"); try { getRestClient().performRequest(request); fail("request should have failed"); @@ -126,7 +129,7 @@ public void testThatPreFlightRequestReturnsNullOnNonMatch() throws IOException { } } - protected static void assertResponseWithOriginheader(Response response, String expectedCorsHeader) { + private static void assertResponseWithOriginHeader(Response response, String expectedCorsHeader) { assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), is(expectedCorsHeader)); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java index 6af08577393d9..a9a0a0c7ed945 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; import org.apache.http.HttpHeaders; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.test.rest.ESRestTestCase; @@ -39,7 +38,7 @@ public class HttpCompressionIT extends ESRestTestCase { public void testCompressesResponseIfRequested() throws IOException { Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); + request.addHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING); Response response = client().performRequest(request); assertEquals(200, response.getStatusLine().getStatusCode()); assertEquals(GZIP_ENCODING, response.getHeader(HttpHeaders.CONTENT_ENCODING)); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java index e1d55afea1b54..976ba3131151f 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -47,7 +46,7 @@ public void testNoHandlerRespectsAcceptHeader() throws IOException { private void runTestNoHandlerRespectsAcceptHeader( final String accept, final String contentType, final String expect) throws IOException { Request request = new Request("GET", "/foo/bar/baz/qux/quux"); - request.setHeaders(new BasicHeader("Accept", accept)); + request.addHeader("Accept", accept); final 
ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java index b4dbc50d52db7..ac2503f2c525c 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -26,8 +25,8 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import java.util.ArrayList; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import static org.hamcrest.Matchers.equalTo; @@ -62,7 +61,7 @@ public void testThatSettingHeadersWorks() throws IOException { } Request request = new Request("GET", "/_protected"); - request.setHeaders(new BasicHeader("Secret", "password")); + request.addHeader("Secret", "password"); Response authResponse = getRestClient().performRequest(request); assertThat(authResponse.getStatusLine().getStatusCode(), equalTo(200)); assertThat(authResponse.getHeader("Secret"), equalTo("granted")); diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index 52a6bb1efb5f5..a8dfe89b67869 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -29,9 +29,9 @@ plugins { dependencies { compile "junit:junit:${versions.junit}" compile "org.hamcrest:hamcrest-core:${versions.hamcrest}" + compile "org.hamcrest:hamcrest-library:${versions.hamcrest}" - // needs to be on the classpath for JarHell - testRuntime project(':libs:elasticsearch-core') + compile project(':libs:elasticsearch-core') // pulls in the jar built by this project and its dependencies packagingTest project(path: project.path, configuration: 'runtime') diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java index 0b5e7a3b6e0d2..fa7f8e8ef78c5 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java @@ -19,13 +19,20 @@ package org.elasticsearch.packaging; -import org.junit.Test; +import org.elasticsearch.packaging.test.OssTarTests; +import org.elasticsearch.packaging.test.OssZipTests; +import org.elasticsearch.packaging.test.DefaultTarTests; +import org.elasticsearch.packaging.test.DefaultZipTests; -/** - * This class doesn't have any tests yet - */ -public class PackagingTests { +import org.junit.runner.RunWith; +import org.junit.runners.Suite; +import org.junit.runners.Suite.SuiteClasses; - @Test - public void testDummy() {} -} +@RunWith(Suite.class) +@SuiteClasses({ + DefaultTarTests.class, + DefaultZipTests.class, + OssTarTests.class, + OssZipTests.class +}) +public class PackagingTests {} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/VMTestRunner.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/VMTestRunner.java new file mode 100644 index 0000000000000..a8fd2c2770783 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/VMTestRunner.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one 
or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging; + +import org.junit.runner.JUnitCore; + +import java.nio.file.Files; +import java.nio.file.Paths; + +/** + * Ensures that the current JVM is running on a virtual machine before delegating to {@link JUnitCore}. We just check for the existence + * of a special file that we create during VM provisioning. + */ +public class VMTestRunner { + public static void main(String[] args) { + if (Files.exists(Paths.get("/is_vagrant_vm"))) { + JUnitCore.main(args); + } else { + throw new RuntimeException("This filesystem does not have an expected marker file indicating it's a virtual machine. These " + + "tests should only run in a virtual machine because they're destructive."); + } + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java new file mode 100644 index 0000000000000..f683cb9c145db --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.runners.MethodSorters; + +import org.elasticsearch.packaging.util.Distribution; +import org.elasticsearch.packaging.util.Installation; + +import static org.elasticsearch.packaging.util.Cleanup.cleanEverything; +import static org.elasticsearch.packaging.util.Archives.installArchive; +import static org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation; +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assume.assumeThat; + +/** + * Tests that apply to the archive distributions (tar, zip). To add a case for a distribution, subclass and + * override {@link ArchiveTestCase#distribution()}. 
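+ * A subclass stays minimal; as an illustrative sketch ({@code MyArchiveTests} is a hypothetical name): + * <pre>{@code + * public class MyArchiveTests extends ArchiveTestCase { + * @Override + * protected Distribution distribution() { + * return Distribution.OSS_TAR; + * } + * } + * }</pre> + * Methods run in name-ascending order (see the {@code FixMethodOrder} annotation below), which is why cases use numeric prefixes like {@code test10Install}.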
These tests should be the same across all archive distributions + */ +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public abstract class ArchiveTestCase { + + private static Installation installation; + + /** The {@link Distribution} that should be tested in this case */ + protected abstract Distribution distribution(); + + @BeforeClass + public static void cleanup() { + installation = null; + cleanEverything(); + } + + @Before + public void onlyCompatibleDistributions() { + assumeThat(distribution().packaging.compatible, is(true)); + } + + @Test + public void test10Install() { + installation = installArchive(distribution()); + verifyArchiveInstallation(installation, distribution()); + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java new file mode 100644 index 0000000000000..9b359a329c1bb --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class DefaultTarTests extends ArchiveTestCase { + + @Override + protected Distribution distribution() { + return Distribution.DEFAULT_TAR; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java new file mode 100644 index 0000000000000..d9a6353a8c6f3 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class DefaultZipTests extends ArchiveTestCase { + + @Override + protected Distribution distribution() { + return Distribution.DEFAULT_ZIP; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java new file mode 100644 index 0000000000000..86637fc9d48e3 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class OssTarTests extends ArchiveTestCase { + + @Override + protected Distribution distribution() { + return Distribution.OSS_TAR; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java new file mode 100644 index 0000000000000..b6cd1e596a09b --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class OssZipTests extends ArchiveTestCase { + + @Override + protected Distribution distribution() { + return Distribution.OSS_ZIP; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java new file mode 100644 index 0000000000000..4a00570bf30f9 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java @@ -0,0 +1,239 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.util; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.stream.Stream; + +import static org.elasticsearch.packaging.util.FileMatcher.Fileness.Directory; +import static org.elasticsearch.packaging.util.FileMatcher.Fileness.File; +import static org.elasticsearch.packaging.util.FileMatcher.file; +import static org.elasticsearch.packaging.util.FileMatcher.p644; +import static org.elasticsearch.packaging.util.FileMatcher.p660; +import static org.elasticsearch.packaging.util.FileMatcher.p755; +import static org.elasticsearch.packaging.util.FileUtils.getCurrentVersion; +import static org.elasticsearch.packaging.util.FileUtils.getDefaultArchiveInstallPath; +import static org.elasticsearch.packaging.util.FileUtils.getPackagingArchivesDir; +import static org.elasticsearch.packaging.util.FileUtils.lsGlob; + +import static org.elasticsearch.packaging.util.FileUtils.mv; +import static org.elasticsearch.packaging.util.Platforms.isDPKG; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.collection.IsEmptyCollection.empty; +import static org.hamcrest.collection.IsCollectionWithSize.hasSize; + +/** + * Installation and verification logic for archive distributions + */ +public class Archives { + + public static Installation installArchive(Distribution distribution) { + return installArchive(distribution, getDefaultArchiveInstallPath(), getCurrentVersion()); + } + + public static Installation installArchive(Distribution distribution, Path fullInstallPath, String version) { + final Shell sh = new Shell(); + + final Path distributionFile = getPackagingArchivesDir().resolve(distribution.filename(version)); + final Path baseInstallPath = fullInstallPath.getParent(); + final Path extractedPath = baseInstallPath.resolve("elasticsearch-" + version); + + assertThat("distribution file must exist", Files.exists(distributionFile), is(true)); + assertThat("elasticsearch must not already be installed", lsGlob(baseInstallPath, "elasticsearch*"), empty()); + + if (distribution.packaging == Distribution.Packaging.TAR) { + + if (Platforms.LINUX) { + sh.run("tar", "-C", baseInstallPath.toString(), "-xzpf", distributionFile.toString()); + } else { + throw new RuntimeException("Distribution " + distribution + " is not supported on windows"); + } + + } else if (distribution.packaging == Distribution.Packaging.ZIP) { + + if (Platforms.LINUX) { + sh.run("unzip", distributionFile.toString(), "-d", baseInstallPath.toString()); + } else { + sh.run("powershell.exe", "-Command", + "Add-Type -AssemblyName 'System.IO.Compression.Filesystem'; " + + "[IO.Compression.ZipFile]::ExtractToDirectory('" + distributionFile + "', '" + baseInstallPath + "')"); + } + + } else { + throw new 
RuntimeException("Distribution " + distribution + " is not a known archive type"); + } + + assertThat("archive was extracted", Files.exists(extractedPath), is(true)); + + mv(extractedPath, fullInstallPath); + + assertThat("extracted archive moved to install location", Files.exists(fullInstallPath)); + final List installations = lsGlob(baseInstallPath, "elasticsearch*"); + assertThat("only the intended installation exists", installations, hasSize(1)); + assertThat("only the intended installation exists", installations.get(0), is(fullInstallPath)); + + if (Platforms.LINUX) { + setupArchiveUsersLinux(fullInstallPath); + } + + return new Installation(fullInstallPath); + } + + private static void setupArchiveUsersLinux(Path installPath) { + final Shell sh = new Shell(); + + if (sh.runIgnoreExitCode("getent", "group", "elasticsearch").isSuccess() == false) { + if (isDPKG()) { + sh.run("addgroup", "--system", "elasticsearch"); + } else { + sh.run("groupadd", "-r", "elasticsearch"); + } + } + + if (sh.runIgnoreExitCode("id", "elasticsearch").isSuccess() == false) { + if (isDPKG()) { + sh.run("adduser", + "--quiet", + "--system", + "--no-create-home", + "--ingroup", "elasticsearch", + "--disabled-password", + "--shell", "/bin/false", + "elasticsearch"); + } else { + sh.run("useradd", + "--system", + "-M", + "--gid", "elasticsearch", + "--shell", "/sbin/nologin", + "--comment", "elasticsearch user", + "elasticsearch"); + } + } + sh.run("chown", "-R", "elasticsearch:elasticsearch", installPath.toString()); + } + + public static void verifyArchiveInstallation(Installation installation, Distribution distribution) { + // on Windows for now we leave the installation owned by the vagrant user that the tests run as. Since the vagrant account + // is a local administrator, the files really end up being owned by the local administrators group. In the future we'll + // install and run elasticesearch with a role user on Windows + final String owner = Platforms.WINDOWS + ? 
"BUILTIN\\Administrators" + : "elasticsearch"; + + verifyOssInstallation(installation, distribution, owner); + if (distribution.flavor == Distribution.Flavor.DEFAULT) { + verifyDefaultInstallation(installation, distribution, owner); + } + } + + private static void verifyOssInstallation(Installation es, Distribution distribution, String owner) { + Stream.of( + es.home, + es.config, + es.plugins, + es.modules, + es.logs + ).forEach(dir -> assertThat(dir, file(Directory, owner, owner, p755))); + + assertThat(Files.exists(es.data), is(false)); + assertThat(Files.exists(es.scripts), is(false)); + + assertThat(es.home.resolve("bin"), file(Directory, owner, owner, p755)); + assertThat(es.home.resolve("lib"), file(Directory, owner, owner, p755)); + assertThat(Files.exists(es.config.resolve("elasticsearch.keystore")), is(false)); + + Stream.of( + "bin/elasticsearch", + "bin/elasticsearch-env", + "bin/elasticsearch-keystore", + "bin/elasticsearch-plugin", + "bin/elasticsearch-translog" + ).forEach(executable -> { + + assertThat(es.home.resolve(executable), file(File, owner, owner, p755)); + + if (distribution.packaging == Distribution.Packaging.ZIP) { + assertThat(es.home.resolve(executable + ".bat"), file(File, owner)); + } + }); + + if (distribution.packaging == Distribution.Packaging.ZIP) { + Stream.of( + "bin/elasticsearch-service.bat", + "bin/elasticsearch-service-mgr.exe", + "bin/elasticsearch-service-x64.exe" + ).forEach(executable -> assertThat(es.home.resolve(executable), file(File, owner))); + } + + Stream.of( + "elasticsearch.yml", + "jvm.options", + "log4j2.properties" + ).forEach(config -> assertThat(es.config.resolve(config), file(File, owner, owner, p660))); + + Stream.of( + "NOTICE.txt", + "LICENSE.txt", + "README.textile" + ).forEach(doc -> assertThat(es.home.resolve(doc), file(File, owner, owner, p644))); + } + + private static void verifyDefaultInstallation(Installation es, Distribution distribution, String owner) { + + Stream.of( + "bin/elasticsearch-certgen", + "bin/elasticsearch-certutil", + "bin/elasticsearch-croneval", + "bin/elasticsearch-migrate", + "bin/elasticsearch-saml-metadata", + "bin/elasticsearch-setup-passwords", + "bin/elasticsearch-sql-cli", + "bin/elasticsearch-syskeygen", + "bin/elasticsearch-users", + "bin/x-pack-env", + "bin/x-pack-security-env", + "bin/x-pack-watcher-env" + ).forEach(executable -> { + + assertThat(es.home.resolve(executable), file(File, owner, owner, p755)); + + if (distribution.packaging == Distribution.Packaging.ZIP) { + assertThat(es.home.resolve(executable + ".bat"), file(File, owner)); + } + }); + + // at this time we only install the current version of archive distributions, but if that changes we'll need to pass + // the version through here + assertThat(es.home.resolve("bin/elasticsearch-sql-cli-" + getCurrentVersion() + ".jar"), file(File, owner, owner, p755)); + + Stream.of( + "users", + "users_roles", + "roles.yml", + "role_mapping.yml", + "log4j2.properties" + ).forEach(config -> assertThat(es.config.resolve(config), file(File, owner, owner, p660))); + } + +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java new file mode 100644 index 0000000000000..9e9150c9c1814 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java @@ -0,0 +1,121 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.util; + +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.packaging.util.FileUtils.getTempDir; +import static org.elasticsearch.packaging.util.FileUtils.lsGlob; +import static org.elasticsearch.packaging.util.Platforms.isAptGet; +import static org.elasticsearch.packaging.util.Platforms.isDPKG; +import static org.elasticsearch.packaging.util.Platforms.isRPM; +import static org.elasticsearch.packaging.util.Platforms.isSystemd; +import static org.elasticsearch.packaging.util.Platforms.isYUM; + +public class Cleanup { + + private static final List<String> ELASTICSEARCH_FILES_LINUX = Arrays.asList( + "/usr/share/elasticsearch", + "/etc/elasticsearch", + "/var/lib/elasticsearch", + "/var/log/elasticsearch", + "/etc/default/elasticsearch", + "/etc/sysconfig/elasticsearch", + "/var/run/elasticsearch", + "/usr/share/doc/elasticsearch", + "/usr/lib/systemd/system/elasticsearch.conf", + "/usr/lib/tmpfiles.d/elasticsearch.conf", + "/usr/lib/sysctl.d/elasticsearch.conf" + ); + + // todo + private static final List<String> ELASTICSEARCH_FILES_WINDOWS = Collections.emptyList(); + + public static void cleanEverything() { + final Shell sh = new Shell(); + + // kill elasticsearch processes + if (Platforms.WINDOWS) { + + // the view of processes returned by Get-Process doesn't expose command line arguments, so we use WMI here + sh.runIgnoreExitCode("powershell.exe", "-Command", + "Get-WmiObject Win32_Process | " + + "Where-Object { $_.CommandLine -Match 'org.elasticsearch.bootstrap.Elasticsearch' } | " + + "ForEach-Object { $_.Terminate() }"); + + } else { + + sh.runIgnoreExitCode("pkill", "-u", "elasticsearch"); + sh.runIgnoreExitCode("bash", "-c", + "ps aux | grep -i 'org.elasticsearch.bootstrap.Elasticsearch' | awk {'print $2'} | xargs kill -9"); + + } + + if (Platforms.LINUX) { + purgePackagesLinux(); + } + + // remove elasticsearch users + if (Platforms.LINUX) { + sh.runIgnoreExitCode("userdel", "elasticsearch"); + sh.runIgnoreExitCode("groupdel", "elasticsearch"); + } + + // delete files that may still exist + lsGlob(getTempDir(), "elasticsearch*").forEach(FileUtils::rm); + final List<String> filesToDelete = Platforms.WINDOWS + ?
ELASTICSEARCH_FILES_WINDOWS + : ELASTICSEARCH_FILES_LINUX; + filesToDelete.stream() + .map(Paths::get) + .filter(Files::exists) + .forEach(FileUtils::rm); + + // disable elasticsearch service + // todo add this for windows when adding tests for service installation + if (Platforms.LINUX && isSystemd()) { + sh.run("systemctl", "unmask", "systemd-sysctl.service"); + } + } + + private static void purgePackagesLinux() { + final Shell sh = new Shell(); + + if (isRPM()) { + sh.runIgnoreExitCode("rpm", "--quiet", "-e", "elasticsearch", "elasticsearch-oss"); + } + + if (isYUM()) { + sh.runIgnoreExitCode("yum", "remove", "-y", "elasticsearch", "elasticsearch-oss"); + } + + if (isDPKG()) { + sh.runIgnoreExitCode("dpkg", "--purge", "elasticsearch", "elasticsearch-oss"); + } + + if (isAptGet()) { + sh.runIgnoreExitCode("apt-get", "--quiet", "--yes", "purge", "elasticsearch", "elasticsearch-oss"); + } + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java new file mode 100644 index 0000000000000..4f0c8751ca4e7 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.packaging.util; + +public enum Distribution { + + OSS_TAR(Packaging.TAR, Flavor.OSS), + OSS_ZIP(Packaging.ZIP, Flavor.OSS), + OSS_DEB(Packaging.DEB, Flavor.OSS), + OSS_RPM(Packaging.RPM, Flavor.OSS), + + DEFAULT_TAR(Packaging.TAR, Flavor.DEFAULT), + DEFAULT_ZIP(Packaging.ZIP, Flavor.DEFAULT), + DEFAULT_DEB(Packaging.DEB, Flavor.DEFAULT), + DEFAULT_RPM(Packaging.RPM, Flavor.DEFAULT); + + public final Packaging packaging; + public final Flavor flavor; + + Distribution(Packaging packaging, Flavor flavor) { + this.packaging = packaging; + this.flavor = flavor; + } + + public String filename(String version) { + return flavor.name + "-" + version + packaging.extension; + } + + public enum Packaging { + + TAR(".tar.gz", Platforms.LINUX), + ZIP(".zip", true), + DEB(".deb", Platforms.isDPKG()), + RPM(".rpm", Platforms.isRPM()); + + /** The extension of this distribution's file */ + public final String extension; + + /** Whether the distribution is intended for use on the platform the current JVM is running on */ + public final boolean compatible; + + Packaging(String extension, boolean compatible) { + this.extension = extension; + this.compatible = compatible; + } + } + + public enum Flavor { + + OSS("elasticsearch-oss"), + DEFAULT("elasticsearch"); + + public final String name; + + Flavor(String name) { + this.name = name; + } + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileMatcher.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileMatcher.java new file mode 100644 index 0000000000000..9fdf6d60081a3 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileMatcher.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.util; + +import org.hamcrest.Description; +import org.hamcrest.TypeSafeMatcher; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.PosixFileAttributes; +import java.nio.file.attribute.PosixFilePermission; +import java.util.Objects; +import java.util.Set; + +import static org.elasticsearch.packaging.util.FileUtils.getBasicFileAttributes; +import static org.elasticsearch.packaging.util.FileUtils.getFileOwner; +import static org.elasticsearch.packaging.util.FileUtils.getPosixFileAttributes; + +import static java.nio.file.attribute.PosixFilePermissions.fromString; + +/** + * Asserts that a file at a path matches its status as Directory/File, and its owner. If on a posix system, also checks that the permission + * set is what we expect.
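+ * <p> + * A typical assertion, mirroring how {@link Archives} uses the static factories below (illustrative only): + * <pre>{@code + * assertThat(es.home.resolve("bin"), file(Directory, "elasticsearch", "elasticsearch", p755)); + * }</pre>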
+ * + * This class saves information about its failed matches in instance variables and so instances should not be reused + */ +public class FileMatcher extends TypeSafeMatcher<Path> { + + public enum Fileness { File, Directory } + + public static final Set<PosixFilePermission> p755 = fromString("rwxr-xr-x"); + public static final Set<PosixFilePermission> p660 = fromString("rw-rw----"); + public static final Set<PosixFilePermission> p644 = fromString("rw-r--r--"); + + private final Fileness fileness; + private final String owner; + private final String group; + private final Set<PosixFilePermission> posixPermissions; + + private String mismatch; + + public FileMatcher(Fileness fileness, String owner, String group, Set<PosixFilePermission> posixPermissions) { + this.fileness = Objects.requireNonNull(fileness); + this.owner = Objects.requireNonNull(owner); + this.group = group; + this.posixPermissions = posixPermissions; + } + + @Override + protected boolean matchesSafely(Path path) { + if (Files.exists(path) == false) { + mismatch = "Does not exist"; + return false; + } + + if (Platforms.WINDOWS) { + final BasicFileAttributes attributes = getBasicFileAttributes(path); + final String attributeViewOwner = getFileOwner(path); + + if (fileness.equals(Fileness.Directory) != attributes.isDirectory()) { + mismatch = "Is " + (attributes.isDirectory() ? "a directory" : "a file"); + return false; + } + + if (attributeViewOwner.contains(owner) == false) { + mismatch = "Owned by " + attributeViewOwner; + return false; + } + } else { + final PosixFileAttributes attributes = getPosixFileAttributes(path); + + if (fileness.equals(Fileness.Directory) != attributes.isDirectory()) { + mismatch = "Is " + (attributes.isDirectory() ? "a directory" : "a file"); + return false; + } + + if (owner.equals(attributes.owner().getName()) == false) { + mismatch = "Owned by " + attributes.owner().getName(); + return false; + } + + if (group != null && group.equals(attributes.group().getName()) == false) { + mismatch = "Owned by group " + attributes.group().getName(); + return false; + } + + if (posixPermissions != null && posixPermissions.equals(attributes.permissions()) == false) { + mismatch = "Has permissions " + attributes.permissions(); + return false; + } + } + + return true; + } + + @Override + public void describeMismatchSafely(Path path, Description description) { + description.appendText("path ").appendValue(path); + if (mismatch != null) { + description.appendText(mismatch); + } + } + + @Override + public void describeTo(Description description) { + description.appendValue("file/directory: ").appendValue(fileness) + .appendText(" with owner ").appendValue(owner) + .appendText(" with group ").appendValue(group) + .appendText(" with posix permissions ").appendValueList("[", ",", "]", posixPermissions); + } + + public static FileMatcher file(Fileness fileness, String owner) { + return file(fileness, owner, null, null); + } + + public static FileMatcher file(Fileness fileness, String owner, String group, Set<PosixFilePermission> permissions) { + return new FileMatcher(fileness, owner, group, permissions); + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java new file mode 100644 index 0000000000000..ad826675244a0 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.util; + +import org.elasticsearch.core.internal.io.IOUtils; + +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.FileOwnerAttributeView; +import java.nio.file.attribute.PosixFileAttributes; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.IsNot.not; +import static org.hamcrest.text.IsEmptyString.isEmptyOrNullString; + +/** + * Wrappers and convenience methods for common filesystem operations + */ +public class FileUtils { + + public static List<Path> lsGlob(Path directory, String glob) { + List<Path> paths = new ArrayList<>(); + try (DirectoryStream<Path> stream = Files.newDirectoryStream(directory, glob)) { + + for (Path path : stream) { + paths.add(path); + } + return paths; + + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static void rm(Path... paths) { + try { + IOUtils.rm(paths); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static Path mv(Path source, Path target) { + try { + return Files.move(source, target); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static String slurp(Path file) { + try { + return String.join("\n", Files.readAllLines(file)); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Gets the owner of a file in a way that should be supported by all filesystems that have a concept of file owner + */ + public static String getFileOwner(Path path) { + try { + FileOwnerAttributeView view = Files.getFileAttributeView(path, FileOwnerAttributeView.class); + return view.getOwner().getName(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Gets attributes that are supported by all filesystems + */ + public static BasicFileAttributes getBasicFileAttributes(Path path) { + try { + return Files.readAttributes(path, BasicFileAttributes.class); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Gets attributes that are supported by posix filesystems + */ + public static PosixFileAttributes getPosixFileAttributes(Path path) { + try { + return Files.readAttributes(path, PosixFileAttributes.class); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + // vagrant creates /tmp for us in windows so we use that to avoid long paths + public static Path getTempDir() { + return Paths.get("/tmp"); + } + + public static Path getDefaultArchiveInstallPath() { + return getTempDir().resolve("elasticsearch"); + } + + public static String getCurrentVersion() { + return slurp(getPackagingArchivesDir().resolve("version")); + } + + public static Path getPackagingArchivesDir() { + String fromEnv =
System.getenv("PACKAGING_ARCHIVES"); + assertThat(fromEnv, not(isEmptyOrNullString())); + return Paths.get(fromEnv); + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java new file mode 100644 index 0000000000000..d231762d06227 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.util; + +import java.nio.file.Path; + +/** + * Represents an installation of Elasticsearch + */ +public class Installation { + + public final Path home; + public final Path config; + public final Path data; + public final Path logs; + public final Path plugins; + public final Path modules; + public final Path scripts; + + public Installation(Path home, Path config, Path data, Path logs, Path plugins, Path modules, Path scripts) { + this.home = home; + this.config = config; + this.data = data; + this.logs = logs; + this.plugins = plugins; + this.modules = modules; + this.scripts = scripts; + } + + public Installation(Path home) { + this( + home, + home.resolve("config"), + home.resolve("data"), + home.resolve("logs"), + home.resolve("plugins"), + home.resolve("modules"), + home.resolve("scripts") + ); + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java new file mode 100644 index 0000000000000..230af8efc2dbb --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.packaging.util; + +public class Platforms { + public static final String OS_NAME = System.getProperty("os.name"); + public static final boolean LINUX = OS_NAME.startsWith("Linux"); + public static final boolean WINDOWS = OS_NAME.startsWith("Windows"); + + public static boolean isDPKG() { + if (WINDOWS) { + return false; + } + return new Shell().runIgnoreExitCode("which", "dpkg").isSuccess(); + } + + public static boolean isAptGet() { + if (WINDOWS) { + return false; + } + return new Shell().runIgnoreExitCode("which", "apt-get").isSuccess(); + } + + public static boolean isRPM() { + if (WINDOWS) { + return false; + } + return new Shell().runIgnoreExitCode("which", "rpm").isSuccess(); + } + + public static boolean isYUM() { + if (WINDOWS) { + return false; + } + return new Shell().runIgnoreExitCode("which", "yum").isSuccess(); + } + + public static boolean isSystemd() { + if (WINDOWS) { + return false; + } + return new Shell().runIgnoreExitCode("which", "systemctl").isSuccess(); + } + + public static boolean isSysVInit() { + if (WINDOWS) { + return false; + } + return new Shell().runIgnoreExitCode("which", "service").isSuccess(); + } +}
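// Illustrative usage sketch (editor's addition, not part of the patch): how the
// Platforms checks above combine with the Shell wrapper introduced below. The class
// name and the exact commands are hypothetical; only Platforms.is*(), Shell.run(...)
// and Shell.Result come from this change.
import org.elasticsearch.packaging.util.Platforms;
import org.elasticsearch.packaging.util.Shell;

public class PackageManagerCheck {

    public static void main(String[] args) {
        if (Platforms.isRPM()) {
            // run(...) throws if the command exits non-zero, so reaching the next line implies success
            Shell.Result result = new Shell().run("rpm", "--version");
            System.out.println(result.stdout);
        } else if (Platforms.isDPKG()) {
            System.out.println(new Shell().run("dpkg", "--version").stdout);
        }
    }
}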
diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java new file mode 100644 index 0000000000000..3adc0b62e04ea --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java @@ -0,0 +1,193 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.util; + +import org.elasticsearch.common.SuppressForbidden; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.emptyMap; + +/** + * Wrapper to run shell commands and collect their outputs in a less verbose way + */ +public class Shell { + + final Map<String, String> env; + final Path workingDirectory; + + public Shell() { + this(emptyMap(), null); + } + + public Shell(Map<String, String> env) { + this(env, null); + } + + public Shell(Path workingDirectory) { + this(emptyMap(), workingDirectory); + } + + public Shell(Map<String, String> env, Path workingDirectory) { + this.env = new HashMap<>(env); + this.workingDirectory = workingDirectory; + } + + public Result run(String... command) { + Result result = runIgnoreExitCode(command); + if (result.isSuccess() == false) { + throw new RuntimeException("Command was not successful: [" + String.join(" ", command) + "] result: " + result.toString()); + } + return result; + } + + public Result runIgnoreExitCode(String... command) { + ProcessBuilder builder = new ProcessBuilder(); + builder.command(command); + + if (workingDirectory != null) { + setWorkingDirectory(builder, workingDirectory); + } + + if (env != null && env.isEmpty() == false) { + for (Map.Entry<String, String> entry : env.entrySet()) { + builder.environment().put(entry.getKey(), entry.getValue()); + } + } + + try { + + Process process = builder.start(); + + StringBuilder stdout = new StringBuilder(); + StringBuilder stderr = new StringBuilder(); + + Thread stdoutThread = new Thread(new StreamCollector(process.getInputStream(), stdout)); + Thread stderrThread = new Thread(new StreamCollector(process.getErrorStream(), stderr)); + + stdoutThread.start(); + stderrThread.start(); + + stdoutThread.join(); + stderrThread.join(); + + int exitCode = process.waitFor(); + + return new Result(exitCode, stdout.toString(), stderr.toString()); + + } catch (IOException | InterruptedException e) { + throw new RuntimeException(e); + } + } + + @SuppressForbidden(reason = "ProcessBuilder expects java.io.File") + private static void setWorkingDirectory(ProcessBuilder builder, Path path) { + builder.directory(path.toFile()); + } + + public String toString() { + return new StringBuilder() + .append("<") + .append(this.getClass().getName()) + .append(" ") + .append("env = [") + .append(env) + .append("]") + .append("workingDirectory = [") + .append(workingDirectory) + .append("]") + .append(">") + .toString(); + } + + public static class Result { + public final int exitCode; + public final String stdout; + public final String stderr; + + public Result(int exitCode, String stdout, String stderr) { + this.exitCode = exitCode; + this.stdout = stdout; + this.stderr = stderr; + } + + public boolean isSuccess() { + return exitCode == 0; + } + + public String toString() { + return new StringBuilder() + .append("<") + .append(this.getClass().getName()) + .append(" ") + .append("exitCode = [") + .append(exitCode) + .append("]") + .append(" ") + .append("stdout = [") + .append(stdout) + .append("]") + .append(" ") + .append("stderr = [") + .append(stderr) + .append("]") + .append(">") + .toString(); + } + } + + private static class StreamCollector implements Runnable { + private final InputStream input; + private final Appendable appendable; + + StreamCollector(InputStream input, Appendable appendable) { + this.input = Objects.requireNonNull(input); + this.appendable = Objects.requireNonNull(appendable); + } + + public void run() { + try { + + BufferedReader reader = new BufferedReader(reader(input)); + String line; + + while ((line = reader.readLine()) != null) { + appendable.append(line); + appendable.append("\n"); + } + + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @SuppressForbidden(reason = "the system's default character set is a best guess of what subprocesses will use") + private static InputStreamReader reader(InputStream inputStream) { + return new InputStreamReader(inputStream); + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 2c5419589ec6d..6338598de05d0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -20,6 +20,10 @@ setup: --- "Get all aliases via /_alias": + - do: + indices.create: + index: test_index_3 + - do: indices.get_alias: {} @@ -27,7 +31,41 @@ setup: -
match: {test_index.aliases.test_blias: {}} - match: {test_index_2.aliases.test_alias: {}} - match: {test_index_2.aliases.test_blias: {}} + - match: {test_index_3.aliases: {}} + +--- +"Get aliases via /_alias/_all": + + - do: + indices.create: + index: test_index_3 + + - do: + indices.get_alias: + name: _all + + - match: {test_index.aliases.test_alias: {}} + - match: {test_index.aliases.test_blias: {}} + - match: {test_index_2.aliases.test_alias: {}} + - match: {test_index_2.aliases.test_blias: {}} + - is_false: test_index_3 + +--- +"Get aliases via /_alias/*": + - do: + indices.create: + index: test_index_3 + + - do: + indices.get_alias: + name: "*" + + - match: {test_index.aliases.test_alias: {}} + - match: {test_index.aliases.test_blias: {}} + - match: {test_index_2.aliases.test_alias: {}} + - match: {test_index_2.aliases.test_blias: {}} + - is_false: test_index_3 --- "Get all aliases via /{index}/_alias/": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml index 40e9d705ea4ad..884a50507c7b7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml @@ -45,9 +45,8 @@ setup: "Nested doc version and seqIDs": - skip: - # fixed in 6.0.1 - version: " - 6.0.0" - reason: "version and seq IDs where not accurate in previous versions" + version: " - 6.3.99" + reason: "object notation for docvalue_fields was introduced in 6.4" - do: index: @@ -61,7 +60,7 @@ setup: - do: search: - body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": ["_seq_no"]} }}, "version": true, "docvalue_fields" : ["_seq_no"] } + body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": [ { "field": "_seq_no", "format": "use_field_mapping" } ]} }}, "version": true, "docvalue_fields" : [ { "field": "_seq_no", "format": "use_field_mapping" } ] } - match: { hits.total: 1 } - match: { hits.hits.0._index: "test" } @@ -84,7 +83,7 @@ setup: - do: search: - body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": ["_seq_no"]} }}, "version": true, "docvalue_fields" : ["_seq_no"] } + body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": [ { "field": "_seq_no", "format": "use_field_mapping" } ]} }}, "version": true, "docvalue_fields" : [ { "field": "_seq_no", "format": "use_field_mapping" } ] } - match: { hits.total: 1 } - match: { hits.hits.0._index: "test" }
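// Illustrative sketch (editor's addition, not part of the patch): the Java-API
// equivalent of the object notation exercised by the YAML tests above, using the
// new addDocValueField(name, format) overload introduced by this change. "client"
// is any org.elasticsearch.client.Client; the index and field names are made up.
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

public class DocValueFormatExample {

    public static SearchResponse searchWithFormats(Client client) {
        return client.prepareSearch("test")
                // same as {"field": "count", "format": "use_field_mapping"} in the request body
                .addDocValueField("count", "use_field_mapping")
                // same as {"field": "timestamp", "format": "epoch_millis"}; field name is hypothetical
                .addDocValueField("timestamp", "epoch_millis")
                .get();
    }
}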
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index 3830a68b28fa0..59692873cc456 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -133,7 +133,53 @@ setup: --- "docvalue_fields": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + features: warnings - do: + warnings: + - 'Doc-value field [count] is not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with the doc value field in order to opt in for the future behaviour and ease the migration to 7.0.' + search: + body: + docvalue_fields: [ "count" ] + - match: { hits.hits.0.fields.count: [1] } + +--- +"docvalue_fields as url param": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + features: warnings + - do: + warnings: + - 'Doc-value field [count] is not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with the doc value field in order to opt in for the future behaviour and ease the migration to 7.0.' search: docvalue_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } + +--- +"docvalue_fields with default format": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + - do: + search: + body: + docvalue_fields: + - field: "count" + format: "use_field_mapping" + - match: { hits.hits.0.fields.count: [1] } + +--- +"docvalue_fields with explicit format": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + - do: + search: + body: + docvalue_fields: + - field: "count" + format: "#.0" + - match: { hits.hits.0.fields.count: ["1.0"] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml index da9c739ed6677..905635e1d10b6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml @@ -62,6 +62,9 @@ setup: --- "Docvalues_fields size limit": + - skip: + version: " - 6.3.99" + reason: "The object notation for docvalue_fields is only supported on 6.4+" - do: catch: /Trying to retrieve too many docvalue_fields\. Must be less than or equal to[:] \[2\] but was \[3\]\.
This limit can be set by changing the \[index.max_docvalue_fields_search\] index level setting\./ search: @@ -69,7 +72,13 @@ setup: body: query: match_all: {} - docvalue_fields: ["one", "two", "three"] + docvalue_fields: + - field: "one" + format: "use_field_mapping" + - field: "two" + format: "use_field_mapping" + - field: "three" + format: "use_field_mapping" --- "Script_fields size limit": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get_repository/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get_repository/10_basic.yml index b944fe43791e4..47f5ac0934ce9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get_repository/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get_repository/10_basic.yml @@ -51,6 +51,9 @@ setup: --- "Verify created repository": + - skip: + version: " - 6.99.99" + reason: AwaitsFix for https://github.com/elastic/elasticsearch/issues/30807 - do: snapshot.verify_repository: repository: test_repo_get_2 diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 14f5fcb381f1c..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b70d03784d06a643e096fae4d959200aa246ba16 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..1c471a77d80c4 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +96ab108569c77932ecb17c45421affece207df5c \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 47afb59e45eb7..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d660a63ac0f7ab2772a45ae518518472bf620620 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..4c8842872abcd --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +72d09ca50979f716a57f53f2de33d55023a166ec \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 50392f59374a8..0000000000000 --- a/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..4aecfc6a550d3 --- /dev/null +++ b/server/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +e118e4d05070378516b9055184b74498ba528dee \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 
100644 index 27d7aaab2f589..0000000000000 --- a/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9eaae9dcd4ec88227475cb81d3be9afa767f1b22 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..948aacf662f5e --- /dev/null +++ b/server/licenses/lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +2b2ea6bfe6fa159bbf205bf7f7fa2ed2c22bbffc \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 176c3a86afe7f..0000000000000 --- a/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd15f0008742c84899d678cb0cecda06d0a6d63e \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..30a960c5a8047 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +423e4fff9276101d845d6073dc6cd27504def207 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 0bfe9cfb79aff..0000000000000 --- a/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ce38b8610a7f402f2da3b0e408e508151d979c5 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..fb3cd72c75569 --- /dev/null +++ b/server/licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +27561038da2edcae3ecc3a08b0a52824966af87a \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index c1a0127e2ce73..0000000000000 --- a/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -53819f03a07050a4af28361d64395c86f2cea008 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..cd989836ab24f --- /dev/null +++ b/server/licenses/lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +d7d422159f705261784d121e24877119d9c95083 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 615a0dec0c0d4..0000000000000 --- a/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8cdc0e2b65d146ed11f4d2507109e530d59ff33d \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..c4d8ad61c7396 --- /dev/null +++ b/server/licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +fc09508fde6ba87f241d7e3148d9e310c0db9cb9 \ No 
newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 12f5eff262e9c..0000000000000 --- a/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e56090463703112ad64ad457d18bae9a5b2966b8 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..0cb51736803cd --- /dev/null +++ b/server/licenses/lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +201fdf3432ff3fef0f48c38c2c0f482c144f6868 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index a787a00541a54..0000000000000 --- a/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9faf974b77058e44a6d35e956db4f5fb67389dfa \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..ecd6440ba642a --- /dev/null +++ b/server/licenses/lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +917df8c8d08952a012a34050b183b6204ae7081b \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 7d95cd6b3b6e3..0000000000000 --- a/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b852b1fe70ef70736b2b1a9ad57eb93cbaed0423 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..3e65eaeef91b3 --- /dev/null +++ b/server/licenses/lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +caff84fa66cb0376835c39f3d4ca7dfd2177d8f4 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index ac0598b3f0c49..0000000000000 --- a/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2fa99ec7140fcf35db16ac1feb78ef142750d39 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..c86854b16c308 --- /dev/null +++ b/server/licenses/lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +e1bce61a9d9129a8d0fdd3127a84665d29f53eb0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index a2537dbdde529..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c9963f60d3a0924b877a6f910650c5f2384822a0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 
b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..144984a3869b0 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +3a2e4373d79fda968a078971efa2cb8ec9ff65b0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 6844bcd13b278..0000000000000 --- a/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f33ba54da5e0e125f4c5ef7dd800dd6185e4f61 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..fd19f4ad8114a --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +7f14927e5c3c1c85c4c5b3681c28c5e36f241dda \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 0343db2d94485..0000000000000 --- a/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bb3c18c987395dae6fe63744f5a50fd367ea5a74 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..ba405960dbeb7 --- /dev/null +++ b/server/licenses/lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +6e708a38c957a655e0cfedb06a1b9aa892929db0 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java index b83c8158c4f7a..43036a2a697ef 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java @@ -22,6 +22,9 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -30,6 +33,13 @@ */ public class DeleteRepositoryResponse extends AcknowledgedResponse { + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("delete_repository", true, args -> new DeleteRepositoryResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } + DeleteRepositoryResponse() { } @@ -49,4 +59,7 @@ public void writeTo(StreamOutput out) throws IOException { writeAcknowledged(out); } + public static DeleteRepositoryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java 
index 27612a3dab24b..c3fb2d58bebf3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java @@ -19,23 +19,112 @@ package org.elasticsearch.action.admin.cluster.repositories.verify; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; /** - * Unregister repository response + * Verify repository response */ public class VerifyRepositoryResponse extends ActionResponse implements ToXContentObject { - private DiscoveryNode[] nodes; + static final String NODES = "nodes"; + static final String NAME = "name"; + + public static class NodeView implements Writeable, ToXContentObject { + private static final ObjectParser.NamedObjectParser<NodeView, Void> PARSER; + static { + ObjectParser<NodeView, Void> internalParser = new ObjectParser<>(NODES); + internalParser.declareString(NodeView::setName, new ParseField(NAME)); + PARSER = (p, v, name) -> internalParser.parse(p, new NodeView(name), null); + } + + final String nodeId; + String name; + + public NodeView(String nodeId) { this.nodeId = nodeId; } + + public NodeView(String nodeId, String name) { + this(nodeId); + this.name = name; + } + + public NodeView(StreamInput in) throws IOException { + this(in.readString(), in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(nodeId); + out.writeString(name); + } + + void setName(String name) { this.name = name; } + + public String getName() { return name; } + + public String getNodeId() { return nodeId; } + + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(nodeId); + { + builder.field(NAME, name); + } + builder.endObject(); + return builder; + } + + /** + * Temporary method that allows turning a {@link NodeView} into a {@link DiscoveryNode}. This representation will never be used in + * practice, because in >= 6.4 a consumer of the response will only be able to retrieve a representation of {@link NodeView} + * objects. + * + * Effectively this will be used to hold the state of the object in 6.x so there is no need to have 2 backing objects that + * represent the state of the Response. In practice these will always be read by a consumer as a NodeView, but it eases the + * transition to master which will not contain any representation of a {@link DiscoveryNode}.
+ */ + DiscoveryNode convertToDiscoveryNode() { + return new DiscoveryNode(name, nodeId, "", "", "", new TransportAddress(TransportAddress.META_ADDRESS, 0), + Collections.emptyMap(), Collections.emptySet(), Version.CURRENT); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + NodeView other = (NodeView) obj; + return Objects.equals(nodeId, other.nodeId) && + Objects.equals(name, other.name); + } + + @Override + public int hashCode() { + return Objects.hash(nodeId, name); + } + } + + private List<DiscoveryNode> nodes; private ClusterName clusterName; @@ -45,53 +134,56 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte public VerifyRepositoryResponse(ClusterName clusterName, DiscoveryNode[] nodes) { this.clusterName = clusterName; - this.nodes = nodes; + this.nodes = Arrays.asList(nodes); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - clusterName = new ClusterName(in); - nodes = new DiscoveryNode[in.readVInt()]; - for (int i=0; i<nodes.length; i++) { - nodes[i] = new DiscoveryNode(in); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + this.nodes = in.readList(NodeView::new).stream().map(n -> n.convertToDiscoveryNode()).collect(Collectors.toList()); + } else { + clusterName = new ClusterName(in); + this.nodes = in.readList(DiscoveryNode::new); } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - clusterName.writeTo(out); - out.writeVInt(nodes.length); - for (DiscoveryNode node : nodes) { - node.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeList(getNodes()); + } else { + clusterName.writeTo(out); + out.writeList(nodes); } } - public DiscoveryNode[] getNodes() { - return nodes; + public List<NodeView> getNodes() { + return nodes.stream().map(dn -> new NodeView(dn.getId(), dn.getName())).collect(Collectors.toList()); } public ClusterName getClusterName() { return clusterName; } - static final class Fields { - static final String NODES = "nodes"; - static final String NAME = "name"; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startObject(Fields.NODES); - for (DiscoveryNode node : nodes) { - builder.startObject(node.getId()); - builder.field(Fields.NAME, node.getName()); + { + builder.startObject(NODES); + { + for (DiscoveryNode node : nodes) { + builder.startObject(node.getId()); + { + builder.field(NAME, node.getName()); + } + builder.endObject(); + } + } builder.endObject(); } builder.endObject(); - builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 1e11f126bb6f0..d9ed6e6792f85 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -153,7 +153,7 @@ private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder optio } } if (options.getDocValueFields() != null) { - options.getDocValueFields().forEach(groupSource::docValueField); + options.getDocValueFields().forEach(ff -> groupSource.docValueField(ff.field, ff.format)); } if (options.getStoredFieldsContext() != null && options.getStoredFieldsContext().fieldNames() != null) { options.getStoredFieldsContext().fieldNames().forEach(groupSource::storedField); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 91ac46c1d62b8..424db04ce3903 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -290,11 +290,21 @@ public SearchRequestBuilder setFetchSource(@Nullable String[] includes, @Nullabl * * @param name The field to get from the docvalue */ - public SearchRequestBuilder addDocValueField(String name) { - sourceBuilder().docValueField(name); + public SearchRequestBuilder addDocValueField(String name, String format) { + sourceBuilder().docValueField(name, format); return this; } + /** + * Adds a docvalue based field to load and return. The field does not have to be stored, + * but it's recommended to use non-analyzed or numeric fields. + * + * @param name The field to get from the docvalue + */ + public SearchRequestBuilder addDocValueField(String name) { + return addDocValueField(name, null); + } + /** * Adds a stored field to load and return (note, it must be stored) as part of the search request. */ diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 6b39af478f432..46207b94c3af4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -341,13 +341,12 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea return searchTransportService.getConnection(clusterName, discoveryNode); }; if (searchRequest.isMaxConcurrentShardRequestsSet() == false) { - // we try to set a default of max concurrent shard requests based on - // the node count but upper-bound it by 256 by default to keep it sane. A single - // search request that fans out lots of shards should hit a cluster too hard while 256 is already a lot. - // we multiply it by the default number of shards such that a single request in a cluster of 1 would hit all shards of a - // default index. - searchRequest.setMaxConcurrentShardRequests(Math.min(256, nodeCount - * IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getDefault(Settings.EMPTY))); + /* + * We try to set a default of max concurrent shard requests based on the node count but upper-bound it by 256 by default to keep + * it sane. A single search request that fans out to lots of shards should not hit a cluster too hard while 256 is already a + * lot. + */ + searchRequest.setMaxConcurrentShardRequests(Math.min(256, nodeCount)); } boolean preFilterSearchShards = shouldPreFilterSearchShards(searchRequest, shardIterators); searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(), diff --git a/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java b/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java index 148a1dea3095f..a4767507ef1aa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java @@ -25,7 +25,11 @@ public interface AckedClusterStateTaskListener extends ClusterStateTaskListener { /** - * Called to determine which nodes the acknowledgement is expected from + * Called to determine which nodes the acknowledgement is expected from.
+ * + * As this method will be called multiple times to determine the set of acking nodes, + * it is crucial for it to return consistent results: Given the same listener instance + * and the same node parameter, the method implementation should return the same result. * * @param discoveryNode a node * @return true if the node is expected to send ack back, false otherwise diff --git a/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java b/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java index faf2f30bb3ed4..8d61fe964265d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java @@ -61,7 +61,7 @@ public boolean mustAck(DiscoveryNode discoveryNode) { * @param e optional error that might have been thrown */ public void onAllNodesAcked(@Nullable Exception e) { - listener.onResponse(newResponse(true)); + listener.onResponse(newResponse(e == null)); } protected abstract Response newResponse(boolean acknowledged); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 7af2ec2d237d2..db45ce6c9e353 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -181,8 +181,7 @@ static Setting buildNumberOfShardsSetting() { if (maxNumShards < 1) { throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0"); } - return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, Math.min(5, maxNumShards), 1, maxNumShards, - Property.IndexScope, Property.Final); + return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 1, 1, maxNumShards, Property.IndexScope, Property.Final); } public static final String INDEX_SETTING_PREFIX = "index."; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index b8e898cf6f5e3..82d947b4158a2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -363,7 +363,7 @@ public boolean mustAck(DiscoveryNode discoveryNode) { @Override public void onAllNodesAcked(@Nullable Exception e) { - listener.onResponse(new ClusterStateUpdateResponse(true)); + listener.onResponse(new ClusterStateUpdateResponse(e == null)); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 54a6568af3fa2..1757548c28b09 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -563,7 +563,7 @@ private static class AckCountDownListener implements Discovery.AckListener { private final AckedClusterStateTaskListener ackedTaskListener; private final CountDown countDown; - private final DiscoveryNodes nodes; + private final DiscoveryNode masterNode; private final long clusterStateVersion; private final Future ackTimeoutCallback; private Exception lastFailure; @@ -572,15 +572,14 @@ private static class AckCountDownListener implements Discovery.AckListener { ThreadPool threadPool) { this.ackedTaskListener = ackedTaskListener; 
this.clusterStateVersion = clusterStateVersion; - this.nodes = nodes; + this.masterNode = nodes.getMasterNode(); int countDown = 0; for (DiscoveryNode node : nodes) { - if (ackedTaskListener.mustAck(node)) { + //we always wait for at least the master node + if (node.equals(masterNode) || ackedTaskListener.mustAck(node)) { countDown++; } } - //we always wait for at least 1 node (the master) - countDown = Math.max(1, countDown); logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion); this.countDown = new CountDown(countDown); this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, () -> onTimeout()); @@ -588,11 +587,8 @@ private static class AckCountDownListener implements Discovery.AckListener { @Override public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { - if (!ackedTaskListener.mustAck(node)) { - //we always wait for the master ack anyway - if (!node.equals(nodes.getMasterNode())) { - return; - } + if (node.equals(masterNode) == false && ackedTaskListener.mustAck(node) == false) { + return; } if (e == null) { logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion); diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index c19cbe4687ce6..d9cf0f630c0f2 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -227,7 +227,6 @@ public void apply(Settings value, Settings current, Settings previous) { HttpTransportSettings.SETTING_CORS_ENABLED, HttpTransportSettings.SETTING_CORS_MAX_AGE, HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED, - HttpTransportSettings.SETTING_PIPELINING, HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN, HttpTransportSettings.SETTING_HTTP_HOST, HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST, diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java index 218e6e3f63f95..d19cc98441b79 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java @@ -44,6 +44,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Supplier; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; @@ -66,13 +67,16 @@ public void onPingReceived(PingRequest pingRequest) {} private final ConcurrentMap nodesFD = newConcurrentMap(); - private volatile long clusterStateVersion = ClusterState.UNKNOWN_VERSION; + private final Supplier clusterStateSupplier; private volatile DiscoveryNode localNode; - public NodesFaultDetection(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName) { + public NodesFaultDetection(Settings settings, ThreadPool threadPool, TransportService transportService, + Supplier clusterStateSupplier, ClusterName clusterName) { super(settings, threadPool, transportService, clusterName); + this.clusterStateSupplier = clusterStateSupplier; + logger.debug("[node ] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount); @@ 
-208,15 +212,18 @@ private boolean running() { return NodeFD.this.equals(nodesFD.get(node)); } + private PingRequest newPingRequest() { + return new PingRequest(node, clusterName, localNode, clusterStateSupplier.get().version()); + } + @Override public void run() { if (!running()) { return; } - final PingRequest pingRequest = new PingRequest(node, clusterName, localNode, clusterStateVersion); final TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.PING) .withTimeout(pingRetryTimeout).build(); - transportService.sendRequest(node, PING_ACTION_NAME, pingRequest, options, new TransportResponseHandler() { + transportService.sendRequest(node, PING_ACTION_NAME, newPingRequest(), options, new TransportResponseHandler() { @Override public PingResponse newInstance() { return new PingResponse(); @@ -254,7 +261,7 @@ public void handleException(TransportException exp) { } } else { // resend the request, not reschedule, rely on send timeout - transportService.sendRequest(node, PING_ACTION_NAME, pingRequest, options, this); + transportService.sendRequest(node, PING_ACTION_NAME, newPingRequest(), options, this); } } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 4621e6769e962..02b2822fcf431 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -205,7 +205,7 @@ public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService t this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, this::clusterState, masterService, clusterName); this.masterFD.addListener(new MasterNodeFailureListener()); - this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService, clusterName); + this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService, this::clusterState, clusterName); this.nodesFD.addListener(new NodeFaultDetectionListener()); this.pendingStatesQueue = new PendingClusterStatesQueue(logger, MAX_PENDING_CLUSTER_STATES_SETTING.get(settings)); diff --git a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java index f86049292f3fd..df038e8303edb 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java @@ -29,9 +29,11 @@ public class HttpHandlingSettings { private final boolean compression; private final int compressionLevel; private final boolean detailedErrorsEnabled; + private final int pipeliningMaxEvents; public HttpHandlingSettings(int maxContentLength, int maxChunkSize, int maxHeaderSize, int maxInitialLineLength, - boolean resetCookies, boolean compression, int compressionLevel, boolean detailedErrorsEnabled) { + boolean resetCookies, boolean compression, int compressionLevel, boolean detailedErrorsEnabled, + int pipeliningMaxEvents) { this.maxContentLength = maxContentLength; this.maxChunkSize = maxChunkSize; this.maxHeaderSize = maxHeaderSize; @@ -40,6 +42,7 @@ public HttpHandlingSettings(int maxContentLength, int maxChunkSize, int maxHeade this.compression = compression; this.compressionLevel = compressionLevel; this.detailedErrorsEnabled = detailedErrorsEnabled; + this.pipeliningMaxEvents = pipeliningMaxEvents; } public int getMaxContentLength() { @@ -73,4 +76,8 @@ public 
int getCompressionLevel() { public boolean getDetailedErrorsEnabled() { return detailedErrorsEnabled; } + + public int getPipeliningMaxEvents() { + return pipeliningMaxEvents; + } } diff --git a/server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java b/server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java new file mode 100644 index 0000000000000..7db8666e73ae3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.http; + +public class HttpPipelinedMessage implements Comparable<HttpPipelinedMessage> { + + private final int sequence; + + public HttpPipelinedMessage(int sequence) { + this.sequence = sequence; + } + + public int getSequence() { + return sequence; + } + + @Override + public int compareTo(HttpPipelinedMessage o) { + return Integer.compare(sequence, o.sequence); + } +} diff --git a/server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java b/server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java new file mode 100644 index 0000000000000..df8bd7ee1eb80 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.http; + +public class HttpPipelinedRequest<R> extends HttpPipelinedMessage { + + private final R request; + + HttpPipelinedRequest(int sequence, R request) { + super(sequence); + this.request = request; + } + + public R getRequest() { + return request; + } +} diff --git a/server/src/main/java/org/elasticsearch/http/HttpPipeliningAggregator.java b/server/src/main/java/org/elasticsearch/http/HttpPipeliningAggregator.java new file mode 100644 index 0000000000000..f38e9677979db --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/HttpPipeliningAggregator.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.http; + +import org.elasticsearch.common.collect.Tuple; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.PriorityQueue; + +public class HttpPipeliningAggregator<Response extends HttpPipelinedMessage, Listener> { + + private final int maxEventsHeld; + private final PriorityQueue<Tuple<Response, Listener>> outboundHoldingQueue; + /* + * The current read and write sequence numbers. Read sequence numbers are attached to requests in the order they are read from the + * channel, and then transferred to responses. A response is not written to the channel context until its sequence number matches the + * current write sequence, implying that all preceding messages have been written. + */ + private int readSequence; + private int writeSequence; + + public HttpPipeliningAggregator(int maxEventsHeld) { + this.maxEventsHeld = maxEventsHeld; + this.outboundHoldingQueue = new PriorityQueue<>(1, Comparator.comparing(Tuple::v1)); + } + + public <Request> HttpPipelinedRequest<Request> read(final Request request) { + return new HttpPipelinedRequest<>(readSequence++, request); + } + + public List<Tuple<Response, Listener>> write(final Response response, Listener listener) { + if (outboundHoldingQueue.size() < maxEventsHeld) { + ArrayList<Tuple<Response, Listener>> readyResponses = new ArrayList<>(); + outboundHoldingQueue.add(new Tuple<>(response, listener)); + while (!outboundHoldingQueue.isEmpty()) { + /* + * Since the response with the lowest sequence number is the top of the priority queue, we know if its sequence + * number does not match the current write sequence number then we have not processed all preceding responses yet. + */ + final Tuple<Response, Listener> top = outboundHoldingQueue.peek(); + + if (top.v1().getSequence() != writeSequence) { + break; + } + outboundHoldingQueue.poll(); + readyResponses.add(top); + writeSequence++; + } + + return readyResponses; + } else { + int eventCount = outboundHoldingQueue.size() + 1; + throw new IllegalStateException("Too many pipelined events [" + eventCount + "]. Max events allowed [" + + maxEventsHeld + "]."); + } + } + + public List<Tuple<Response, Listener>> removeAllInflightResponses() { + ArrayList<Tuple<Response, Listener>> responses = new ArrayList<>(outboundHoldingQueue); + outboundHoldingQueue.clear(); + return responses; + } +}
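// Illustrative sketch (editor's addition, not part of the patch): how a transport
// implementation might drive the aggregator above. Using HttpPipelinedMessage itself
// as the Response type and Runnable as the Listener is purely for demonstration.
import java.util.List;

import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.http.HttpPipelinedMessage;
import org.elasticsearch.http.HttpPipeliningAggregator;

public class PipeliningExample {

    public static void main(String[] args) {
        // hold at most 4 out-of-order responses before rejecting writes
        HttpPipeliningAggregator<HttpPipelinedMessage, Runnable> aggregator = new HttpPipeliningAggregator<>(4);

        // inbound requests get sequence numbers in read order: 0, then 1
        int first = aggregator.read("GET /a").getSequence();
        int second = aggregator.read("GET /b").getSequence();

        // writing the second response first releases nothing...
        List<Tuple<HttpPipelinedMessage, Runnable>> ready = aggregator.write(new HttpPipelinedMessage(second), () -> {});
        assert ready.isEmpty();

        // ...while completing the first response releases both, in request order
        ready = aggregator.write(new HttpPipelinedMessage(first), () -> {});
        assert ready.size() == 2;
    }
}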
Max events allowed [" + + maxEventsHeld + "]."); + } + } + + public List> removeAllInflightResponses() { + ArrayList> responses = new ArrayList<>(outboundHoldingQueue); + outboundHoldingQueue.clear(); + return responses; + } +} diff --git a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index 98451e0c304b9..4670137d09a54 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -49,8 +49,6 @@ public final class HttpTransportSettings { new Setting<>("http.cors.allow-headers", "X-Requested-With,Content-Type,Content-Length", (value) -> value, Property.NodeScope); public static final Setting SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, Property.NodeScope); - public static final Setting SETTING_PIPELINING = - Setting.boolSetting("http.pipelining", true, Property.NodeScope); public static final Setting SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, Property.NodeScope); public static final Setting SETTING_HTTP_COMPRESSION = diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index af50dfbcb4e18..cf08d93a197de 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -449,6 +449,10 @@ public Mapper parse(ParseContext context) throws IOException { // index for (Map.Entry completionInput : inputMap.entrySet()) { String input = completionInput.getKey(); + if (input.trim().isEmpty()) { + context.addIgnoredField(fieldType.name()); + continue; + } // truncate input if (input.length() > maxInputLength) { int len = Math.min(maxInputLength, input.length()); diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java index d7d9797dad88b..490f8ee72c45d 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java @@ -29,6 +29,7 @@ import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; @@ -77,6 +78,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder supplier = new SetOnce<>(); queryRewriteContext.registerAsyncAction((client, listener) -> { GetRequest getRequest = new GetRequest(indexedShapeIndex, indexedShapeType, indexedShapeId); + getRequest.routing(indexedShapeRouting); fetch(client, getRequest, indexedShapePath, ActionListener.wrap(builder-> { supplier.set(builder); listener.onResponse(null); diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index 6bb8e0259fb15..92da1bc3b65f9 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -33,6 +33,7 @@ import 
org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.sort.SortBuilder; @@ -45,6 +46,7 @@ import java.util.List; import java.util.Objects; import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.XContentParser.Token.END_OBJECT; @@ -65,7 +67,8 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { PARSER.declareBoolean(InnerHitBuilder::setVersion, SearchSourceBuilder.VERSION_FIELD); PARSER.declareBoolean(InnerHitBuilder::setTrackScores, SearchSourceBuilder.TRACK_SCORES_FIELD); PARSER.declareStringArray(InnerHitBuilder::setStoredFieldNames, SearchSourceBuilder.STORED_FIELDS_FIELD); - PARSER.declareStringArray(InnerHitBuilder::setDocValueFields, SearchSourceBuilder.DOCVALUE_FIELDS_FIELD); + PARSER.declareObjectArray(InnerHitBuilder::setDocValueFields, + (p,c) -> FieldAndFormat.fromXContent(p), SearchSourceBuilder.DOCVALUE_FIELDS_FIELD); PARSER.declareField((p, i, c) -> { try { Set scriptFields = new HashSet<>(); @@ -102,7 +105,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { private StoredFieldsContext storedFieldsContext; private QueryBuilder query = DEFAULT_INNER_HIT_QUERY; private List> sorts; - private List docValueFields; + private List docValueFields; private Set scriptFields; private HighlightBuilder highlightBuilder; private FetchSourceContext fetchSourceContext; @@ -134,7 +137,18 @@ public InnerHitBuilder(StreamInput in) throws IOException { version = in.readBoolean(); trackScores = in.readBoolean(); storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); - docValueFields = (List) in.readGenericValue(); + if (in.getVersion().before(Version.V_6_4_0)) { + List fieldList = (List) in.readGenericValue(); + if (fieldList == null) { + docValueFields = null; + } else { + docValueFields = fieldList.stream() + .map(field -> new FieldAndFormat(field, null)) + .collect(Collectors.toList()); + } + } else { + docValueFields = in.readBoolean() ? in.readList(FieldAndFormat::new) : null; + } if (in.readBoolean()) { int size = in.readVInt(); scriptFields = new HashSet<>(size); @@ -174,7 +188,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(version); out.writeBoolean(trackScores); out.writeOptionalWriteable(storedFieldsContext); - out.writeGenericValue(docValueFields); + if (out.getVersion().before(Version.V_6_4_0)) { + out.writeGenericValue(docValueFields == null + ? null + : docValueFields.stream().map(ff -> ff.field).collect(Collectors.toList())); + } else { + out.writeBoolean(docValueFields != null); + if (docValueFields != null) { + out.writeList(docValueFields); + } + } boolean hasScriptFields = scriptFields != null; out.writeBoolean(hasScriptFields); if (hasScriptFields) { @@ -248,7 +271,9 @@ private void writeToBWC(StreamOutput out, out.writeBoolean(version); out.writeBoolean(trackScores); out.writeOptionalWriteable(storedFieldsContext); - out.writeGenericValue(docValueFields); + out.writeGenericValue(docValueFields == null + ? 
null
+            : docValueFields.stream().map(ff -> ff.field).collect(Collectors.toList()));
         boolean hasScriptFields = scriptFields != null;
         out.writeBoolean(hasScriptFields);
         if (hasScriptFields) {
@@ -390,14 +415,14 @@ public InnerHitBuilder setStoredFieldNames(List<String> fieldNames) {
     /**
      * Gets the docvalue fields.
      */
-    public List<String> getDocValueFields() {
+    public List<FieldAndFormat> getDocValueFields() {
         return docValueFields;
     }
 
     /**
      * Sets the stored fields to load from the docvalue and return.
      */
-    public InnerHitBuilder setDocValueFields(List<String> docValueFields) {
+    public InnerHitBuilder setDocValueFields(List<FieldAndFormat> docValueFields) {
         this.docValueFields = docValueFields;
         return this;
     }
@@ -405,14 +430,21 @@ public InnerHitBuilder setDocValueFields(List<String> docValueFields) {
     /**
      * Adds a field to load from the docvalue and return.
      */
-    public InnerHitBuilder addDocValueField(String field) {
+    public InnerHitBuilder addDocValueField(String field, String format) {
         if (docValueFields == null) {
             docValueFields = new ArrayList<>();
         }
-        docValueFields.add(field);
+        docValueFields.add(new FieldAndFormat(field, format));
         return this;
     }
 
+    /**
+     * Adds a field to load from doc values and return.
+     */
+    public InnerHitBuilder addDocValueField(String field) {
+        return addDocValueField(field, null);
+    }
+
     public Set<ScriptField> getScriptFields() {
         return scriptFields;
     }
@@ -489,8 +521,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         }
         if (docValueFields != null) {
             builder.startArray(SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.getPreferredName());
-            for (String docValueField : docValueFields) {
-                builder.value(docValueField);
+            for (FieldAndFormat docValueField : docValueFields) {
+                if (docValueField.format == null) {
+                    builder.value(docValueField.field);
+                } else {
+                    builder.startObject()
+                        .field("field", docValueField.field)
+                        .field("format", docValueField.format)
+                        .endObject();
+                }
             }
             builder.endArray();
         }
diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
index 60741c87f2165..2695c1728491b 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
@@ -52,8 +52,10 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
     public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE_SETTING =
         Setting.memorySizeSetting("indices.queries.cache.size", "10%", Property.NodeScope);
+    // mostly a way to prevent queries from being the main source of memory usage
+    // of the cache
     public static final Setting<Integer> INDICES_CACHE_QUERY_COUNT_SETTING =
-        Setting.intSetting("indices.queries.cache.count", 1000, 1, Property.NodeScope);
+        Setting.intSetting("indices.queries.cache.count", 10_000, 1, Property.NodeScope);
     // enables caching on all segments instead of only the larger ones, for testing only
     public static final Setting<Boolean> INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING =
         Setting.boolSetting("indices.queries.cache.all_segments", false, Property.NodeScope);
diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java
index d917018eb8eb5..c5df30a2d8771 100644
--- a/server/src/main/java/org/elasticsearch/node/Node.java
+++ b/server/src/main/java/org/elasticsearch/node/Node.java
@@ -231,6 +231,7 @@ public static final Settings addNodeNameIfNeeded(Settings settings, final String
     private final Lifecycle lifecycle = new Lifecycle();
     private final Injector injector;
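    /*
     * Of the two settings fields that follow, "originalSettings" preserves exactly the settings the Environment was
     * created with, while "settings" additionally folds in values contributed by plugins during node construction
     * (see the accessors added further down in this hunk).
     */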
private final Settings settings; + private final Settings originalSettings; private final Environment environment; private final NodeEnvironment nodeEnvironment; private final PluginsService pluginsService; @@ -261,6 +262,7 @@ protected Node(final Environment environment, Collection logger.info("initializing ..."); } try { + originalSettings = environment.settings(); Settings tmpSettings = Settings.builder().put(environment.settings()) .put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build(); @@ -568,7 +570,14 @@ protected void processRecoverySettings(ClusterSettings clusterSettings, Recovery } /** - * The settings that were used to create the node. + * The original settings that were used to create the node + */ + public Settings originalSettings() { + return originalSettings; + } + + /** + * The settings that are used by this node. Contains original settings as well as additional settings provided by plugins. */ public Settings settings() { return this.settings; diff --git a/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java index 5e58aa5a3a926..61145c7a1d7cf 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java @@ -70,6 +70,8 @@ default void onNodeStarted() { * Returns a map of {@link ClusterState.Custom} supplier that should be invoked to initialize the initial clusterstate. * This allows custom clusterstate extensions to be always present and prevents invariants where clusterstates are published * but customs are not initialized. + * + * TODO: Remove this whole concept of InitialClusterStateCustomSupplier, it's not used anymore */ default Map> getInitialClusterStateCustomSupplier() { return Collections.emptyMap(); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java index d2740827d1ebf..4b7bb9d8de0fa 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java @@ -49,7 +49,6 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteRepositoryRequest deleteRepositoryRequest = deleteRepositoryRequest(request.param("repository")); - deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); deleteRepositoryRequest.timeout(request.paramAsTime("timeout", deleteRepositoryRequest.timeout())); deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); return channel -> client.admin().cluster().deleteRepository(deleteRepositoryRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 3098dc03a8c71..4074d1a8fa1be 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -214,7 +214,7 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil if 
(Strings.hasText(sDocValueFields)) { String[] sFields = Strings.splitStringByCommaToArray(sDocValueFields); for (String field : sFields) { - searchSourceBuilder.docValueField(field); + searchSourceBuilder.docValueField(field, null); } } } diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 1824f17941b0e..8677370fc9927 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -49,17 +49,17 @@ public interface DocValueFormat extends NamedWriteable { /** Format a long value. This is used by terms and histogram aggregations * to format keys for fields that use longs as a doc value representation * such as the {@code long} and {@code date} fields. */ - String format(long value); + Object format(long value); /** Format a double value. This is used by terms and stats aggregations * to format keys for fields that use numbers as a doc value representation * such as the {@code long}, {@code double} or {@code date} fields. */ - String format(double value); + Object format(double value); /** Format a binary value. This is used by terms aggregations to format * keys for fields that use binary doc value representations such as the * {@code keyword} and {@code ip} fields. */ - String format(BytesRef value); + Object format(BytesRef value); /** Parse a value that was formatted with {@link #format(long)} back to the * original long value. */ @@ -85,13 +85,13 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public String format(long value) { - return Long.toString(value); + public Long format(long value) { + return value; } @Override - public String format(double value) { - return Double.toString(value); + public Double format(double value) { + return value; } @Override @@ -235,13 +235,13 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public String format(long value) { - return java.lang.Boolean.valueOf(value != 0).toString(); + public Boolean format(long value) { + return java.lang.Boolean.valueOf(value != 0); } @Override - public String format(double value) { - return java.lang.Boolean.valueOf(value != 0).toString(); + public Boolean format(double value) { + return java.lang.Boolean.valueOf(value != 0); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index c72e9d22dc0ae..bb391f21f1e40 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -407,8 +407,8 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { final long high = nextTransition; final DocValueFormat format = ft.docValueFormat(null, null); - final String formattedLow = format.format(low); - final String formattedHigh = format.format(high); + final Object formattedLow = format.format(low); + final Object formattedHigh = format.format(high); if (ft.isFieldWithinQuery(reader, formattedLow, formattedHigh, true, false, tz, null, context) == Relation.WITHIN) { // All values in this reader have the same offset despite daylight saving times. 
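The `DocValueFormat` change above is behavioural, not just cosmetic: `format(long)`, `format(double)` and `format(BytesRef)` now return `Object`, so `RAW` and `BOOLEAN` can hand back native `Long`/`Double`/`Boolean` values, and every `*_as_string` rendering path patched below appends an explicit `toString()`. A minimal sketch of the new contract and of the `docValueField(field, format)` entry point it feeds (illustration only, not part of the patch; the index and field names are invented, and `use_field_mapping` is the opt-in constant introduced later in this change):

--------------------------------------------
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class DocValueFormatDemo {

    // The formats now return natively-typed objects rather than pre-rendered strings.
    static void formatContract() {
        Object asLong = DocValueFormat.RAW.format(42L);      // Long 42, previously "42"
        Object asBool = DocValueFormat.BOOLEAN.format(1L);   // Boolean true, previously "true"
        String keyAsString = asLong.toString();              // what *_as_string paths now do explicitly
        System.out.println(asLong + " " + asBool + " " + keyAsString);
    }

    // The new per-field format option builds on this: the two-argument overload
    // attaches a format, while the one-argument form keeps the 6.3 behaviour and
    // triggers a deprecation warning during the fetch phase.
    static SearchRequest requestWithFormats() {
        SearchSourceBuilder source = new SearchSourceBuilder()
            .docValueField("release_date", "use_field_mapping")
            .docValueField("rating");
        return new SearchRequest("movies").source(source);
    }
}
--------------------------------------------

On the REST layer the same choice surfaces as either plain strings or `{"field": ..., "format": ...}` objects inside `docvalue_fields`, which is what `FieldAndFormat.fromXContent` (further down in this patch) accepts.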
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index dfa12db0cd31c..84dec2c983e28 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -107,7 +107,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String getKeyAsString() { - return format.format(key); + return format.format(key).toString(); } @Override @@ -138,7 +138,7 @@ Bucket reduce(List buckets, ReduceContext context) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - String keyAsString = format.format(key); + String keyAsString = format.format(key).toString(); if (keyed) { builder.startObject(keyAsString); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index b3516b04dfc3c..1831e012a318c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -103,7 +103,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String getKeyAsString() { - return format.format(key); + return format.format(key).toString(); } @Override @@ -134,7 +134,7 @@ Bucket reduce(List buckets, ReduceContext context) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - String keyAsString = format.format(key); + String keyAsString = format.format(key).toString(); if (keyed) { builder.startObject(keyAsString); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java index e7a3c35231c1d..afa3be702cc33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java @@ -155,7 +155,7 @@ public Object getFrom() { @Override public String getFromAsString() { - return from == null ? null : format.format(from); + return from == null ? null : format.format(from).toString(); } @Override @@ -165,7 +165,7 @@ public Object getTo() { @Override public String getToAsString() { - return to == null ? null : format.format(to); + return to == null ? 
null : format.format(to).toString(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index 9485d534ab9fa..cde90727e49fc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -98,7 +98,7 @@ public String getFromAsString() { if (Double.isInfinite(from)) { return null; } else { - return format.format(from); + return format.format(from).toString(); } } @@ -107,7 +107,7 @@ public String getToAsString() { if (Double.isInfinite(to)) { return null; } else { - return format.format(to); + return format.format(to).toString(); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java index 29a8e16d9b942..2d22b61472a5a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java @@ -78,7 +78,7 @@ public Object getKey() { @Override public String getKeyAsString() { - return format.format(term); + return format.format(term).toString(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java index b170171f5cfad..a73ee1818cf6a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java @@ -83,7 +83,7 @@ public Number getKeyAsNumber() { @Override public String getKeyAsString() { - return format.format(termBytes); + return format.format(termBytes).toString(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index da87e59a6fdfd..abdc195b514a1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -153,12 +153,12 @@ private long getBackgroundFrequency(String value) throws IOException { } public long getBackgroundFrequency(BytesRef termBytes) throws IOException { - String value = config.format().format(termBytes); + String value = config.format().format(termBytes).toString(); return getBackgroundFrequency(value); } public long getBackgroundFrequency(long termNum) throws IOException { - String value = config.format().format(termNum); + String value = config.format().format(termNum).toString(); return getBackgroundFrequency(value); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java index 688acfae413ca..c35b0bfd2d095 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java @@ -135,7 +135,7 @@ private long getBackgroundFrequency(String value) throws IOException { } public long getBackgroundFrequency(BytesRef termBytes) throws IOException { - String value = format.format(termBytes); + String value = format.format(termBytes).toString(); return getBackgroundFrequency(value); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index b87ce51c4bd25..65e684e315027 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -63,7 +63,7 @@ protected void writeTermTo(StreamOutput out) throws IOException { @Override public String getKeyAsString() { - return format.format(term); + return format.format(term).toString(); } @Override @@ -90,7 +90,7 @@ Bucket newBucket(long docCount, InternalAggregations aggs, long docCountError) { protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { builder.field(CommonFields.KEY.getPreferredName(), term); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term)); + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term).toString()); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index 025c397d3bd00..7857df8713b6e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -63,7 +63,7 @@ protected void writeTermTo(StreamOutput out) throws IOException { @Override public String getKeyAsString() { - return format.format(term); + return format.format(term).toString(); } @Override @@ -90,7 +90,7 @@ Bucket newBucket(long docCount, InternalAggregations aggs, long docCountError) { protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { builder.field(CommonFields.KEY.getPreferredName(), term); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term)); + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term).toString()); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index 11d0b40c7ce39..4971f74f03dc5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -80,7 +80,7 @@ public Number getKeyAsNumber() { @Override public String getKeyAsString() { - return format.format(termBytes); + return format.format(termBytes).toString(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java index dba16397fc050..b3439671580e7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java @@ -48,7 +48,7 @@ protected SingleValue(StreamInput in) throws IOException { @Override public String getValueAsString() { - return format.format(value()); + return format.format(value()).toString(); } @Override @@ -79,7 +79,7 @@ protected MultiValue(StreamInput in) throws IOException { public abstract double value(String name); public String valueAsString(String name) { - return format.format(value(name)); + return format.format(value(name)).toString(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java index c30574c576de8..285ea469aed9e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java @@ -113,7 +113,7 @@ public InternalAvg doReduce(List aggregations, ReduceContex public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.field(CommonFields.VALUE.getPreferredName(), count != 0 ? getValue() : null); if (count != 0 && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(getValue())); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(getValue()).toString()); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java index 112d379362700..449351b88b169 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java @@ -85,7 +85,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th boolean hasValue = !Double.isInfinite(max); builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? max : null); if (hasValue && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(max)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(max).toString()); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java index dcf180dde89a4..886642c222baf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java @@ -85,7 +85,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th boolean hasValue = !Double.isInfinite(min); builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? 
min : null); if (hasValue && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(min)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(min).toString()); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java index 2ff3e6d3becb2..48d35de6cb6ab 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java @@ -137,7 +137,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.field(CommonFields.KEY.getPreferredName(), keys[i]); builder.field(CommonFields.VALUE.getPreferredName(), value); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString()); } builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java index 143775b5bfd15..3806d7feb9550 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java @@ -120,7 +120,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.field(CommonFields.KEY.getPreferredName(), keys[i]); builder.field(CommonFields.VALUE.getPreferredName(), value); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString()); } builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java index fb64d168db6aa..cedcdd4aab07d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java @@ -95,7 +95,7 @@ public InternalSum doReduce(List aggregations, ReduceContex public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.field(CommonFields.VALUE.getPreferredName(), sum); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(sum)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(sum).toString()); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java index a528814e2891d..836bd7fc9ff03 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; @@ -70,7 +71,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder> sorts = null; private HighlightBuilder highlightBuilder; private StoredFieldsContext storedFieldsContext; - private List fieldDataFields; + private List docValueFields; private Set scriptFields; private FetchSourceContext fetchSourceContext; @@ -91,7 +92,7 @@ protected TopHitsAggregationBuilder(TopHitsAggregationBuilder clone, new HighlightBuilder(clone.highlightBuilder, clone.highlightBuilder.highlightQuery(), clone.highlightBuilder.fields()); this.storedFieldsContext = clone.storedFieldsContext == null ? null : new StoredFieldsContext(clone.storedFieldsContext); - this.fieldDataFields = clone.fieldDataFields == null ? null : new ArrayList<>(clone.fieldDataFields); + this.docValueFields = clone.docValueFields == null ? null : new ArrayList<>(clone.docValueFields); this.scriptFields = clone.scriptFields == null ? null : new HashSet<>(clone.scriptFields); this.fetchSourceContext = clone.fetchSourceContext == null ? null : new FetchSourceContext(clone.fetchSourceContext.fetchSource(), clone.fetchSourceContext.includes(), @@ -112,9 +113,9 @@ public TopHitsAggregationBuilder(StreamInput in) throws IOException { fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); if (in.readBoolean()) { int size = in.readVInt(); - fieldDataFields = new ArrayList<>(size); + docValueFields = new ArrayList<>(size); for (int i = 0; i < size; i++) { - fieldDataFields.add(in.readString()); + docValueFields.add(new FieldAndFormat(in)); } } storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); @@ -143,12 +144,12 @@ public TopHitsAggregationBuilder(StreamInput in) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException { out.writeBoolean(explain); out.writeOptionalWriteable(fetchSourceContext); - boolean hasFieldDataFields = fieldDataFields != null; + boolean hasFieldDataFields = docValueFields != null; out.writeBoolean(hasFieldDataFields); if (hasFieldDataFields) { - out.writeVInt(fieldDataFields.size()); - for (String fieldName : fieldDataFields) { - out.writeString(fieldName); + out.writeVInt(docValueFields.size()); + for (FieldAndFormat ff : docValueFields) { + ff.writeTo(out); } } out.writeOptionalWriteable(storedFieldsContext); @@ -404,40 +405,33 @@ public StoredFieldsContext storedFields() { } /** - * Adds a field to load from the field data cache and return as part of + * Adds a field to load from doc values and return as part of * the search request. 
*/ - public TopHitsAggregationBuilder fieldDataField(String fieldDataField) { - if (fieldDataField == null) { - throw new IllegalArgumentException("[fieldDataField] must not be null: [" + name + "]"); + public TopHitsAggregationBuilder docValueField(String docValueField, String format) { + if (docValueField == null) { + throw new IllegalArgumentException("[docValueField] must not be null: [" + name + "]"); } - if (fieldDataFields == null) { - fieldDataFields = new ArrayList<>(); + if (docValueFields == null) { + docValueFields = new ArrayList<>(); } - fieldDataFields.add(fieldDataField); + docValueFields.add(new FieldAndFormat(docValueField, format)); return this; } /** - * Adds fields to load from the field data cache and return as part of + * Adds a field to load from doc values and return as part of * the search request. */ - public TopHitsAggregationBuilder fieldDataFields(List fieldDataFields) { - if (fieldDataFields == null) { - throw new IllegalArgumentException("[fieldDataFields] must not be null: [" + name + "]"); - } - if (this.fieldDataFields == null) { - this.fieldDataFields = new ArrayList<>(); - } - this.fieldDataFields.addAll(fieldDataFields); - return this; + public TopHitsAggregationBuilder docValueField(String docValueField) { + return docValueField(docValueField, null); } /** * Gets the field-data fields. */ - public List fieldDataFields() { - return fieldDataFields; + public List fieldDataFields() { + return docValueFields; } /** @@ -587,7 +581,7 @@ protected TopHitsAggregatorFactory doBuild(SearchContext context, AggregatorFact optionalSort = SortBuilder.buildSort(sorts, context.getQueryShardContext()); } return new TopHitsAggregatorFactory(name, from, size, explain, version, trackScores, optionalSort, highlightBuilder, - storedFieldsContext, fieldDataFields, fields, fetchSourceContext, context, parent, subfactoriesBuilder, metaData); + storedFieldsContext, docValueFields, fields, fetchSourceContext, context, parent, subfactoriesBuilder, metaData); } @Override @@ -603,10 +597,15 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param if (storedFieldsContext != null) { storedFieldsContext.toXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), builder); } - if (fieldDataFields != null) { + if (docValueFields != null) { builder.startArray(SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.getPreferredName()); - for (String fieldDataField : fieldDataFields) { - builder.value(fieldDataField); + for (FieldAndFormat dvField : docValueFields) { + builder.startObject() + .field("field", dvField.field); + if (dvField.format != null) { + builder.field("format", dvField.format); + } + builder.endObject(); } builder.endArray(); } @@ -725,14 +724,9 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa } else if (SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List fieldDataFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - fieldDataFields.add(parser.text()); - } else { - throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING - + "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation()); - } + FieldAndFormat ff = FieldAndFormat.fromXContent(parser); + factory.docValueField(ff.field, ff.format); } - factory.fieldDataFields(fieldDataFields); } else if 
(SearchSourceBuilder.SORT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List> sorts = SortBuilder.fromXContent(parser); factory.sorts(sorts); @@ -752,7 +746,7 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa @Override protected int doHashCode() { - return Objects.hash(explain, fetchSourceContext, fieldDataFields, storedFieldsContext, from, highlightBuilder, + return Objects.hash(explain, fetchSourceContext, docValueFields, storedFieldsContext, from, highlightBuilder, scriptFields, size, sorts, trackScores, version); } @@ -761,7 +755,7 @@ protected boolean doEquals(Object obj) { TopHitsAggregationBuilder other = (TopHitsAggregationBuilder) obj; return Objects.equals(explain, other.explain) && Objects.equals(fetchSourceContext, other.fetchSourceContext) - && Objects.equals(fieldDataFields, other.fieldDataFields) + && Objects.equals(docValueFields, other.docValueFields) && Objects.equals(storedFieldsContext, other.storedFieldsContext) && Objects.equals(from, other.from) && Objects.equals(highlightBuilder, other.highlightBuilder) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java index 09c26b169e528..416c984610503 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java @@ -25,6 +25,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; @@ -47,13 +48,13 @@ public class TopHitsAggregatorFactory extends AggregatorFactory sort; private final HighlightBuilder highlightBuilder; private final StoredFieldsContext storedFieldsContext; - private final List docValueFields; + private final List docValueFields; private final List scriptFields; private final FetchSourceContext fetchSourceContext; TopHitsAggregatorFactory(String name, int from, int size, boolean explain, boolean version, boolean trackScores, Optional sort, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext, - List docValueFields, List scriptFields, FetchSourceContext fetchSourceContext, + List docValueFields, List scriptFields, FetchSourceContext fetchSourceContext, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { super(name, context, parent, subFactories, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java index 4fa4f1f6c8620..2eac04a9581be 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java @@ -85,7 +85,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th boolean 
hasValue = !(Double.isInfinite(value) || Double.isNaN(value)); builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null); if (hasValue && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString()); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java index 76a240d317839..a7ef024028f60 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java @@ -108,7 +108,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th boolean hasValue = !Double.isInfinite(value); builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null); if (hasValue && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString()); } builder.startArray(KEYS_FIELD.getPreferredName()); for (String key : keys) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java index 42a8b28a51a9f..5d13638f70a34 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java @@ -97,7 +97,7 @@ public double percentile(double percent) throws IllegalArgumentException { @Override public String percentileAsString(double percent) { - return format.format(percentile(percent)); + return format.format(percentile(percent)).toString(); } DocValueFormat formatter() { diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index c4a6b3da6b1d4..c42a1a12a1877 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -47,6 +47,7 @@ import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.internal.SearchContext; @@ -64,6 +65,7 @@ import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; @@ -162,7 +164,7 @@ public static HighlightBuilder highlight() { private int terminateAfter = SearchContext.DEFAULT_TERMINATE_AFTER; private 
StoredFieldsContext storedFieldsContext; - private List docValueFields; + private List docValueFields; private List scriptFields; private FetchSourceContext fetchSourceContext; @@ -197,7 +199,22 @@ public SearchSourceBuilder(StreamInput in) throws IOException { aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new); explain = in.readOptionalBoolean(); fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); - docValueFields = (List) in.readGenericValue(); + if (in.getVersion().before(Version.V_6_4_0)) { + List dvFields = (List) in.readGenericValue(); + if (dvFields == null) { + docValueFields = null; + } else { + docValueFields = dvFields.stream() + .map(field -> new FieldAndFormat(field, null)) + .collect(Collectors.toList()); + } + } else { + if (in.readBoolean()) { + docValueFields = in.readList(FieldAndFormat::new); + } else { + docValueFields = null; + } + } storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); from = in.readVInt(); highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new); @@ -246,7 +263,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(aggregations); out.writeOptionalBoolean(explain); out.writeOptionalWriteable(fetchSourceContext); - out.writeGenericValue(docValueFields); + if (out.getVersion().before(Version.V_6_4_0)) { + out.writeGenericValue(docValueFields == null + ? null + : docValueFields.stream().map(ff -> ff.field).collect(Collectors.toList())); + } else { + out.writeBoolean(docValueFields != null); + if (docValueFields != null) { + out.writeList(docValueFields); + } + } out.writeOptionalWriteable(storedFieldsContext); out.writeVInt(from); out.writeOptionalWriteable(highlightBuilder); @@ -764,22 +790,30 @@ public StoredFieldsContext storedFields() { /** * Gets the docvalue fields. */ - public List docValueFields() { + public List docValueFields() { return docValueFields; } /** - * Adds a field to load from the docvalue and return as part of the + * Adds a field to load from the doc values and return as part of the * search request. */ - public SearchSourceBuilder docValueField(String name) { + public SearchSourceBuilder docValueField(String name, @Nullable String format) { if (docValueFields == null) { docValueFields = new ArrayList<>(); } - docValueFields.add(name); + docValueFields.add(new FieldAndFormat(name, format)); return this; } + /** + * Adds a field to load from the doc values and return as part of the + * search request. + */ + public SearchSourceBuilder docValueField(String name) { + return docValueField(name, null); + } + /** * Adds a script field under the given name with the provided script. 
* @@ -1076,12 +1110,7 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th } else if (DOCVALUE_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { docValueFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - docValueFields.add(parser.text()); - } else { - throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING + - "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation()); - } + docValueFields.add(FieldAndFormat.fromXContent(parser)); } } else if (INDICES_BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { @@ -1177,8 +1206,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (docValueFields != null) { builder.startArray(DOCVALUE_FIELDS_FIELD.getPreferredName()); - for (String docValueField : docValueFields) { - builder.value(docValueField); + for (FieldAndFormat docValueField : docValueFields) { + builder.startObject() + .field("field", docValueField.field); + if (docValueField.format != null) { + builder.field("format", docValueField.format); + } + builder.endObject(); } builder.endArray(); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java index 325d28e459282..cf1596fd326b9 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java @@ -18,23 +18,111 @@ */ package org.elasticsearch.search.fetch.subphase; +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; + +import java.io.IOException; import java.util.List; +import java.util.Objects; /** * All the required context to pull a field from the doc values. */ public class DocValueFieldsContext { - private final List fields; + public static final String USE_DEFAULT_FORMAT = "use_field_mapping"; + + /** + * Wrapper around a field name and the format that should be used to + * display values of this field. + */ + public static final class FieldAndFormat implements Writeable { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("script", + a -> new FieldAndFormat((String) a[0], (String) a[1])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("field")); + PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), new ParseField("format")); + } + + /** + * Parse a {@link FieldAndFormat} from some {@link XContent}. 
+ */ + public static FieldAndFormat fromXContent(XContentParser parser) throws IOException { + Token token = parser.currentToken(); + if (token.isValue()) { + return new FieldAndFormat(parser.text(), null); + } else { + return PARSER.apply(parser, null); + } + } + + /** The name of the field. */ + public final String field; + + /** The format of the field, or {@code null} if defaults should be used. */ + public final String format; + + /** Sole constructor. */ + public FieldAndFormat(String field, @Nullable String format) { + this.field = Objects.requireNonNull(field); + this.format = format; + } + + /** Serialization constructor. */ + public FieldAndFormat(StreamInput in) throws IOException { + this.field = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + format = in.readOptionalString(); + } else { + format = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(field); + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeOptionalString(format); + } + } + + @Override + public int hashCode() { + int h = field.hashCode(); + h = 31 * h + Objects.hashCode(format); + return h; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + FieldAndFormat other = (FieldAndFormat) obj; + return field.equals(other.field) && Objects.equals(format, other.format); + } + + } + + private final List fields; - public DocValueFieldsContext(List fields) { + public DocValueFieldsContext(List fields) { this.fields = fields; } /** * Returns the required docvalue fields */ - public List fields() { + public List fields() { return this.fields; } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java index 60def08c89104..a1562e118fb86 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java @@ -20,19 +20,32 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; +import org.apache.lucene.index.SortedNumericDocValues; import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.fielddata.AtomicFieldData; +import org.elasticsearch.index.fielddata.AtomicNumericFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; +import java.util.List; +import java.util.Objects; /** * Query sub phase which pulls data from doc values @@ -41,6 +54,8 
@@ */ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(DocValueFieldsFetchSubPhase.class)); + @Override public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { @@ -48,9 +63,10 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept // retrieve the `doc_value` associated with the collapse field String name = context.collapse().getFieldType().name(); if (context.docValueFieldsContext() == null) { - context.docValueFieldsContext(new DocValueFieldsContext(Collections.singletonList(name))); - } else if (context.docValueFieldsContext().fields().contains(name) == false) { - context.docValueFieldsContext().fields().add(name); + context.docValueFieldsContext(new DocValueFieldsContext( + Collections.singletonList(new FieldAndFormat(name, DocValueFieldsContext.USE_DEFAULT_FORMAT)))); + } else if (context.docValueFieldsContext().fields().stream().map(ff -> ff.field).anyMatch(name::equals) == false) { + context.docValueFieldsContext().fields().add(new FieldAndFormat(name, DocValueFieldsContext.USE_DEFAULT_FORMAT)); } } @@ -59,24 +75,51 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept } hits = hits.clone(); // don't modify the incoming hits - Arrays.sort(hits, (a, b) -> Integer.compare(a.docId(), b.docId())); + Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); - for (String field : context.docValueFieldsContext().fields()) { + for (FieldAndFormat fieldAndFormat : context.docValueFieldsContext().fields()) { + String field = fieldAndFormat.field; MappedFieldType fieldType = context.mapperService().fullName(field); if (fieldType != null) { + final IndexFieldData indexFieldData = context.getForField(fieldType); + final DocValueFormat format; + if (fieldAndFormat.format == null) { + DEPRECATION_LOGGER.deprecated("Doc-value field [" + fieldAndFormat.field + "] is not using a format. The output will " + + "change in 7.0 when doc value fields get formatted based on mappings by default. 
It is recommended to pass " + + "[format={}] with the doc value field in order to opt in for the future behaviour and ease the migration to " + + "7.0.", DocValueFieldsContext.USE_DEFAULT_FORMAT); + format = null; + } else { + String formatDesc = fieldAndFormat.format; + if (Objects.equals(formatDesc, DocValueFieldsContext.USE_DEFAULT_FORMAT)) { + formatDesc = null; + } + format = fieldType.docValueFormat(formatDesc, null); + } LeafReaderContext subReaderContext = null; AtomicFieldData data = null; - ScriptDocValues values = null; + ScriptDocValues scriptValues = null; // legacy + SortedBinaryDocValues binaryValues = null; // binary / string / ip fields + SortedNumericDocValues longValues = null; // int / date fields + SortedNumericDoubleValues doubleValues = null; // floating-point fields for (SearchHit hit : hits) { // if the reader index has changed we need to get a new doc values reader instance if (subReaderContext == null || hit.docId() >= subReaderContext.docBase + subReaderContext.reader().maxDoc()) { int readerIndex = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves()); subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex); - data = context.getForField(fieldType).load(subReaderContext); - values = data.getScriptValues(); + data = indexFieldData.load(subReaderContext); + if (format == null) { + scriptValues = data.getScriptValues(); + } else if (indexFieldData instanceof IndexNumericFieldData) { + if (((IndexNumericFieldData) indexFieldData).getNumericType().isFloatingPoint()) { + doubleValues = ((AtomicNumericFieldData) data).getDoubleValues(); + } else { + longValues = ((AtomicNumericFieldData) data).getLongValues(); + } + } else { + binaryValues = data.getBytesValues(); + } } - int subDocId = hit.docId() - subReaderContext.docBase; - values.setNextDocId(subDocId); if (hit.fieldsOrNull() == null) { hit.fields(new HashMap<>(2)); } @@ -85,7 +128,33 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept hitField = new DocumentField(field, new ArrayList<>(2)); hit.getFields().put(field, hitField); } - hitField.getValues().addAll(values); + final List values = hitField.getValues(); + + int subDocId = hit.docId() - subReaderContext.docBase; + if (scriptValues != null) { + scriptValues.setNextDocId(subDocId); + values.addAll(scriptValues); + } else if (binaryValues != null) { + if (binaryValues.advanceExact(subDocId)) { + for (int i = 0, count = binaryValues.docValueCount(); i < count; ++i) { + values.add(format.format(binaryValues.nextValue())); + } + } + } else if (longValues != null) { + if (longValues.advanceExact(subDocId)) { + for (int i = 0, count = longValues.docValueCount(); i < count; ++i) { + values.add(format.format(longValues.nextValue())); + } + } + } else if (doubleValues != null) { + if (doubleValues.advanceExact(subDocId)) { + for (int i = 0, count = doubleValues.docValueCount(); i < count; ++i) { + values.add(format.format(doubleValues.nextValue())); + } + } + } else { + throw new AssertionError("Unreachable code"); + } } } } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpChannel.java b/server/src/main/java/org/elasticsearch/transport/TcpChannel.java index 42f1417d79b32..1a022ee9f4856 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpChannel.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpChannel.java @@ -107,6 +107,15 @@ public interface TcpChannel extends Releasable { */ void sendMessage(BytesReference reference, ActionListener listener); 
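    // The convenience overload added below delegates to the existing two-argument
    // closeChannel(channel, blocking) with blocking=false; the TcpTransport hunks
    // that follow show its call sites dropping the boolean flag.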
+    /**
+     * Closes the channel without blocking.
+     *
+     * @param channel to close
+     */
+    static <C extends TcpChannel> void closeChannel(C channel) {
+        closeChannel(channel, false);
+    }
+
     /**
      * Closes the channel.
      *
diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
index 033b72d04d985..04a882f3e8b45 100644
--- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
+++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
@@ -983,7 +983,7 @@ protected final void doStop() {
     protected void onException(TcpChannel channel, Exception e) {
         if (!lifecycle.started()) {
             // just close and ignore - we are already stopped and just need to make sure we release all resources
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
             return;
         }
 
@@ -991,20 +991,20 @@ protected void onException(TcpChannel channel, Exception e) {
             logger.trace(() -> new ParameterizedMessage(
                 "close connection exception caught on transport layer [{}], disconnecting from relevant node", channel), e);
             // close the channel, which will cause a node to be disconnected if relevant
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (isConnectException(e)) {
             logger.trace(() -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (e instanceof BindException) {
             logger.trace(() -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (e instanceof CancelledKeyException) {
             logger.trace(() -> new ParameterizedMessage(
                 "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", channel), e);
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         } else if (e instanceof TcpTransport.HttpOnTransportException) {
             // in case we are able to return data, serialize the exception content and sent it back to the client
             if (channel.isOpen()) {
@@ -1012,13 +1012,13 @@ protected void onException(TcpChannel channel, Exception e) {
                 final SendMetricListener closeChannel = new SendMetricListener(message.length()) {
                     @Override
                     protected void innerInnerOnResponse(Void v) {
-                        TcpChannel.closeChannel(channel, false);
+                        TcpChannel.closeChannel(channel);
                     }
 
                     @Override
                     protected void innerOnFailure(Exception e) {
                         logger.debug("failed to send message to httpOnTransport channel", e);
-                        TcpChannel.closeChannel(channel, false);
+                        TcpChannel.closeChannel(channel);
                     }
                 };
                 internalSendMessage(channel, message, closeChannel);
@@ -1026,10 +1026,20 @@ protected void innerOnFailure(Exception e) {
         } else {
             logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e);
             // close the channel, which will cause a node to be disconnected if relevant
-            TcpChannel.closeChannel(channel, false);
+            TcpChannel.closeChannel(channel);
         }
     }
 
+    /**
+     * Exception handler for exceptions that are not associated with a specific channel.
+     *
+     * @param exception the exception
+     */
+    protected void onNonChannelException(Exception exception) {
+        logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()),
+            exception);
+    }
+
     protected void serverAcceptedChannel(TcpChannel channel) {
         boolean addedOnThisCall = acceptedChannels.add(channel);
         assert addedOnThisCall : "Channel should only be added to accept channel set once";
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
index 0aa5691dc67ad..43d94f56e5af3 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
@@ -67,7 +67,7 @@ public void testVerifyRepositoryWithBlocks() {
         try {
             setClusterReadOnly(true);
             VerifyRepositoryResponse response = client().admin().cluster().prepareVerifyRepository("test-repo-blocks").execute().actionGet();
-            assertThat(response.getNodes().length, equalTo(cluster().numDataAndMasterNodes()));
+            assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes()));
         } finally {
             setClusterReadOnly(false);
         }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponseTests.java
new file mode 100644
index 0000000000000..fe97e778dadb8
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponseTests.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.elasticsearch.action.admin.cluster.repositories.delete; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +public class DeleteRepositoryResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected DeleteRepositoryResponse doParseInstance(XContentParser parser) { + return DeleteRepositoryResponse.fromXContent(parser); + } + + @Override + protected DeleteRepositoryResponse createBlankInstance() { + return new DeleteRepositoryResponse(); + } + + @Override + protected DeleteRepositoryResponse createTestInstance() { + return new DeleteRepositoryResponse(randomBoolean()); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index c66fa4b244f18..5ca7cb1e5066d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -73,7 +73,7 @@ protected void setUpRepository() throws Exception { logger.info("--> verify the repository"); VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get(); - assertThat(verifyResponse.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + assertThat(verifyResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); logger.info("--> create a snapshot"); CreateSnapshotResponse snapshotResponse = client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java index 114af3c13e707..e8dd3943cb762 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -38,8 +39,9 @@ public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTest protected UpdateSettingsRequest mutateInstance(UpdateSettingsRequest request) { UpdateSettingsRequest mutation = copyRequest(request); List mutators = new ArrayList<>(); - mutators.add(() -> mutation.masterNodeTimeout(randomTimeValue())); - mutators.add(() -> mutation.timeout(randomTimeValue())); + mutators.add(() -> mutation + .masterNodeTimeout(randomValueOtherThan(request.masterNodeTimeout().getStringRep(), ESTestCase::randomTimeValue))); + mutators.add(() -> mutation.timeout(randomValueOtherThan(request.masterNodeTimeout().getStringRep(), ESTestCase::randomTimeValue))); mutators.add(() -> mutation.settings(mutateSettings(request.settings()))); mutators.add(() -> mutation.indices(mutateIndices(request.indices()))); mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(request.indicesOptions(), @@ -72,7 +74,7 @@ public static UpdateSettingsRequest createTestItem() { private static UpdateSettingsRequest 
copyRequest(UpdateSettingsRequest request) { UpdateSettingsRequest result = new UpdateSettingsRequest(request.settings(), request.indices()); - result.masterNodeTimeout(request.timeout()); + result.masterNodeTimeout(request.masterNodeTimeout()); result.timeout(request.timeout()); result.indicesOptions(request.indicesOptions()); result.setPreserveExisting(request.isPreserveExisting()); diff --git a/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java b/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java index ab3f82fff75f5..a11ceddf28788 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; @@ -33,8 +34,16 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.PublishClusterStateAction; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Stream; import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -43,6 +52,11 @@ @ClusterScope(scope = TEST, minNumDataNodes = 2) public class AckClusterUpdateSettingsIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() @@ -156,4 +170,32 @@ public void testOpenIndexNoAcknowledgement() { assertThat(openIndexResponse.isAcknowledged(), equalTo(false)); ensureGreen("test"); // make sure that recovery from disk has completed, so that check index doesn't fail. 
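The `UpdateSettingsRequestStreamableTests` hunk above wraps the timeout mutators in `randomValueOtherThan`, so a mutated instance can never randomly collide with the original value and produce a false test failure. A standalone sketch of that idiom, assuming the helper's contract is simply resample-until-different (this is an illustrative re-implementation, not the test framework's code):

[source,java]
--------------------------------------------
import java.util.Objects;
import java.util.Random;
import java.util.function.Supplier;

// Illustrative re-implementation of the resample-until-different idiom
// used by the mutateInstance fixes above.
final class RandomOtherThan {

    private static final Random RANDOM = new Random();

    static <T> T randomValueOtherThan(T input, Supplier<T> randomSupplier) {
        T value;
        do {
            // keep drawing until the candidate differs from the input
            value = randomSupplier.get();
        } while (Objects.equals(value, input));
        return value;
    }

    public static void main(String[] args) {
        String current = "30s";
        String mutated = randomValueOtherThan(current,
            () -> RANDOM.nextInt(60) + "s");
        System.out.println(mutated); // guaranteed to differ from "30s"
    }
}
--------------------------------------------

The same reasoning explains the `copyRequest` fix just above: a copy helper that reads `request.timeout()` into `masterNodeTimeout` silently equalizes two fields that the mutators are supposed to vary independently.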
} + + public void testAckingFailsIfNotPublishedToAllNodes() { + String masterNode = internalCluster().getMasterName(); + String nonMasterNode = Stream.of(internalCluster().getNodeNames()) + .filter(node -> node.equals(masterNode) == false).findFirst().get(); + + MockTransportService masterTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, masterNode); + MockTransportService nonMasterTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, nonMasterNode); + + logger.info("blocking cluster state publishing from master [{}] to non master [{}]", masterNode, nonMasterNode); + if (randomBoolean() && internalCluster().numMasterNodes() != 2) { + masterTransportService.addFailToSendNoConnectRule(nonMasterTransportService, PublishClusterStateAction.SEND_ACTION_NAME); + } else { + masterTransportService.addFailToSendNoConnectRule(nonMasterTransportService, PublishClusterStateAction.COMMIT_ACTION_NAME); + } + + CreateIndexResponse response = client().admin().indices().prepareCreate("test").get(); + assertFalse(response.isAcknowledged()); + + logger.info("waiting for cluster to reform"); + masterTransportService.clearRule(nonMasterTransportService); + + ensureStableCluster(internalCluster().size()); + + assertAcked(client().admin().indices().prepareDelete("test")); + } } diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 344c5567a8657..f688291237813 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -53,7 +53,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.greaterThanOrEqualTo; /** @@ -256,8 +255,8 @@ public void testElectMasterWithLatestVersion() throws Exception { internalCluster().setDisruptionScheme(isolatePreferredMaster); isolatePreferredMaster.startDisrupting(); - assertAcked(client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings( - Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0))); + client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings( + Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)).get(); internalCluster().clearDisruptionScheme(false); internalCluster().setDisruptionScheme(isolateAllNodes); diff --git a/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java index f32e93bb82dbd..03c0df43591ba 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java @@ -175,17 +175,19 @@ public void testNodesFaultDetectionConnectOnDisconnect() throws InterruptedExcep final Settings pingSettings = Settings.builder() .put(FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING.getKey(), shouldRetry) .put(FaultDetection.PING_INTERVAL_SETTING.getKey(), "5m").build(); - ClusterState clusterState = 
ClusterState.builder(new ClusterName("test")).nodes(buildNodesForA(true)).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()) + .nodes(buildNodesForA(true)).build(); NodesFaultDetection nodesFDA = new NodesFaultDetection(Settings.builder().put(settingsA).put(pingSettings).build(), - threadPool, serviceA, clusterState.getClusterName()); + threadPool, serviceA, () -> clusterState, clusterState.getClusterName()); nodesFDA.setLocalNode(nodeA); NodesFaultDetection nodesFDB = new NodesFaultDetection(Settings.builder().put(settingsB).put(pingSettings).build(), - threadPool, serviceB, clusterState.getClusterName()); + threadPool, serviceB, () -> clusterState, clusterState.getClusterName()); nodesFDB.setLocalNode(nodeB); final CountDownLatch pingSent = new CountDownLatch(1); nodesFDB.addListener(new NodesFaultDetection.Listener() { @Override public void onPingReceived(NodesFaultDetection.PingRequest pingRequest) { + assertThat(pingRequest.clusterStateVersion(), equalTo(clusterState.version())); pingSent.countDown(); } }); diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index fc284b9f5e80c..fdc36152cc895 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -150,7 +150,6 @@ public Path nodeConfigPath(int nodeOrdinal) { internalCluster().getClusterName(), configurationSource, 0, - false, "other", Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), Function.identity())) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java index d119a27f22eb5..8621e7758383c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java @@ -36,8 +36,8 @@ public void setupProperties() { public void testValueFormat() { MappedFieldType ft = createDefaultFieldType(); - assertEquals("false", ft.docValueFormat(null, null).format(0)); - assertEquals("true", ft.docValueFormat(null, null).format(1)); + assertEquals(false, ft.docValueFormat(null, null).format(0)); + assertEquals(true, ft.docValueFormat(null, null).format(1)); } public void testValueForSearch() { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index be03a28a0aad2..1381b6e920559 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -397,6 +397,19 @@ public void testFieldValueValidation() throws Exception { assertThat(cause, instanceOf(IllegalArgumentException.class)); assertThat(cause.getMessage(), containsString("[0x1e]")); } + + // empty inputs are ignored + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .array("completion", " ", "") + .endObject()), + XContentType.JSON)); + assertThat(doc.docs().size(), equalTo(1)); + assertNull(doc.docs().get(0).get("completion")); + 
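The completion-mapper assertions around this point verify that blank suggestion inputs are dropped at parse time and that the field name is recorded under `_ignored` instead of the whole document being rejected. A rough sketch of that filtering behaviour; this is illustrative only, not the mapper's actual implementation:

[source,java]
--------------------------------------------
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative filter matching the behaviour asserted above: blank
// completion inputs are skipped and the field is noted in "_ignored".
final class CompletionInputFilter {

    static List<String> filterInputs(String fieldName, List<String> inputs,
                                     List<String> ignoredFields) {
        List<String> accepted = new ArrayList<>();
        for (String input : inputs) {
            if (input == null || input.trim().isEmpty()) {
                // an empty suggestion is unusable; remember the field name
                // rather than failing the document
                if (ignoredFields.contains(fieldName) == false) {
                    ignoredFields.add(fieldName);
                }
            } else {
                accepted.add(input);
            }
        }
        return accepted;
    }

    public static void main(String[] args) {
        List<String> ignored = new ArrayList<>();
        List<String> kept = filterInputs("completion",
            Arrays.asList(" ", "", "suggestion"), ignored);
        System.out.println(kept);    // [suggestion]
        System.out.println(ignored); // [completion]
    }
}
--------------------------------------------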
assertNotNull(doc.docs().get(0).getField("_ignored")); + IndexableField ignoredFields = doc.docs().get(0).getField("_ignored"); + assertThat(ignoredFields.stringValue(), equalTo("completion")); } public void testPrefixQueryType() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index 3282077ba6a77..6356b2122edbf 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -59,6 +59,7 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase randomAlphaOfLengthBetween(1, 16))); } - innerHits.setDocValueFields(randomListStuff(16, () -> randomAlphaOfLengthBetween(1, 16))); + innerHits.setDocValueFields(randomListStuff(16, + () -> new FieldAndFormat(randomAlphaOfLengthBetween(1, 16), + randomBoolean() ? null : DocValueFieldsContext.USE_DEFAULT_FORMAT))); // Random script fields deduped on their field name. Map scriptFields = new HashMap<>(); for (SearchSourceBuilder.ScriptField field: randomListStuff(16, InnerHitBuilderTests::randomScript)) { @@ -187,9 +191,9 @@ static InnerHitBuilder mutate(InnerHitBuilder original) throws IOException { modifiers.add(() -> copy.setName(randomValueOtherThan(copy.getName(), () -> randomAlphaOfLengthBetween(1, 16)))); modifiers.add(() -> { if (randomBoolean()) { - copy.setDocValueFields(randomValueOtherThan(copy.getDocValueFields(), () -> { - return randomListStuff(16, () -> randomAlphaOfLengthBetween(1, 16)); - })); + copy.setDocValueFields(randomValueOtherThan(copy.getDocValueFields(), + () -> randomListStuff(16, () -> new FieldAndFormat(randomAlphaOfLengthBetween(1, 16), + randomBoolean() ? null : DocValueFieldsContext.USE_DEFAULT_FORMAT)))); } else { copy.addDocValueField(randomAlphaOfLengthBetween(1, 16)); } diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 108b41d54a08e..c4292410d0ac5 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -68,7 +69,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.MockIndexEventListener; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportRequest; @@ -108,6 +108,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/30228") // What if DV is corrupted? 
public class CorruptedFileIT extends ESIntegTestCase { @Override @@ -470,8 +471,6 @@ protected void sendRequest(Connection connection, long requestId, String action, * TODO once checksum verification on snapshotting is implemented this test needs to be fixed or split into several * parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30577") - @TestLogging("org.elasticsearch.repositories:TRACE,org.elasticsearch.snapshots:TRACE,org.elasticsearch.index.engine:DEBUG") public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException { int numDocs = scaledRandomIntBetween(100, 1000); internalCluster().ensureAtLeastNumDataNodes(2); @@ -520,10 +519,6 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I break; } } - if (snapshotState != SnapshotState.PARTIAL) { - logger.info("--> listing shard files for investigation"); - files.forEach(f -> logger.info("path: {}", f.toAbsolutePath())); - } assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); assertThat(corruptedFile, notNullValue()); } diff --git a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index afe421a2916b1..41a245aca9cd3 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -181,6 +181,12 @@ public void testDeleteCreateInOneBulk() throws Exception { logger.info("--> letting cluster proceed"); disruption.stopDisrupting(); ensureGreen(TimeValue.timeValueMinutes(30), "test"); + // due to publish_timeout of 0, wait for data node to have cluster state fully applied + assertBusy(() -> { + long masterClusterStateVersion = internalCluster().clusterService(internalCluster().getMasterName()).state().version(); + long dataClusterStateVersion = internalCluster().clusterService(dataNode).state().version(); + assertThat(masterClusterStateVersion, equalTo(dataClusterStateVersion)); + }); assertHitCount(client().prepareSearch("test").get(), 0); } diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index 7bf5308eb635e..e5cfbf98b3db9 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -85,20 +85,20 @@ public void testSerialization() throws Exception { } public void testRawFormat() { - assertEquals("0", DocValueFormat.RAW.format(0)); - assertEquals("-1", DocValueFormat.RAW.format(-1)); - assertEquals("1", DocValueFormat.RAW.format(1)); + assertEquals(0L, DocValueFormat.RAW.format(0)); + assertEquals(-1L, DocValueFormat.RAW.format(-1)); + assertEquals(1L, DocValueFormat.RAW.format(1)); - assertEquals("0.0", DocValueFormat.RAW.format(0d)); - assertEquals("0.5", DocValueFormat.RAW.format(.5d)); - assertEquals("-1.0", DocValueFormat.RAW.format(-1d)); + assertEquals(0d, DocValueFormat.RAW.format(0d)); + assertEquals(.5d, DocValueFormat.RAW.format(.5d)); + assertEquals(-1d, DocValueFormat.RAW.format(-1d)); assertEquals("abc", DocValueFormat.RAW.format(new BytesRef("abc"))); } public void testBooleanFormat() { - assertEquals("false", DocValueFormat.BOOLEAN.format(0)); - 
assertEquals("true", DocValueFormat.BOOLEAN.format(1)); + assertEquals(false, DocValueFormat.BOOLEAN.format(0)); + assertEquals(true, DocValueFormat.BOOLEAN.format(1)); } public void testIpFormat() { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java index 884f9bfbe0d20..aa9d25af49e8a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java @@ -37,7 +37,7 @@ public class InternalSumTests extends InternalAggregationTestCase { @Override protected InternalSum createTestInstance(String name, List pipelineAggregators, Map metaData) { double value = frequently() ? randomDouble() : randomFrom(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NaN); - DocValueFormat formatter = randomFrom(new DocValueFormat.Decimal("###.##"), DocValueFormat.BOOLEAN, DocValueFormat.RAW); + DocValueFormat formatter = randomFrom(new DocValueFormat.Decimal("###.##"), DocValueFormat.RAW); return new InternalSum(name, value, formatter, pipelineAggregators, metaData); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 4f8493c0b001f..952eb22848e1a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -583,7 +583,7 @@ public void testFetchFeatures() { .highlighter(new HighlightBuilder().field("text")) .explain(true) .storedField("text") - .fieldDataField("field1") + .docValueField("field1") .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) .fetchSource("text", null) .version(true) @@ -865,7 +865,7 @@ public void testNestedFetchFeatures() { .addAggregation( nested("to-comments", "comments").subAggregation( topHits("top-comments").size(1).highlighter(new HighlightBuilder().field(hlField)).explain(true) - .fieldDataField("comments.user") + .docValueField("comments.user") .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())).fetchSource("comments.message", null) .version(true).sort("comments.date", SortOrder.ASC))).get(); assertHitCount(searchResponse, 2); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java index e2ef28480fa79..4d2331b86f2ef 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java @@ -81,7 +81,7 @@ protected final TopHitsAggregationBuilder createTestAggregatorBuilder() { if (randomBoolean()) { int fieldDataFieldsSize = randomInt(25); for (int i = 0; i < fieldDataFieldsSize; i++) { - factory.fieldDataField(randomAlphaOfLengthBetween(5, 50)); + factory.docValueField(randomAlphaOfLengthBetween(5, 50)); } } if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index 36062860202a5..a8a2669ef9b4a 100644 --- 
a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -803,6 +803,65 @@ public void testFieldsPulledFromFieldData() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + + builder = client().prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("text_field", "use_field_mapping") + .addDocValueField("keyword_field", "use_field_mapping") + .addDocValueField("byte_field", "use_field_mapping") + .addDocValueField("short_field", "use_field_mapping") + .addDocValueField("integer_field", "use_field_mapping") + .addDocValueField("long_field", "use_field_mapping") + .addDocValueField("float_field", "use_field_mapping") + .addDocValueField("double_field", "use_field_mapping") + .addDocValueField("date_field", "use_field_mapping") + .addDocValueField("boolean_field", "use_field_mapping") + .addDocValueField("ip_field", "use_field_mapping"); + searchResponse = builder.execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(1)); + fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", + "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field", + "ip_field"))); + + assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo((Object) 3L)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo((Object) 4L)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(Joda.forPattern("dateOptionalTime").printer().print(date))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + + builder = client().prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("byte_field", "#.0") + .addDocValueField("short_field", "#.0") + .addDocValueField("integer_field", "#.0") + .addDocValueField("long_field", "#.0") + .addDocValueField("float_field", "#.0") + .addDocValueField("double_field", "#.0") + .addDocValueField("date_field", "epoch_millis"); + searchResponse = builder.execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); + assertThat(searchResponse.getHits().getHits().length, 
equalTo(1)); + fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", + "float_field", "double_field", "date_field"))); + + assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue(), equalTo("1.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue(), equalTo("2.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo("3.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo("4.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo("5.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo("6.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(Joda.forPattern("epoch_millis").printer().print(date))); } public void testScriptFields() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java index a8f559ce35e4c..83b795621189b 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; +import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -121,6 +122,43 @@ public void testIgnoreMalformed() throws Exception { assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); } + /** + * Test that the indexed shape routing can be provided if it is required + */ + public void testIndexShapeRouting() throws Exception { + String mapping = "{\n" + + " \"_routing\": {\n" + + " \"required\": true\n" + + " },\n" + + " \"properties\": {\n" + + " \"shape\": {\n" + + " \"type\": \"geo_shape\"\n" + + " }\n" + + " }\n" + + " }"; + + + // create index + assertAcked(client().admin().indices().prepareCreate("test").addMapping("doc", mapping, XContentType.JSON).get()); + ensureGreen(); + + String source = "{\n" + + " \"shape\" : {\n" + + " \"type\" : \"circle\",\n" + + " \"coordinates\" : [-45.0, 45.0],\n" + + " \"radius\" : \"100m\"\n" + + " }\n" + + "}"; + + indexRandom(true, client().prepareIndex("test", "doc", "0").setSource(source, XContentType.JSON).setRouting("ABC")); + + SearchResponse searchResponse = client().prepareSearch("test").setQuery( + geoShapeQuery("shape", "0", "doc").indexedShapeIndex("test").indexedShapeRouting("ABC") + ).get(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); + } + private String findNodeName(String index) { ClusterState state = client().admin().cluster().prepareState().get().getState(); IndexShardRoutingTable shard = state.getRoutingTable().index(index).shard(0); diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java index d9d06c26b7dcf..23cb579bfdc92 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ 
b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -61,7 +61,7 @@ public void testRepositoryCreation() throws Exception { logger.info("--> verify the repository"); int numberOfFiles = FileSystemUtils.files(location).length; VerifyRepositoryResponse verifyRepositoryResponse = client.admin().cluster().prepareVerifyRepository("test-repo-1").get(); - assertThat(verifyRepositoryResponse.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + assertThat(verifyRepositoryResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); logger.info("--> verify that we didn't leave any files as a result of verification"); assertThat(FileSystemUtils.files(location).length, equalTo(numberOfFiles)); diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 0396b8ac78820..f26c44e05f506 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -217,9 +217,9 @@ private static String toCamelCase(String s) { // should we expose it, or maybe think about higher level integration of the // fake term frequency feature (LUCENE-7854) .put("delimitedtermfrequency", Void.class) - // LUCENE-8273: ConditionalTokenFilter allows analysis chains to skip + // LUCENE-8273: ProtectedTermFilterFactory allows analysis chains to skip // particular token filters based on the attributes of the current token. - .put("termexclusion", Void.class) + .put("protectedterm", Void.class) .immutableMap(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 7210fadd7ead5..505a5937d290b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1829,7 +1829,7 @@ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOExceptio return new InternalTestCluster(seed, createTempDir(), supportsDedicatedMasters, getAutoMinMasterNodes(), minNumDataNodes, maxNumDataNodes, InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(), - InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins, getClientWrapper()); + nodePrefix, mockPlugins, getClientWrapper()); } protected NodeConfigurationSource getNodeConfigSource() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 0c4ffd62cdde9..b0eef695b7ff1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -176,8 +176,6 @@ public final class InternalTestCluster extends TestCluster { static final int DEFAULT_MIN_NUM_CLIENT_NODES = 0; static final int DEFAULT_MAX_NUM_CLIENT_NODES = 1; - static final boolean DEFAULT_ENABLE_HTTP_PIPELINING = true; - /* sorted map to make traverse order reproducible, concurrent since we do checks on it not within a sync block */ private final NavigableMap nodes = new TreeMap<>(); @@ -224,7 +222,7 @@ public final class InternalTestCluster extends TestCluster { public InternalTestCluster(long 
clusterSeed, Path baseDir, boolean randomlyAddDedicatedMasters, boolean autoManageMinMasterNodes, int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes, - boolean enableHttpPipelining, String nodePrefix, Collection> mockPlugins, Function clientWrapper) { + String nodePrefix, Collection> mockPlugins, Function clientWrapper) { super(clusterSeed); this.autoManageMinMasterNodes = autoManageMinMasterNodes; this.clientWrapper = clientWrapper; @@ -305,7 +303,6 @@ public InternalTestCluster(long clusterSeed, Path baseDir, builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos")); builder.put(TcpTransport.PORT.getKey(), 0); builder.put("http.port", 0); - builder.put("http.pipelining", enableHttpPipelining); if (Strings.hasLength(System.getProperty("tests.es.logger.level"))) { builder.put("logger.level", System.getProperty("tests.es.logger.level")); } @@ -914,7 +911,7 @@ private void clearDataIfNeeded(RestartCallback callback) throws IOException { private void createNewNode(final Settings newSettings) { final long newIdSeed = NodeEnvironment.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id - Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); + Settings finalSettings = Settings.builder().put(node.originalSettings()).put(newSettings).put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); if (DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(finalSettings) == false) { throw new IllegalStateException(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " is not configured after restart of [" + name + "]"); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index e0b501c5f25e6..30ac94e343246 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -22,7 +22,6 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.http.HttpHost; import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -323,7 +322,7 @@ public void test() throws IOException { if (useDefaultNumberOfShards == false && testCandidate.getTestSection().getSkipSection().getFeatures().contains("default_shards") == false) { final Request request = new Request("PUT", "/_template/global"); - request.setHeaders(new BasicHeader("Content-Type", XContentType.JSON.mediaTypeWithoutParameters())); + request.addHeader("Content-Type", XContentType.JSON.mediaTypeWithoutParameters()); request.setEntity(new StringEntity("{\"index_patterns\":[\"*\"],\"settings\":{\"index.number_of_shards\":2}}")); adminClient().performRequest(request); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 36e282f32959d..9481f60d93384 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ 
-19,6 +19,7 @@ package org.elasticsearch.transport.nio; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesReference; @@ -52,6 +53,7 @@ import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.concurrent.ConcurrentMap; +import java.util.function.Consumer; import java.util.function.Supplier; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; @@ -96,9 +98,10 @@ protected void doStart() { if (useNetworkServer) { acceptorCount = 1; } - nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, - AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), - 2, TestingSocketEventHandler::new); + nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, + (s) -> new AcceptorEventHandler(s, this::onNonChannelException), + daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2, + () -> new TestingSocketEventHandler(this::onNonChannelException)); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); clientChannelFactory = new MockTcpChannelFactory(clientProfileSettings, "client"); @@ -172,8 +175,10 @@ public MockSocketChannel createChannel(SocketSelector selector, SocketChannel ch @Override public MockServerChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { MockServerChannel nioServerChannel = new MockServerChannel(profileName, channel, this, selector); + Consumer exceptionHandler = (e) -> logger.error(() -> + new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e); ServerChannelContext context = new ServerChannelContext(nioServerChannel, this, selector, MockNioTransport.this::acceptChannel, - (e) -> {}); + exceptionHandler); nioServerChannel.setContext(context); return nioServerChannel; } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java index 2e2d8aa5adadc..810e42010224c 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport.nio; -import org.apache.logging.log4j.Logger; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.SocketEventHandler; @@ -27,15 +26,16 @@ import java.util.Collections; import java.util.Set; import java.util.WeakHashMap; +import java.util.function.Consumer; public class TestingSocketEventHandler extends SocketEventHandler { - public TestingSocketEventHandler(Logger logger) { - super(logger); - } - private Set hasConnectedMap = Collections.newSetFromMap(new WeakHashMap<>()); + public TestingSocketEventHandler(Consumer exceptionHandler) { + super(exceptionHandler); + } + public void handleConnect(SocketChannelContext context) throws IOException { assert hasConnectedMap.contains(context) == false : "handleConnect should only be called is a channel is not yet connected"; super.handleConnect(context); diff --git 
a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index c70708c73acbf..23f44c560baeb 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -19,8 +19,6 @@ */ package org.elasticsearch.test.test; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; @@ -28,6 +26,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.env.NodeEnvironment; @@ -63,8 +62,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.not; /** @@ -86,16 +83,15 @@ public void testInitializiationIsConsistent() { String clusterName = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); NodeConfigurationSource nodeConfigurationSource = NodeConfigurationSource.EMPTY; int numClientNodes = randomIntBetween(0, 10); - boolean enableHttpPipelining = randomBoolean(); String nodePrefix = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); Path baseDir = createTempDir(); InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity()); + nodePrefix, Collections.emptyList(), Function.identity()); InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity()); + nodePrefix, Collections.emptyList(), Function.identity()); // TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way assertClusters(cluster0, cluster1, false); @@ -211,16 +207,15 @@ public Settings transportClientSettings() { } }; - boolean enableHttpPipelining = randomBoolean(); String nodePrefix = "foobar"; Path baseDir = createTempDir(); InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity()); + nodePrefix, mockPlugins(), Function.identity()); InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity()); + nodePrefix, mockPlugins(), 
Function.identity()); assertClusters(cluster0, cluster1, false); long seed = randomLong(); @@ -280,12 +275,11 @@ public Settings transportClientSettings() { .put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build(); } }; - boolean enableHttpPipelining = randomBoolean(); String nodePrefix = "test"; Path baseDir = createTempDir(); InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes, true, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity()); + nodePrefix, mockPlugins(), Function.identity()); try { cluster.beforeTest(random(), 0.0); final int originalMasterCount = cluster.numMasterNodes(); @@ -390,7 +384,7 @@ public Settings transportClientSettings() { return Settings.builder() .put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build(); } - }, 0, randomBoolean(), "", mockPlugins(), Function.identity()); + }, 0, "", mockPlugins(), Function.identity()); cluster.beforeTest(random(), 0.0); List roles = new ArrayList<>(); for (int i = 0; i < numNodes; i++) { @@ -473,12 +467,13 @@ public Settings transportClientSettings() { .put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build(); } }; - boolean enableHttpPipelining = randomBoolean(); String nodePrefix = "test"; Path baseDir = createTempDir(); + List> plugins = new ArrayList<>(mockPlugins()); + plugins.add(NodeAttrCheckPlugin.class); InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, false, true, 2, 2, - "test", nodeConfigurationSource, 0, enableHttpPipelining, nodePrefix, - mockPlugins(), Function.identity()); + "test", nodeConfigurationSource, 0, nodePrefix, + plugins, Function.identity()); try { cluster.beforeTest(random(), 0.0); assertMMNinNodeSetting(cluster, 2); @@ -509,4 +504,26 @@ public Settings onNodeStopped(String nodeName) throws Exception { cluster.close(); } } + + /** + * Plugin that adds a simple node attribute as setting and checks if that node attribute is not already defined. + * Allows to check that the full-cluster restart logic does not copy over plugin-derived settings. 
+     */
+    public static class NodeAttrCheckPlugin extends Plugin {
+
+        private final Settings settings;
+
+        public NodeAttrCheckPlugin(Settings settings) {
+            this.settings = settings;
+        }
+
+        @Override
+        public Settings additionalSettings() {
+            if (settings.get("node.attr.dummy") != null) {
+                fail("dummy setting already exists");
+            }
+            return Settings.builder().put("node.attr.dummy", true).build();
+        }
+
+    }
 }
diff --git a/x-pack/docs/en/ml/configuring.asciidoc b/x-pack/docs/en/ml/configuring.asciidoc
index 9e7b787dcea60..b794d3ebd3330 100644
--- a/x-pack/docs/en/ml/configuring.asciidoc
+++ b/x-pack/docs/en/ml/configuring.asciidoc
@@ -34,8 +34,17 @@ The scenarios in this section describe some best practices for generating useful
 * <>
 * <>
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/customurl.asciidoc
 include::customurl.asciidoc[]
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/aggregations.asciidoc
 include::aggregations.asciidoc[]
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/categories.asciidoc
 include::categories.asciidoc[]
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/populations.asciidoc
 include::populations.asciidoc[]
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/transforms.asciidoc
 include::transforms.asciidoc[]
diff --git a/x-pack/docs/en/ml/functions.asciidoc b/x-pack/docs/en/ml/functions.asciidoc
index a59b289266760..ae5f768e05697 100644
--- a/x-pack/docs/en/ml/functions.asciidoc
+++ b/x-pack/docs/en/ml/functions.asciidoc
@@ -18,27 +18,6 @@ variations (for example, `count`, `low_count`, and `high_count`). These variations
 apply one-sided tests, detecting anomalies only when the values are low or high,
 depending on which alternative is used.
 
-//For some functions, you can optionally specify a field name in the
-//`by_field_name` property. The analysis then considers whether there is an
-//anomaly for one of more specific values of that field. In {kib}, use the
-//**Key Fields** field in multi-metric jobs or the **by_field_name** field in
-//advanced jobs.
-////
-TODO: Per Sophie, "This is incorrect... Split Data refers to a partition_field_name. Over fields can only be added in Adv Config...
-
-Can you please remove the explanations for by/over/partition fields from the documentation for analytical functions. It's a complex topic and will be easier to review in a separate exercise."
-////
-
-//For some functions, you can also optionally specify a field name in the
-//`over_field_name` property. This property shifts the analysis to be population-
-//or peer-based and uses the field to split the data. In {kib}, use the
-//**Split Data** field in multi-metric jobs or the **over_field_name** field in
-//advanced jobs.
-
-//You can specify a `partition_field_name` with any function. The analysis is then
-//segmented with completely independent baselines for each value of that field.
-//In {kib}, use the **partition_field_name** field in advanced jobs.
-
 You can specify a `summary_count_field_name` with any function except `metric`.
 When you use `summary_count_field_name`, the {ml} features expect the input
 data to be pre-aggregated. The value of the `summary_count_field_name` field
@@ -55,13 +34,6 @@ functions are strongly affected by empty buckets. For this reason, there are
 `non_null_sum` and `non_zero_count` functions, which are tolerant to sparse data.
These functions effectively ignore empty buckets. -//// -Some functions can benefit from overlapping buckets. This improves the overall -accuracy of the results but at the cost of a 2 bucket delay in seeing the results. - -The table below provides a high-level summary of the analytical functions provided by the API. Each of the functions is described in detail over the following pages. Note the examples given in these pages use single Detector Configuration objects. -//// - * <> * <> * <> @@ -70,10 +42,23 @@ The table below provides a high-level summary of the analytical functions provid * <> * <> +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/count.asciidoc include::functions/count.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/geo.asciidoc include::functions/geo.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/info.asciidoc include::functions/info.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/metric.asciidoc include::functions/metric.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/rare.asciidoc include::functions/rare.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/sum.asciidoc include::functions/sum.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/time.asciidoc include::functions/time.asciidoc[] diff --git a/x-pack/docs/en/ml/getting-started.asciidoc b/x-pack/docs/en/ml/getting-started.asciidoc index 5b15de51f0bca..2fd4f1ebe4972 100644 --- a/x-pack/docs/en/ml/getting-started.asciidoc +++ b/x-pack/docs/en/ml/getting-started.asciidoc @@ -72,9 +72,20 @@ significant changes to the system. You can alternatively assign the For more information, see <> and <>. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-data.asciidoc include::getting-started-data.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-wizards.asciidoc include::getting-started-wizards.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-single.asciidoc include::getting-started-single.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-multi.asciidoc include::getting-started-multi.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-forecast.asciidoc include::getting-started-forecast.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-next.asciidoc include::getting-started-next.asciidoc[] diff --git a/x-pack/docs/en/ml/overview.asciidoc b/x-pack/docs/en/ml/overview.asciidoc index b82a281acb0d5..5c941b4eda24c 100644 --- a/x-pack/docs/en/ml/overview.asciidoc +++ b/x-pack/docs/en/ml/overview.asciidoc @@ -17,4 +17,5 @@ include::calendars.asciidoc[] There are a few concepts that are core to {ml} in {xpack}. Understanding these concepts from the outset will tremendously help ease the learning process. 
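To see why the sparse-data note in the functions section above matters, compare a baseline computed over all buckets with one that skips empty buckets. A toy calculation, plain arithmetic rather than {ml} code, with invented bucket counts:

[source,java]
--------------------------------------------
import java.util.Arrays;
import java.util.OptionalDouble;

// Toy illustration: a plain mean over bucket counts treats empty buckets
// as zeros, while the non-zero variant ignores them, which is why
// non_zero_count suits sparsely populated data.
final class SparseBuckets {

    public static void main(String[] args) {
        long[] bucketCounts = {12, 0, 0, 11, 0, 13, 0, 0, 0, 12};

        double meanWithZeros = Arrays.stream(bucketCounts).average().orElse(0);
        OptionalDouble meanNonZero =
            Arrays.stream(bucketCounts).filter(c -> c != 0).average();

        System.out.printf("count baseline: %.1f%n", meanWithZeros);          // 4.8
        System.out.printf("non_zero_count baseline: %.1f%n",
            meanNonZero.orElse(0));                                          // 12.0
    }
}
--------------------------------------------

With the dragged-down 4.8 baseline, every genuinely populated bucket looks anomalous; the non-zero baseline of 12.0 only flags real deviations.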
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/architecture.asciidoc include::architecture.asciidoc[] diff --git a/x-pack/docs/en/security/auditing.asciidoc b/x-pack/docs/en/security/auditing/event-types.asciidoc similarity index 52% rename from x-pack/docs/en/security/auditing.asciidoc rename to x-pack/docs/en/security/auditing/event-types.asciidoc index ee508a5ac8d2d..1a6d4b02b0c86 100644 --- a/x-pack/docs/en/security/auditing.asciidoc +++ b/x-pack/docs/en/security/auditing/event-types.asciidoc @@ -1,50 +1,10 @@ [role="xpack"] -[[auditing]] -== Auditing security events - -You can enable auditing to keep track of security-related events such as -authentication failures and refused connections. Logging these events enables you -to monitor your cluster for suspicious activity and provides evidence in the -event of an attack. - -[IMPORTANT] -============================================================================ -Audit logs are **disabled** by default. To enable this functionality, you -must set `xpack.security.audit.enabled` to `true` in `elasticsearch.yml`. -============================================================================ - -{Security} provides two ways to persist audit logs: - -* The <> output, which persists events to - a dedicated `_access.log` file on the host's file system. -* The <> output, which persists events to an Elasticsearch index. -The audit index can reside on the same cluster, or a separate cluster. - -By default, only the `logfile` output is used when enabling auditing. -To facilitate browsing and analyzing the events, you can also enable -indexing by setting `xpack.security.audit.outputs` in `elasticsearch.yml`: - -[source,yaml] ----------------------------- -xpack.security.audit.outputs: [ index, logfile ] ----------------------------- - -The `index` output type should be used in conjunction with the `logfile` -output type Because it is possible for the `index` output type to lose -messages if the target index is unavailable, the `access.log` should be -used as the official record of events. - -NOTE: Audit events are batched for indexing so there is a lag before -events appear in the index. You can control how frequently batches of -events are pushed to the index by setting -`xpack.security.audit.index.flush_interval` in `elasticsearch.yml`. - [float] [[audit-event-types]] === Audit event types -Each request may generate multiple audit events. -The following is a list of the events that can be generated: +When you are <>, each request can generate +multiple audit events. The following is a list of the events that can be generated: |====== | `anonymous_access_denied` | | | Logged when a request is denied due to a missing @@ -281,195 +241,3 @@ The log level determines which attributes are included in a log entry. | `rule` | The <> rule that denied the request. |====== - -[float] -[[audit-log-output]] -=== Logfile audit output - -The `logfile` audit output is the default output for auditing. It writes data to -the `_access.log` file in the logs directory. - -[float] -[[audit-log-entry-format]] -=== Log entry format - -The format of a log entry is: - -[source,txt] ----------------------------------------------------------------------------- -[] [] [] [] ----------------------------------------------------------------------------- - -`` :: When the event occurred. You can configure the - timestamp format in `log4j2.properties`. -`` :: Information about the local node that generated - the log entry. 
You can control what node information - is included by configuring the - {ref}/auditing-settings.html#node-audit-settings[local node info settings]. -`` :: The layer from which this event originated: - `rest`, `transport` or `ip_filter`. -`` :: The type of event that occurred: `anonymous_access_denied`, - `authentication_failed`, `access_denied`, `access_granted`, - `connection_granted`, `connection_denied`. -`` :: A comma-separated list of key-value pairs that contain - data pertaining to the event. Formatted as - `attr1=[val1], attr2=[val2]`. See <> for the attributes that can be included - for each type of event. - -[float] -[[audit-log-settings]] -=== Logfile output settings - -The events and some other information about what gets logged can be -controlled using settings in the `elasticsearch.yml` file. See -{ref}/auditing-settings.html#event-audit-settings[Audited Event Settings] and -{ref}/auditing-settings.html#node-audit-settings[Local Node Info Settings]. - -IMPORTANT: No filtering is performed when auditing, so sensitive data may be -audited in plain text when including the request body in audit events. - -[[logging-file]] -You can also configure how the logfile is written in the `log4j2.properties` -file located in `CONFIG_DIR`. By default, audit information is appended to the -`_access.log` file located in the standard Elasticsearch `logs` directory -(typically located at `$ES_HOME/logs`). The file rolls over on a daily basis. - -[float] -[[audit-log-ignore-policy]] -=== Logfile audit events ignore policies - -The comprehensive audit trail is necessary to ensure accountability. It offers tremendous -value during incident response and can even be required for demonstrating compliance. - -The drawback of an audited system is represented by the inevitable performance penalty incurred. -In all truth, the audit trail spends _I/O ops_ that are not available anymore for the user's queries. -Sometimes the verbosity of the audit trail may become a problem that the event type restrictions, -<>, will not alleviate. - -*Audit events ignore policies* are a finer way to tune the verbosity of the audit trail. -These policies define rules that match audit events which will be _ignored_ (read as: not printed). -Rules match on the values of attributes of audit events and complement the <> method. -Imagine the corpus of audit events and the policies chopping off unwanted events. - -IMPORTANT: When utilizing audit events ignore policies you are acknowledging potential -accountability gaps that could render illegitimate actions undetectable. -Please take time to review these policies whenever your system architecture changes. - -A policy is a named set of filter rules. Each filter rule applies to a single event attribute, -one of the `users`, `realms`, `roles` or `indices` attributes. The filter rule defines -a list of {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp], *any* of which has to match the value of the audit -event attribute for the rule to match. -A policy matches an event if *all* the rules comprising it match the event. -An audit event is ignored, therefore not printed, if it matches *any* policy. All other -non-matching events are printed as usual. - -All policies are defined under the `xpack.security.audit.logfile.events.ignore_filters` -settings namespace. 
For example, the following policy named _example1_ matches -events from the _kibana_ or _admin_user_ principals **and** operating over indices of the -wildcard form _app-logs*_: - -[source,yaml] ----------------------------- -xpack.security.audit.logfile.events.ignore_filters: - example1: - users: ["kibana", "admin_user"] - indices: ["app-logs*"] ----------------------------- - -An audit event generated by the _kibana_ user and operating over multiple indices -, some of which do not match the indices wildcard, will not match. -As expected, operations generated by all other users (even operating only on indices that -match the _indices_ filter) will not match this policy either. - -Audit events of different types may have <>. -If an event does not contain an attribute for which some policy defines filters, the -event will not match the policy. -For example, the following policy named _example2_, will never match `authentication_success` or -`authentication_failed` events, irrespective of the user's roles, because these -event schemas do not contain the `role` attribute: - -[source,yaml] ----------------------------- -xpack.security.audit.logfile.events.ignore_filters: - example2: - roles: ["admin", "ops_admin_*"] ----------------------------- - -Likewise, any events of users with multiple roles, some of which do not match the -regexps will not match this policy. - -For completeness, although practical use cases should be sparse, a filter can match -a missing attribute of an event, using the empty string ("") or the empty list ([]). -For example, the following policy will match events that do not have the `indices` -attribute (`anonymous_access_denied`, `authentication_success` and other types) as well -as events over the _next_ index. - -[source,yaml] ----------------------------- -xpack.security.audit.logfile.events.ignore_filters: - example3: - indices: ["next", ""] ----------------------------- - - -[float] -[[audit-index]] -=== Index audit output - -In addition to logging to a file, you can store audit logs in Elasticsearch -rolling indices. These indices can be either on the same cluster, or on a -remote cluster. You configure the following settings in -`elasticsearch.yml` to control how audit entries are indexed. To enable -this output, you need to configure the setting `xpack.security.audit.outputs` -in the `elasticsearch.yml` file: - -[source,yaml] ----------------------------- -xpack.security.audit.outputs: [ index, logfile ] ----------------------------- - -For more configuration options, see -{ref}/auditing-settings.html#index-audit-settings[Audit log indexing configuration settings]. - -IMPORTANT: No filtering is performed when auditing, so sensitive data may be -audited in plain text when including the request body in audit events. - -[float] -==== Audit index settings - -You can also configure settings for the indices that the events are stored in. -These settings are configured in the `xpack.security.audit.index.settings` namespace -in `elasticsearch.yml`. 
For example, the following configuration sets the -number of shards and replicas to 1 for the audit indices: - -[source,yaml] ----------------------------- -xpack.security.audit.index.settings: - index: - number_of_shards: 1 - number_of_replicas: 1 ----------------------------- - -[float] -==== Forwarding audit logs to a remote cluster - -To index audit events to a remote Elasticsearch cluster, you configure -the following `xpack.security.audit.index.client` settings: - -* `xpack.security.audit.index.client.hosts` -* `xpack.security.audit.index.client.cluster.name` -* `xpack.security.audit.index.client.xpack.security.user` - -For more information about these settings, see -{ref}/auditing-settings.html#remote-audit-settings[Remote Audit Log Indexing Configuration Settings]. - -You can pass additional settings to the remote client by specifying them in the -`xpack.security.audit.index.client` namespace. For example, to allow the remote -client to discover all of the nodes in the remote cluster you can specify the -`client.transport.sniff` setting: - -[source,yaml] ----------------------------- -xpack.security.audit.index.client.transport.sniff: true ----------------------------- diff --git a/x-pack/docs/en/security/auditing/forwarding-logs.asciidoc b/x-pack/docs/en/security/auditing/forwarding-logs.asciidoc new file mode 100644 index 0000000000000..01ed0f72e746d --- /dev/null +++ b/x-pack/docs/en/security/auditing/forwarding-logs.asciidoc @@ -0,0 +1,24 @@ +[role="xpack"] +[float] +[[forwarding-audit-logfiles]] +==== Forwarding audit logs to a remote cluster + +To index audit events to a remote Elasticsearch cluster, you configure +the following `xpack.security.audit.index.client` settings: + +* `xpack.security.audit.index.client.hosts` +* `xpack.security.audit.index.client.cluster.name` +* `xpack.security.audit.index.client.xpack.security.user` + +For more information about these settings, see +{ref}/auditing-settings.html#remote-audit-settings[Remote Audit Log Indexing Configuration Settings]. + +You can pass additional settings to the remote client by specifying them in the +`xpack.security.audit.index.client` namespace. 
For example, to allow the remote
+client to discover all of the nodes in the remote cluster you can specify the
+`client.transport.sniff` setting:
+
+[source,yaml]
+----------------------------
+xpack.security.audit.index.client.transport.sniff: true
+----------------------------
diff --git a/x-pack/docs/en/security/auditing/index.asciidoc b/x-pack/docs/en/security/auditing/index.asciidoc
new file mode 100644
index 0000000000000..e82fd4397fb71
--- /dev/null
+++ b/x-pack/docs/en/security/auditing/index.asciidoc
@@ -0,0 +1,15 @@
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/overview.asciidoc
+include::overview.asciidoc[]
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/event-types.asciidoc
+include::event-types.asciidoc[]
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/output-logfile.asciidoc
+include::output-logfile.asciidoc[]
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/output-index.asciidoc
+include::output-index.asciidoc[]
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/forwarding-logs.asciidoc
+include::forwarding-logs.asciidoc[]
\ No newline at end of file
diff --git a/x-pack/docs/en/security/auditing/output-index.asciidoc b/x-pack/docs/en/security/auditing/output-index.asciidoc
new file mode 100644
index 0000000000000..e3ba805d715e7
--- /dev/null
+++ b/x-pack/docs/en/security/auditing/output-index.asciidoc
@@ -0,0 +1,38 @@
+[role="xpack"]
+[float]
+[[audit-index]]
+=== Index audit output
+
+In addition to logging to a file, you can store audit logs in Elasticsearch
+rolling indices. These indices can be either on the same cluster, or on a
+remote cluster. You configure the following settings in
+`elasticsearch.yml` to control how audit entries are indexed. To enable
+this output, you need to configure the setting `xpack.security.audit.outputs`
+in the `elasticsearch.yml` file:
+
+[source,yaml]
+----------------------------
+xpack.security.audit.outputs: [ index, logfile ]
+----------------------------
+
+For more configuration options, see
+{ref}/auditing-settings.html#index-audit-settings[Audit log indexing configuration settings].
+
+IMPORTANT: No filtering is performed when auditing, so sensitive data may be
+audited in plain text when including the request body in audit events.
+
+[float]
+==== Audit index settings
+
+You can also configure settings for the indices that the events are stored in.
+These settings are configured in the `xpack.security.audit.index.settings` namespace
+in `elasticsearch.yml`. For example, the following configuration sets the
+number of shards and replicas to 1 for the audit indices:
+
+[source,yaml]
+----------------------------
+xpack.security.audit.index.settings:
+  index:
+    number_of_shards: 1
+    number_of_replicas: 1
+----------------------------
diff --git a/x-pack/docs/en/security/auditing/output-logfile.asciidoc b/x-pack/docs/en/security/auditing/output-logfile.asciidoc
new file mode 100644
index 0000000000000..095f57cf61e48
--- /dev/null
+++ b/x-pack/docs/en/security/auditing/output-logfile.asciidoc
@@ -0,0 +1,130 @@
+[role="xpack"]
+[float]
+[[audit-log-output]]
+=== Logfile audit output
+
+The `logfile` audit output is the default output for auditing. It writes data to
+the `<clustername>_access.log` file in the logs directory.
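As a minimal sketch of how these pieces fit together, auditing with only this default
`logfile` output can be switched on from `elasticsearch.yml`; both settings are
documented in this change, and the combination shown here is illustrative:

[source,yaml]
----------------------------
# turn auditing on; it is disabled by default
xpack.security.audit.enabled: true
# optional here, since logfile is the default output
xpack.security.audit.outputs: [ logfile ]
----------------------------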
+
+[float]
+[[audit-log-entry-format]]
+=== Log entry format
+
+The format of a log entry is:
+
+[source,txt]
+----------------------------------------------------------------------------
+[<timestamp>] [<local_node_info>] [<layer>] [<event_type>] <attribute_list>
+----------------------------------------------------------------------------
+
+`<timestamp>` :: When the event occurred. You can configure the
+    timestamp format in `log4j2.properties`.
+`<local_node_info>` :: Information about the local node that generated
+    the log entry. You can control what node information
+    is included by configuring the
+    {ref}/auditing-settings.html#node-audit-settings[local node info settings].
+`<layer>` :: The layer from which this event originated:
+    `rest`, `transport` or `ip_filter`.
+`<event_type>` :: The type of event that occurred: `anonymous_access_denied`,
+    `authentication_failed`, `access_denied`, `access_granted`,
+    `connection_granted`, `connection_denied`.
+`<attribute_list>` :: A comma-separated list of key-value pairs that contain
+    data pertaining to the event. Formatted as
+    `attr1=[val1], attr2=[val2]`. See <> for the attributes that can be included
+    for each type of event.
+
+[float]
+[[audit-log-settings]]
+=== Logfile output settings
+
+The events and some other information about what gets logged can be
+controlled using settings in the `elasticsearch.yml` file. See
+{ref}/auditing-settings.html#event-audit-settings[Audited Event Settings] and
+{ref}/auditing-settings.html#node-audit-settings[Local Node Info Settings].
+
+IMPORTANT: No filtering is performed when auditing, so sensitive data may be
+audited in plain text when including the request body in audit events.
+
+[[logging-file]]
+You can also configure how the logfile is written in the `log4j2.properties`
+file located in `CONFIG_DIR`. By default, audit information is appended to the
+`<clustername>_access.log` file located in the standard Elasticsearch `logs` directory
+(typically located at `$ES_HOME/logs`). The file rolls over on a daily basis.
+
+[float]
+[[audit-log-ignore-policy]]
+=== Logfile audit events ignore policies
+
+The comprehensive audit trail is necessary to ensure accountability. It offers tremendous
+value during incident response and can even be required for demonstrating compliance.
+
+The drawback of an audited system is the inevitable performance penalty incurred.
+In truth, the audit trail spends _I/O ops_ that are no longer available for the user's queries.
+Sometimes the verbosity of the audit trail may become a problem that the event type restrictions,
+<>, will not alleviate.
+
+*Audit events ignore policies* are a finer way to tune the verbosity of the audit trail.
+These policies define rules that match audit events which will be _ignored_ (read as: not printed).
+Rules match on the values of attributes of audit events and complement the <> method.
+Imagine the corpus of audit events and the policies chopping off unwanted events.
+
+IMPORTANT: When utilizing audit events ignore policies you are acknowledging potential
+accountability gaps that could render illegitimate actions undetectable.
+Please take time to review these policies whenever your system architecture changes.
+
+A policy is a named set of filter rules. Each filter rule applies to a single event attribute,
+one of the `users`, `realms`, `roles` or `indices` attributes. The filter rule defines
+a list of {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp], *any* of which has to match the value of the audit
+event attribute for the rule to match.
+A policy matches an event if *all* the rules comprising it match the event.
+An audit event is ignored, therefore not printed, if it matches *any* policy. All other
+non-matching events are printed as usual.
+
+All policies are defined under the `xpack.security.audit.logfile.events.ignore_filters`
+settings namespace. For example, the following policy named _example1_ matches
+events from the _kibana_ or _admin_user_ principals **and** operating over indices of the
+wildcard form _app-logs*_:
+
+[source,yaml]
+----------------------------
+xpack.security.audit.logfile.events.ignore_filters:
+  example1:
+    users: ["kibana", "admin_user"]
+    indices: ["app-logs*"]
+----------------------------
+
+An audit event generated by the _kibana_ user and operating over multiple indices,
+some of which do not match the indices wildcard, will not match.
+As expected, operations generated by all other users (even operating only on indices that
+match the _indices_ filter) will not match this policy either.
+
+Audit events of different types may have <>.
+If an event does not contain an attribute for which some policy defines filters, the
+event will not match the policy.
+For example, the following policy named _example2_ will never match `authentication_success` or
+`authentication_failed` events, irrespective of the user's roles, because these
+event schemas do not contain the `role` attribute:
+
+[source,yaml]
+----------------------------
+xpack.security.audit.logfile.events.ignore_filters:
+  example2:
+    roles: ["admin", "ops_admin_*"]
+----------------------------
+
+Likewise, any events of users with multiple roles, some of which do not match the
+regexps, will not match this policy.
+
+For completeness, although practical use cases should be sparse, a filter can match
+a missing attribute of an event, using the empty string ("") or the empty list ([]).
+For example, the following policy will match events that do not have the `indices`
+attribute (`anonymous_access_denied`, `authentication_success` and other types) as well
+as events over the _next_ index.
+
+[source,yaml]
+----------------------------
+xpack.security.audit.logfile.events.ignore_filters:
+  example3:
+    indices: ["next", ""]
+----------------------------
diff --git a/x-pack/docs/en/security/auditing/overview.asciidoc b/x-pack/docs/en/security/auditing/overview.asciidoc
new file mode 100644
index 0000000000000..b60122612a0ab
--- /dev/null
+++ b/x-pack/docs/en/security/auditing/overview.asciidoc
@@ -0,0 +1,40 @@
+[role="xpack"]
+[[auditing]]
+== Auditing security events
+
+You can enable auditing to keep track of security-related events such as
+authentication failures and refused connections. Logging these events enables you
+to monitor your cluster for suspicious activity and provides evidence in the
+event of an attack.
+
+[IMPORTANT]
+============================================================================
+Audit logs are **disabled** by default. To enable this functionality, you
+must set `xpack.security.audit.enabled` to `true` in `elasticsearch.yml`.
+============================================================================
+
+{Security} provides two ways to persist audit logs:
+
+* The <> output, which persists events to
+  a dedicated `<clustername>_access.log` file on the host's file system.
+* The <> output, which persists events to an Elasticsearch index.
+The audit index can reside on the same cluster, or a separate cluster.
+
+By default, only the `logfile` output is used when enabling auditing.
+To facilitate browsing and analyzing the events, you can also enable
+indexing by setting `xpack.security.audit.outputs` in `elasticsearch.yml`:
+
+[source,yaml]
+----------------------------
+xpack.security.audit.outputs: [ index, logfile ]
+----------------------------
+
+The `index` output type should be used in conjunction with the `logfile`
+output type. Because it is possible for the `index` output type to lose
+messages if the target index is unavailable, the `access.log` should be
+used as the official record of events.
+
+NOTE: Audit events are batched for indexing so there is a lag before
+events appear in the index. You can control how frequently batches of
+events are pushed to the index by setting
+`xpack.security.audit.index.flush_interval` in `elasticsearch.yml`.
diff --git a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc
new file mode 100644
index 0000000000000..cbcbeebb359ef
--- /dev/null
+++ b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc
@@ -0,0 +1,225 @@
+[role="xpack"]
+[[configuring-saml-realm]]
+=== Configuring a SAML realm
+
+The {stack} supports Security Assertion Markup Language Single Sign On (SAML SSO)
+into {kib} with {es} as a backend service. In particular, the {stack} supports
+the SAML 2.0 Web Browser SSO and the SAML 2.0 Single Logout profiles. It can
+integrate with any identity provider (IdP) that supports at least the SAML 2.0
+Web Browser SSO Profile.
+
+In SAML terminology, the {stack} is operating as a _service provider_ (SP). For more
+information, see {stack-ov}/saml-realm.html[SAML authentication] and
+{stack-ov}/saml-guide.html[Configuring SAML SSO on the {stack}].
+
+[NOTE]
+--
+
+* If you configure a SAML realm for use in {kib}, you should also configure
+another realm, such as the native realm, in your authentication chain.
+* These instructions assume that you have an existing SAML identity provider.
+--
+
+To enable SAML authentication in {es} and add the {stack} as a service provider:
+
+. Enable SSL/TLS for HTTP.
++
+--
+If your {es} cluster is operating in production mode, you must
+configure the HTTP interface to use TLS before you can enable SAML
+authentication.
+
+See <>.
+--
+
+. Enable the Token Service.
++
+--
+The {es} SAML implementation makes use of the {es} Token Service. This service
+is automatically enabled if you configure TLS on the HTTP interface. You can
+explicitly enable it by including the following setting in your
+`elasticsearch.yml` file:
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.authc.token.enabled: true
+------------------------------------------------------------
+--
+
+. Configure a SAML IdP metadata file.
++
+--
+The {stack} uses a standard SAML metadata document in XML format, which defines
+the capabilities and features of your identity provider. You should be able to
+download or generate such a document within your IdP administration interface.
+
+Most IdPs will provide an appropriate metadata file with all the features that
+the {stack} requires. For more information, see
+{stack-ov}/saml-guide-idp.html[The identity provider].
+--
+
+.. Download the IdP metadata document and store it within the `config` directory
+on each {es} node. For example, store it as `config/saml/idp-metadata.xml`.
+
+.. Get the identifier for your identity provider.
++
+--
+The IdP will have been assigned an identifier (_EntityID_ in SAML terminology),
+which is most commonly expressed in Uniform Resource Identifier (URI) form. Your
+admin interface might tell you what this is, or you might need to read the
+metadata document to find it. Look for the `entityID` attribute on the
+`EntityDescriptor` element.
+--
+
+. Create one or more SAML realms.
++
+--
+SAML authentication is enabled by configuring a SAML realm within the
+authentication chain for {es}.
+
+This realm has a few mandatory settings and a number of optional settings.
+The available settings are described in detail in the
+<>. The following settings (in the `elasticsearch.yml`
+configuration file) are the most common settings:
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.authc.realms.saml1: <1>
+  type: saml <2>
+  order: 2 <3>
+  idp.metadata.path: saml/idp-metadata.xml <4>
+  idp.entity_id: "https://sso.example.com/" <5>
+  sp.entity_id: "https://kibana.example.com/" <6>
+  sp.acs: "https://kibana.example.com/api/security/v1/saml" <7>
+  sp.logout: "https://kibana.example.com/logout" <8>
+------------------------------------------------------------
+<1> This setting defines a new authentication realm named "saml1". For an
+introduction to realms, see {stack-ov}/realms.html[Realms].
+<2> The `type` must be `saml`.
+<3> You should define a unique order on each realm in your authentication chain.
+It is recommended that the SAML realm be at the bottom of your authentication
+chain (that is, it has the _highest_ order).
+<4> This is the path to the metadata file that you saved for your identity provider.
+The path that you enter here is relative to your `config/` directory. {security}
+automatically monitors this file for changes and reloads the configuration
+whenever it is updated.
+<5> This is the identifier (SAML EntityID) that your IdP uses. It should match
+the `entityID` attribute within the metadata file.
+<6> This is a unique identifier for your {kib} instance, expressed as a URI.
+You will use this value when you add {kib} as a service provider within your IdP.
+We recommend that you use the base URL for your {kib} instance as the entity ID.
+<7> The Assertion Consumer Service (ACS) endpoint is the URL within {kib} that
+accepts authentication messages from the IdP. This ACS endpoint supports the
+SAML HTTP-POST binding only. It must be a URL that is accessible from the web
+browser of the user who is attempting to log in to {kib}; it does not need to be
+directly accessible by {es} or the IdP. The correct value can vary depending on
+how you have installed {kib} and whether there are any proxies involved, but it
+is typically +$\{kibana-url}/api/security/v1/saml+ where _$\{kibana-url}_ is the
+base URL for your {kib} instance.
+<8> This is the URL within {kib} that accepts logout messages from the IdP.
+Like the `sp.acs` URL, it must be accessible from the web browser, but does
+not need to be directly accessible by {es} or the IdP. The correct value can
+vary depending on how you have installed {kib} and whether there are any
+proxies involved, but it will typically be +$\{kibana-url}/logout+ where
+_$\{kibana-url}_ is the base URL for your {kib} instance.
+
+IMPORTANT: SAML is used when authenticating via {kib}, but it is not an
+effective means of authenticating directly to the {es} REST API.
For this reason
+we recommend that you include at least one additional realm, such as the
+native realm, in your authentication chain for use by API clients.
+
+For more information, see
+{stack-ov}/saml-guide-authentication.html#saml-create-realm[Create a SAML realm].
+--
+
+. Add attribute mappings.
++
+--
+When a user connects to {kib} through the identity provider, the IdP supplies a
+SAML assertion that includes attributes for the user. You can configure the SAML
+realm to map these attributes to properties on the authenticated user.
+
+The recommended steps for configuring these SAML attributes are as follows:
+--
+.. Consult your IdP to see what user attributes it can provide. This varies
+greatly between providers, but you should be able to obtain a list from the
+documentation or from your local admin.
+
+.. Read through the list of user properties that {es} supports and decide which
+of them are useful to you and can be provided by your IdP. At a minimum, the
+`principal` attribute is required. The `groups` attribute is recommended.
+
+.. Configure your IdP to release those attributes to your {kib} SAML service
+provider.
++
+--
+This process varies by provider - some provide a user interface for this, while
+others might require that you edit configuration files. Usually the IdP (or your
+local administrator) has suggestions about what URI to use for each attribute.
+You can simply accept those suggestions, as the {es} service is entirely
+configurable and does not require that any specific URIs are used.
+--
+
+.. Configure the SAML realm to associate the {es} user properties to the URIs
+that you configured in your IdP.
++
+--
+For example, add the following settings to the `elasticsearch.yml` configuration
+file:
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.authc.realms.saml1:
+  ...
+  attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1"
+  attributes.groups: "urn:oid:1.3.6.1.4.1.5923.1.5.1."
+------------------------------------------------------------
+
+For more information, see
+{stack-ov}/saml-guide-authentication.html#saml-attribute-mapping[Attribute mapping].
+--
+
+. (Optional) Configure logout services.
++
+--
+The SAML protocol supports the concept of Single Logout (SLO). The level of
+support for SLO varies between identity providers.
+
+For more information, see
+{stack-ov}/saml-guide-authentication.html#saml-logout[SAML logout].
+--
+
+. (Optional) Configure encryption and signing.
++
+--
+The {stack} supports generating signed SAML messages (for authentication and/or
+logout), verifying signed SAML messages from the IdP (for both authentication
+and logout), and processing encrypted content.
+
+You can configure {es} for signing, encryption, or both, with the same or
+separate keys. For more information, see
+{stack-ov}/saml-guide-authentication.html#saml-enc-sign[Encryption and signing].
+--
+
+. (Optional) Generate service provider metadata.
++
+--
+There are some extra configuration steps that are specific to each identity
+provider. If your identity provider can import SP metadata, some of those steps
+can be automated or expedited. You can generate SP metadata for the {stack} by
+using the <>.
+--
+
+. Configure role mappings.
++
+--
+When a user authenticates using SAML, they are identified to the {stack},
+but this does not automatically grant them access to perform any actions or
+access any data.
+
+Your SAML users cannot do anything until they are mapped to {security}
+roles.
See {stack-ov}/saml-role-mapping.html[Configuring role mappings].
+--
+
+. {stack-ov}/saml-kibana.html[Configure {kib} to use SAML SSO].
+
diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc
index f7b514b81449f..6aa0a72fc8495 100644
--- a/x-pack/docs/en/security/authentication/native-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/native-realm.asciidoc
@@ -10,7 +10,7 @@ manage user passwords.
 [float]
 ==== Configuring a native realm
 
-See {ref}/[Configuring a native realm].
+See {ref}/configuring-native-realm.html[Configuring a native realm].
 
 [[native-settings]]
 ==== Native realm settings
diff --git a/x-pack/docs/en/security/authentication/overview.asciidoc b/x-pack/docs/en/security/authentication/overview.asciidoc
index da5f6a4ea3cea..7633f02b6765b 100644
--- a/x-pack/docs/en/security/authentication/overview.asciidoc
+++ b/x-pack/docs/en/security/authentication/overview.asciidoc
@@ -24,28 +24,41 @@ attach your user credentials to the requests sent to {es}. For example, when
 using realms that support usernames and passwords you can simply attach a
 {wikipedia}/Basic_access_authentication[basic auth] header to the requests.
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/built-in-users.asciidoc
 include::built-in-users.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/internal-users.asciidoc
 include::internal-users.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/realms.asciidoc
 include::realms.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc
 include::active-directory-realm.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/file-realm.asciidoc
 include::file-realm.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
 include::ldap-realm.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/native-realm.asciidoc
 include::native-realm.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/pki-realm.asciidoc
 include::pki-realm.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/saml-realm.asciidoc
 include::saml-realm.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/custom-realm.asciidoc
 include::custom-realm.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/anonymous-access.asciidoc
 include::anonymous-access.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/user-cache.asciidoc
 include::user-cache.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/saml-guide.asciidoc
 include::saml-guide.asciidoc[]
diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc
index 740f51c877ded..a57cfaec84c43 100644
---
a/x-pack/docs/en/security/authentication/saml-guide.asciidoc
+++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc
@@ -22,6 +22,7 @@ the primary (or sole) authentication method for users of that {kib} instance.
 Once you enable SAML authentication in {kib} it will affect all users who try
 to log in. The <> section provides more detail about how this works.
 
+[[saml-guide-idp]]
 === The identity provider
 
 The Elastic Stack supports the SAML 2.0 _Web Browser SSO_ and the SAML
@@ -70,6 +71,7 @@ For `` messages, the message itself must be signed, and the
 signature should be provided as a URL parameter, as required by the
 HTTP-Redirect binding.
 
+[[saml-guide-authentication]]
 === Configure {es} for SAML authentication
 
 There are five configuration steps to enable SAML authentication in {es}:
diff --git a/x-pack/docs/en/security/authorization/overview.asciidoc b/x-pack/docs/en/security/authorization/overview.asciidoc
index 98a1ad8b786b6..e5b61e585c67c 100644
--- a/x-pack/docs/en/security/authorization/overview.asciidoc
+++ b/x-pack/docs/en/security/authorization/overview.asciidoc
@@ -49,18 +49,26 @@ As an administrator, you will need to define the roles that you want to use,
 then assign users to the roles. These can be assigned to users in a number of
 ways depending on the realms by which the users are authenticated.
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/built-in-roles.asciidoc
 include::built-in-roles.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/managing-roles.asciidoc
 include::managing-roles.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/privileges.asciidoc
 include::privileges.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/alias-privileges.asciidoc
 include::alias-privileges.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/mapping-roles.asciidoc
 include::mapping-roles.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc
 include::field-and-document-access-control.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc
 include::run-as-privilege.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc
 include::custom-roles-provider.asciidoc[]
diff --git a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc
index cbf4ede328e48..f744e6d7092e3 100644
--- a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc
+++ b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc
@@ -32,14 +32,20 @@ or at least communicate with the cluster in a secured way:
 * {kibana-ref}/secure-reporting.html[Reporting]
 * {winlogbeat-ref}/securing-beats.html[Winlogbeat]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc
 include::ccs-clients-integrations/cross-cluster.asciidoc[]
 
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc
include::ccs-clients-integrations/java.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc include::ccs-clients-integrations/http.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/hadoop.asciidoc include::ccs-clients-integrations/hadoop.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc include::ccs-clients-integrations/beats.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc include::ccs-clients-integrations/monitoring.asciidoc[] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc index bf4800d50d226..eceb0315b20c9 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc @@ -155,4 +155,5 @@ GET two:logs-2017.04/_search <1> // TEST[skip:todo] //TBD: Is there a missing description of the <1> callout above? +:edit_url: https://github.com/elastic/kibana/edit/{branch}/x-pack/docs/en/security/cross-cluster-kibana.asciidoc include::{xkb-repo-dir}/security/cross-cluster-kibana.asciidoc[] diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index de3895d34b000..d8ef6c2809b34 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -76,6 +76,7 @@ user API. ** <>. ** <>. ** <>. +** <>. . Set up roles and users to control access to {es}. 
For example, to grant _John Doe_ full access to all indices that match @@ -140,5 +141,6 @@ include::authentication/configuring-file-realm.asciidoc[] include::authentication/configuring-ldap-realm.asciidoc[] include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] +include::authentication/configuring-saml-realm.asciidoc[] include::{xes-repo-dir}/settings/security-settings.asciidoc[] include::{xes-repo-dir}/settings/audit-settings.asciidoc[] diff --git a/x-pack/docs/en/security/index.asciidoc b/x-pack/docs/en/security/index.asciidoc index 96e9287ca01ec..ed891b9a1ba7e 100644 --- a/x-pack/docs/en/security/index.asciidoc +++ b/x-pack/docs/en/security/index.asciidoc @@ -108,7 +108,7 @@ include::authentication/overview.asciidoc[] include::authorization/overview.asciidoc[] :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing.asciidoc -include::auditing.asciidoc[] +include::auditing/index.asciidoc[] :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications.asciidoc include::securing-communications.asciidoc[] diff --git a/x-pack/docs/en/security/reference.asciidoc b/x-pack/docs/en/security/reference.asciidoc index 9c65fd6479a4f..ba770c152329b 100644 --- a/x-pack/docs/en/security/reference.asciidoc +++ b/x-pack/docs/en/security/reference.asciidoc @@ -7,4 +7,5 @@ * {ref}/security-api.html[Security API] * {ref}/xpack-commands.html[Security Commands] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/reference/files.asciidoc include::reference/files.asciidoc[] diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications.asciidoc index ef07f0113cb59..11f6b3dc5616e 100644 --- a/x-pack/docs/en/security/securing-communications.asciidoc +++ b/x-pack/docs/en/security/securing-communications.asciidoc @@ -17,6 +17,7 @@ This section shows how to: The authentication of new nodes helps prevent a rogue node from joining the cluster and receiving data through replication. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/setting-up-ssl.asciidoc include::securing-communications/setting-up-ssl.asciidoc[] //TO-DO: These sections can be removed when all links to them are removed. diff --git a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc index da4e3a40b7d16..09cb118f68466 100644 --- a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc +++ b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc @@ -29,8 +29,17 @@ information, see <>. For more information about encrypting communications across the Elastic Stack, see {xpack-ref}/encrypting-communications.html[Encrypting Communications]. 
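To give a rough sense of where the included steps below lead, a node that has gone
through certificate generation and transport encryption typically ends up with
settings along these lines in `elasticsearch.yml` (a sketch only; the keystore and
truststore paths are placeholders for the certificate files you generate yourself):

[source,yaml]
----------------------------
xpack.security.transport.ssl.enabled: true
# `certificate` verifies the certificate chain; `full` would also verify hostnames
xpack.security.transport.ssl.verification_mode: certificate
# placeholder paths; point these at the files created for your own nodes
xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
----------------------------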
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/node-certificates.asciidoc include::node-certificates.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc include::tls-transport.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-http.asciidoc include::tls-http.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-ad.asciidoc include::tls-ad.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc include::tls-ldap.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/settings/audit-settings.asciidoc b/x-pack/docs/en/settings/audit-settings.asciidoc index 14e5d6fa28f46..6274fae790b8c 100644 --- a/x-pack/docs/en/settings/audit-settings.asciidoc +++ b/x-pack/docs/en/settings/audit-settings.asciidoc @@ -121,7 +121,9 @@ To index audit events to a remote {es} cluster, you configure the following `xpack.security.audit.index.client.hosts`:: Specifies a comma-separated list of `host:port` pairs. These hosts should be -nodes in the remote cluster. +nodes in the remote cluster. If you are using default values for the +<> setting, you can omit the +`port` value. Otherwise, it must match the `transport.tcp.port` setting. `xpack.security.audit.index.client.cluster.name`:: Specifies the name of the remote cluster. diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/x-pack/docs/en/settings/security-settings.asciidoc index 2c4df292857f3..4e9d85f1900ae 100644 --- a/x-pack/docs/en/settings/security-settings.asciidoc +++ b/x-pack/docs/en/settings/security-settings.asciidoc @@ -1145,7 +1145,7 @@ Path to the PEM encoded file containing the private key. The passphrase that is used to decrypt the private key. This value is optional as the key might not be encrypted. -`xpack.ssl.secure_key_passphrase` ({<>):: +`xpack.ssl.secure_key_passphrase` (<>):: The passphrase that is used to decrypt the private key. This value is optional as the key might not be encrypted. diff --git a/x-pack/docs/en/settings/ssl-settings.asciidoc b/x-pack/docs/en/settings/ssl-settings.asciidoc index 722aeeea15592..655dfb74a6498 100644 --- a/x-pack/docs/en/settings/ssl-settings.asciidoc +++ b/x-pack/docs/en/settings/ssl-settings.asciidoc @@ -90,7 +90,7 @@ Path to the keystore that holds the private key and certificate. +{ssl-prefix}.ssl.keystore.password+:: Password to the keystore. -+{ssl-prefix}.ssl.keystore.secure_password` (<>):: ++{ssl-prefix}.ssl.keystore.secure_password+ (<>):: Password to the keystore. +{ssl-prefix}.ssl.keystore.key_password+:: diff --git a/x-pack/docs/en/sql/endpoints/cli.asciidoc b/x-pack/docs/en/sql/endpoints/cli.asciidoc index 8f217b61e452a..edbb1dcace4f1 100644 --- a/x-pack/docs/en/sql/endpoints/cli.asciidoc +++ b/x-pack/docs/en/sql/endpoints/cli.asciidoc @@ -2,7 +2,7 @@ [[sql-cli]] == SQL CLI -X-Pack ships with a script to run the SQL CLI in its bin directory: +Elasticsearch ships with a script to run the SQL CLI in its `bin` directory: [source,bash] -------------------------------------------------- @@ -11,7 +11,7 @@ $ ./bin/elasticsearch-sql-cli The jar containing the SQL CLI is a stand alone Java application and the scripts just launch it. 
You can move it around to other machines -without having to install Elasticsearch or X-Pack on them. +without having to install Elasticsearch on them. You can pass the URL of the Elasticsearch instance to connect to as the first parameter: diff --git a/x-pack/docs/en/sql/endpoints/translate.asciidoc b/x-pack/docs/en/sql/endpoints/translate.asciidoc index 278211411305a..9c1d71af5d35e 100644 --- a/x-pack/docs/en/sql/endpoints/translate.asciidoc +++ b/x-pack/docs/en/sql/endpoints/translate.asciidoc @@ -23,8 +23,14 @@ Which returns: { "size" : 10, "docvalue_fields" : [ - "page_count", - "release_date" + { + "field": "page_count", + "format": "use_field_mapping" + }, + { + "field": "release_date", + "format": "epoch_millis" + } ], "_source": { "includes": [ diff --git a/x-pack/docs/en/watcher/actions.asciidoc b/x-pack/docs/en/watcher/actions.asciidoc index 72489443b3c12..de2516b0589cc 100644 --- a/x-pack/docs/en/watcher/actions.asciidoc +++ b/x-pack/docs/en/watcher/actions.asciidoc @@ -259,20 +259,28 @@ PUT _xpack/watcher/watch/log_event_watch <1> A `condition` that only applies to the `notify_pager` action, which restricts its execution to when the condition succeeds (at least 5 hits in this case). +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/email.asciidoc include::actions/email.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/webhook.asciidoc include::actions/webhook.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/index.asciidoc include::actions/index.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/logging.asciidoc include::actions/logging.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/hipchat.asciidoc include::actions/hipchat.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/slack.asciidoc include::actions/slack.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/pagerduty.asciidoc include::actions/pagerduty.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/jira.asciidoc include::actions/jira.asciidoc[] [float] diff --git a/x-pack/docs/en/watcher/condition.asciidoc b/x-pack/docs/en/watcher/condition.asciidoc index e83981667d5f7..50424dc132a43 100644 --- a/x-pack/docs/en/watcher/condition.asciidoc +++ b/x-pack/docs/en/watcher/condition.asciidoc @@ -28,12 +28,17 @@ conditions are met. In addition to the watch wide condition, you can also configure conditions per <>. 
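To make the shape of a condition concrete, here is a minimal sketch of a watch-wide
`compare` condition; the payload field and threshold mirror the `notify_pager`
example above (at least 5 hits):

[source,js]
--------------------------------------------------
"condition" : {
  "compare" : {
    "ctx.payload.hits.total" : {
      "gte" : 5
    }
  }
}
--------------------------------------------------

The same object can instead be attached to a single action, in which case only that
action is gated by it.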
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/always.asciidoc include::condition/always.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/never.asciidoc include::condition/never.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/compare.asciidoc include::condition/compare.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/array-compare.asciidoc include::condition/array-compare.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/script.asciidoc include::condition/script.asciidoc[] diff --git a/x-pack/docs/en/watcher/example-watches.asciidoc b/x-pack/docs/en/watcher/example-watches.asciidoc index 2d747caba5cc4..2a402b20261d7 100644 --- a/x-pack/docs/en/watcher/example-watches.asciidoc +++ b/x-pack/docs/en/watcher/example-watches.asciidoc @@ -9,6 +9,8 @@ For more example watches you can use as a starting point for building custom watches, see the https://github.com/elastic/examples/tree/master/Alerting[Example Watches] in the Elastic Examples repo. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc include::example-watches/example-watch-clusterstatus.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc include::example-watches/example-watch-meetupdata.asciidoc[] diff --git a/x-pack/docs/en/watcher/input.asciidoc b/x-pack/docs/en/watcher/input.asciidoc index d74f5cd80f1eb..6dee849c735f9 100644 --- a/x-pack/docs/en/watcher/input.asciidoc +++ b/x-pack/docs/en/watcher/input.asciidoc @@ -19,10 +19,14 @@ execution context. NOTE: If you don't define an input for a watch, an empty payload is loaded into the execution context. 
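As a minimal sketch (the key and value here are arbitrary), a `simple` input just
loads a static payload, which later parts of the watch can read back via
`ctx.payload`:

[source,js]
--------------------------------------------------
"input" : {
  "simple" : {
    "name" : "example"
  }
}
--------------------------------------------------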
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/simple.asciidoc include::input/simple.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/search.asciidoc include::input/search.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/http.asciidoc include::input/http.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/chain.asciidoc include::input/chain.asciidoc[] diff --git a/x-pack/docs/en/watcher/java.asciidoc b/x-pack/docs/en/watcher/java.asciidoc index bcf41252433ba..e5cb6b54b0c65 100644 --- a/x-pack/docs/en/watcher/java.asciidoc +++ b/x-pack/docs/en/watcher/java.asciidoc @@ -101,20 +101,29 @@ XPackClient xpackClient = new XPackClient(client); WatcherClient watcherClient = xpackClient.watcher(); -------------------------------------------------- +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/put-watch.asciidoc include::java/put-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/get-watch.asciidoc include::java/get-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/delete-watch.asciidoc include::java/delete-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/execute-watch.asciidoc include::java/execute-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/ack-watch.asciidoc include::java/ack-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/activate-watch.asciidoc include::java/activate-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/deactivate-watch.asciidoc include::java/deactivate-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/stats.asciidoc include::java/stats.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/service.asciidoc include::java/service.asciidoc[] diff --git a/x-pack/docs/en/watcher/transform.asciidoc b/x-pack/docs/en/watcher/transform.asciidoc index 1b99d595b9c8f..0351c9b8c1214 100644 --- a/x-pack/docs/en/watcher/transform.asciidoc +++ b/x-pack/docs/en/watcher/transform.asciidoc @@ -55,8 +55,11 @@ part of the definition of the `my_webhook` action. <1> A watch level `transform` <2> An action level `transform` +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/search.asciidoc include::transform/search.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/script.asciidoc include::transform/script.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/chain.asciidoc include::transform/chain.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/watcher/trigger.asciidoc b/x-pack/docs/en/watcher/trigger.asciidoc index ee52dbba3bd7a..af830e829a45e 100644 --- a/x-pack/docs/en/watcher/trigger.asciidoc +++ b/x-pack/docs/en/watcher/trigger.asciidoc @@ -9,4 +9,5 @@ the trigger and triggering the watch when needed. 
{watcher} is designed to support different types of triggers, but only time-based <> triggers are currently available. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/trigger/schedule.asciidoc include::trigger/schedule.asciidoc[] diff --git a/x-pack/docs/en/watcher/trigger/schedule.asciidoc b/x-pack/docs/en/watcher/trigger/schedule.asciidoc index 7cd38c5fc9ba0..abbc3f5cfe8e5 100644 --- a/x-pack/docs/en/watcher/trigger/schedule.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule.asciidoc @@ -26,16 +26,23 @@ once per minute. For more information about throttling, see * <> * <> +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/hourly.asciidoc include::schedule/hourly.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/daily.asciidoc include::schedule/daily.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/weekly.asciidoc include::schedule/weekly.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/monthly.asciidoc include::schedule/monthly.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/yearly.asciidoc include::schedule/yearly.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/cron.asciidoc include::schedule/cron.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/interval.asciidoc include::schedule/interval.asciidoc[] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index fa0c239aab17d..99e6a10ad92de 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -223,6 +223,7 @@ protected PutLicenseResponse newResponse(boolean acknowledged) { @Override public ClusterState execute(ClusterState currentState) throws Exception { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); MetaData currentMetadata = currentState.metaData(); LicensesMetaData licensesMetaData = currentMetadata.custom(LicensesMetaData.TYPE); Version trialVersion = null; @@ -341,7 +342,7 @@ protected void doStart() throws ElasticsearchException { if (clusterService.lifecycleState() == Lifecycle.State.STARTED) { final ClusterState clusterState = clusterService.state(); if (clusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) == false && - clusterState.nodes().getMasterNode() != null) { + clusterState.nodes().getMasterNode() != null && XPackPlugin.isReadyForXPackCustomMetadata(clusterState)) { final LicensesMetaData currentMetaData = clusterState.metaData().custom(LicensesMetaData.TYPE); boolean noLicense = currentMetaData == null || currentMetaData.getLicense() == null; if (clusterState.getNodes().isLocalNodeElectedMaster() && @@ -374,6 +375,12 @@ public void clusterChanged(ClusterChangedEvent event) { final ClusterState previousClusterState = event.previousState(); final ClusterState currentClusterState = event.state(); if (!currentClusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + if (XPackPlugin.isReadyForXPackCustomMetadata(currentClusterState) == false) { + logger.debug("cannot 
add license to cluster as the following nodes might not understand the license metadata: {}", + () -> XPackPlugin.nodesNotReadyForXPackCustomMetadata(currentClusterState)); + return; + } + final LicensesMetaData prevLicensesMetaData = previousClusterState.getMetaData().custom(LicensesMetaData.TYPE); final LicensesMetaData currentLicensesMetaData = currentClusterState.getMetaData().custom(LicensesMetaData.TYPE); if (logger.isDebugEnabled()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java index 355482872d629..0cf949a69906f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; +import org.elasticsearch.xpack.core.XPackPlugin; import java.time.Clock; import java.util.Collections; @@ -59,6 +60,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public ClusterState execute(ClusterState currentState) throws Exception { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); LicensesMetaData licensesMetaData = currentState.metaData().custom(LicensesMetaData.TYPE); License currentLicense = LicensesMetaData.extractLicense(licensesMetaData); if (currentLicense == null || currentLicense.type().equals("basic") == false) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index 355672dedf717..5c5c03151ba26 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; +import org.elasticsearch.xpack.core.XPackPlugin; import java.time.Clock; import java.util.Collections; @@ -64,6 +65,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public ClusterState execute(ClusterState currentState) throws Exception { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); LicensesMetaData currentLicensesMetaData = currentState.metaData().custom(LicensesMetaData.TYPE); if (request.isAcknowledged() == false) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java index 77695f64538bc..823283ac5a852 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.XPackPlugin; import java.time.Clock; import java.util.UUID; @@ -49,6 +50,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public ClusterState execute(ClusterState 
currentState) throws Exception { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); final MetaData metaData = currentState.metaData(); final LicensesMetaData currentLicensesMetaData = metaData.custom(LicensesMetaData.TYPE); // do not generate a license if any license is present diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 5ee46f3b3c97a..77d521e2d4322 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -9,15 +9,20 @@ import org.apache.lucene.util.SetOnce; import org.bouncycastle.operator.OperatorCreationException; import org.elasticsearch.SpecialPermission; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.GenericAction; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.multibindings.Multibinder; @@ -33,6 +38,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.LicensesMetaData; import org.elasticsearch.license.Licensing; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.ExtensiblePlugin; @@ -46,10 +52,13 @@ import org.elasticsearch.xpack.core.action.TransportXPackUsageAction; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; +import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction; import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction; +import org.elasticsearch.xpack.core.security.authc.TokenMetaData; import org.elasticsearch.xpack.core.ssl.SSLConfigurationReloader; import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import javax.security.auth.DestroyFailedException; @@ -62,14 +71,19 @@ import java.time.Clock; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; public class XPackPlugin extends XPackClientPlugin implements ScriptPlugin, ExtensiblePlugin { private static Logger logger = ESLoggerFactory.getLogger(XPackPlugin.class); private static DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + public static final String XPACK_INSTALLED_NODE_ATTR = "xpack.installed"; + // TODO: clean up this library to not ask for write access to all system properties! 
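Taken together, the hunks above add the same pre-flight guard to every cluster-state update task that writes license metadata: `XPackPlugin.checkReadyForXPackCustomMetadata(currentState)` throws before any mutation if some node in the cluster could not deserialize x-pack custom metadata. A minimal sketch of the shared shape follows; `MyCustomMetaData`, `buildMetadata()` and the surrounding task are invented for illustration and are not part of this change.

--------------------------------------------
// Sketch only: the guard pattern used by the license/ML/token update tasks in
// this diff. Failing fast here means a mixed-version cluster (with pre-6.3.0 or
// non-x-pack nodes) never receives custom metadata those nodes cannot read.
clusterService.submitStateUpdateTask("install-my-metadata", new ClusterStateUpdateTask() {
    @Override
    public ClusterState execute(ClusterState currentState) {
        // throws IllegalStateException naming the not-ready nodes
        XPackPlugin.checkReadyForXPackCustomMetadata(currentState);
        MetaData newMetaData = MetaData.builder(currentState.metaData())
                .putCustom(MyCustomMetaData.TYPE, buildMetadata()) // hypothetical custom
                .build();
        return ClusterState.builder(currentState).metaData(newMetaData).build();
    }

    @Override
    public void onFailure(String source, Exception e) {
        logger.error("unable to install custom metadata", e);
    }
});
--------------------------------------------

Listeners that merely react to cluster changes (`LicenseService.clusterChanged` above, the token service later in this diff) use the non-throwing `isReadyForXPackCustomMetadata` variant instead, log at debug, and try again on a later cluster state.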
static { // invoke this clinit in unbound with permissions to access all system properties @@ -138,6 +152,75 @@ protected Clock getClock() { public static LicenseService getSharedLicenseService() { return licenseService.get(); } public static XPackLicenseState getSharedLicenseState() { return licenseState.get(); } + /** + * Checks if the cluster state allows this node to add x-pack metadata to the cluster state, + * and throws an exception otherwise. + * This check should be called before installing any x-pack metadata to the cluster state, + * to ensure that the other nodes that are part of the cluster will be able to deserialize + * that metadata. Note that if the cluster state already contains x-pack metadata, this + * check assumes that the nodes are already ready to receive additional x-pack metadata. + * Having this check properly in place everywhere allows to install x-pack into a cluster + * using a rolling restart. + */ + public static void checkReadyForXPackCustomMetadata(ClusterState clusterState) { + if (alreadyContainsXPackCustomMetadata(clusterState)) { + return; + } + List notReadyNodes = nodesNotReadyForXPackCustomMetadata(clusterState); + if (notReadyNodes.isEmpty() == false) { + throw new IllegalStateException("The following nodes are not ready yet for enabling x-pack custom metadata: " + notReadyNodes); + } + } + + /** + * Checks if the cluster state allows this node to add x-pack metadata to the cluster state. + * See {@link #checkReadyForXPackCustomMetadata} for more details. + */ + public static boolean isReadyForXPackCustomMetadata(ClusterState clusterState) { + return alreadyContainsXPackCustomMetadata(clusterState) || nodesNotReadyForXPackCustomMetadata(clusterState).isEmpty(); + } + + /** + * Returns the list of nodes that won't allow this node from adding x-pack metadata to the cluster state. + * See {@link #checkReadyForXPackCustomMetadata} for more details. + */ + public static List nodesNotReadyForXPackCustomMetadata(ClusterState clusterState) { + // check that all nodes would be capable of deserializing newly added x-pack metadata + final List notReadyNodes = StreamSupport.stream(clusterState.nodes().spliterator(), false).filter(node -> { + final String xpackInstalledAttr = node.getAttributes().getOrDefault(XPACK_INSTALLED_NODE_ATTR, "false"); + + // The node attribute XPACK_INSTALLED_NODE_ATTR was only introduced in 6.3.0, so when + // we have an older node in this mixed-version cluster without any x-pack metadata, + // we want to prevent x-pack from adding custom metadata + return node.getVersion().before(Version.V_6_3_0) || Booleans.parseBoolean(xpackInstalledAttr) == false; + }).collect(Collectors.toList()); + + return notReadyNodes; + } + + private static boolean alreadyContainsXPackCustomMetadata(ClusterState clusterState) { + final MetaData metaData = clusterState.metaData(); + return metaData.custom(LicensesMetaData.TYPE) != null || + metaData.custom(MLMetadataField.TYPE) != null || + metaData.custom(WatcherMetaData.TYPE) != null || + clusterState.custom(TokenMetaData.TYPE) != null; + } + + @Override + public Settings additionalSettings() { + final String xpackInstalledNodeAttrSetting = "node.attr." 
+ XPACK_INSTALLED_NODE_ATTR; + + if (settings.get(xpackInstalledNodeAttrSetting) != null) { + throw new IllegalArgumentException("Directly setting [" + xpackInstalledNodeAttrSetting + "] is not permitted"); + } + + if (transportClientMode) { + return super.additionalSettings(); + } else { + return Settings.builder().put(super.additionalSettings()).put(xpackInstalledNodeAttrSetting, "true").build(); + } + } + @Override public Collection createGuiceModules() { ArrayList modules = new ArrayList<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java index 5779924bb27fb..8559ab0703b43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java @@ -193,9 +193,7 @@ private static List filter(Iterable iterable, CharacterRunAutomaton i continue; } Map filteredValue = filter((Map)value, includeAutomaton, state); - if (filteredValue.isEmpty() == false) { - filtered.add(filteredValue); - } + filtered.add(filteredValue); } else if (value instanceof Iterable) { List filteredValue = filter((Iterable) value, includeAutomaton, initialState); if (filteredValue.isEmpty() == false) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java index a7ef1f0c02f4f..d897d55e5fdc4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java @@ -111,7 +111,7 @@ protected ChannelHandler getClientChannelInitializer() { protected void onException(TcpChannel channel, Exception e) { if (!lifecycle.started()) { // just close and ignore - we are already stopped and just need to make sure we release all resources - TcpChannel.closeChannel(channel, false); + TcpChannel.closeChannel(channel); } else if (SSLExceptionHelper.isNotSslRecordException(e)) { if (logger.isTraceEnabled()) { logger.trace( @@ -119,21 +119,21 @@ protected void onException(TcpChannel channel, Exception e) { } else { logger.warn("received plaintext traffic on an encrypted channel, closing connection {}", channel); } - TcpChannel.closeChannel(channel, false); + TcpChannel.closeChannel(channel); } else if (SSLExceptionHelper.isCloseDuringHandshakeException(e)) { if (logger.isTraceEnabled()) { logger.trace(new ParameterizedMessage("connection {} closed during ssl handshake", channel), e); } else { logger.warn("connection {} closed during handshake", channel); } - TcpChannel.closeChannel(channel, false); + TcpChannel.closeChannel(channel); } else if (SSLExceptionHelper.isReceivedCertificateUnknownException(e)) { if (logger.isTraceEnabled()) { logger.trace(new ParameterizedMessage("client did not trust server's certificate, closing connection {}", channel), e); } else { logger.warn("client did not trust this server's certificate, closing connection {}", channel); } - TcpChannel.closeChannel(channel, false); + TcpChannel.closeChannel(channel); } else { super.onException(channel, e); } diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java index 2f110f4f8a9e8..5bc33ae330a18 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java @@ -18,14 +18,16 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.junit.After; import org.junit.Before; import java.nio.file.Path; +import java.util.Arrays; -import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static java.util.Collections.singletonMap; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -66,6 +68,7 @@ protected void setInitialState(License license, XPackLicenseState licenseState, when(state.metaData()).thenReturn(metaData); final DiscoveryNode mockNode = getLocalNode(); when(discoveryNodes.getMasterNode()).thenReturn(mockNode); + when(discoveryNodes.spliterator()).thenReturn(Arrays.asList(mockNode).spliterator()); when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(false); when(state.nodes()).thenReturn(discoveryNodes); when(state.getNodes()).thenReturn(discoveryNodes); // it is really ridiculous we have nodes() and getNodes()... @@ -76,7 +79,8 @@ protected void setInitialState(License license, XPackLicenseState licenseState, } protected DiscoveryNode getLocalNode() { - return new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + return new DiscoveryNode("b", buildNewFakeTransportAddress(), singletonMap(XPackPlugin.XPACK_INSTALLED_NODE_ATTR, "true"), + emptySet(), Version.CURRENT); } @After diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java new file mode 100644 index 0000000000000..59731cab71db8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.core.ssl.SSLService; + +import java.util.Collections; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; + +public class XPackPluginTests extends ESTestCase { + + public void testXPackInstalledAttrClash() throws Exception { + Settings.Builder builder = Settings.builder(); + builder.put("node.attr." + XPackPlugin.XPACK_INSTALLED_NODE_ATTR, randomBoolean()); + if (randomBoolean()) { + builder.put(Client.CLIENT_TYPE_SETTING_S.getKey(), "transport"); + } + XPackPlugin xpackPlugin = createXPackPlugin(builder.put("path.home", createTempDir()).build()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, xpackPlugin::additionalSettings); + assertThat(e.getMessage(), + containsString("Directly setting [node.attr." + XPackPlugin.XPACK_INSTALLED_NODE_ATTR + "] is not permitted")); + } + + public void testXPackInstalledAttrExists() throws Exception { + XPackPlugin xpackPlugin = createXPackPlugin(Settings.builder().put("path.home", createTempDir()).build()); + assertEquals("true", xpackPlugin.additionalSettings().get("node.attr." + XPackPlugin.XPACK_INSTALLED_NODE_ATTR)); + } + + public void testNodesNotReadyForXPackCustomMetadata() { + boolean compatible; + boolean nodesCompatible = true; + DiscoveryNodes.Builder discoveryNodes = DiscoveryNodes.builder(); + + for (int i = 0; i < randomInt(3); i++) { + final Version version = VersionUtils.randomVersion(random()); + final Map attributes; + if (randomBoolean() && version.onOrAfter(Version.V_6_3_0)) { + attributes = Collections.singletonMap(XPackPlugin.XPACK_INSTALLED_NODE_ATTR, "true"); + } else { + nodesCompatible = false; + attributes = Collections.emptyMap(); + } + + discoveryNodes.add(new DiscoveryNode("node_" + i, buildNewFakeTransportAddress(), attributes, Collections.emptySet(), + Version.CURRENT)); + } + ClusterState.Builder clusterStateBuilder = ClusterState.builder(ClusterName.DEFAULT); + + if (randomBoolean()) { + clusterStateBuilder.putCustom(TokenMetaData.TYPE, new TokenMetaData(Collections.emptyList(), new byte[0])); + compatible = true; + } else { + compatible = nodesCompatible; + } + + ClusterState clusterState = clusterStateBuilder.nodes(discoveryNodes.build()).build(); + + assertEquals(XPackPlugin.nodesNotReadyForXPackCustomMetadata(clusterState).isEmpty(), nodesCompatible); + assertEquals(XPackPlugin.isReadyForXPackCustomMetadata(clusterState), compatible); + + if (compatible == false) { + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> XPackPlugin.checkReadyForXPackCustomMetadata(clusterState)); + assertThat(e.getMessage(), containsString("The following nodes are not ready yet for enabling x-pack custom metadata:")); + } + } + + private XPackPlugin createXPackPlugin(Settings settings) throws Exception { + return new XPackPlugin(settings, null){ + + @Override + protected void setSslService(SSLService sslService) { + // disable + } + + @Override + 
protected void setLicenseState(XPackLicenseState licenseState) { + // disable + } + }; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java index 4c74e7f5d9059..e71b0e5e8bdc1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java @@ -716,6 +716,22 @@ public void testSourceFiltering() { expected.put("foo", subArray); assertEquals(expected, filtered); + + // json array objects that have no matching fields should be left empty instead of being removed: + // (otherwise nested inner hit source filtering fails with AOOB) + map = new HashMap<>(); + map.put("foo", "value"); + List> values = new ArrayList<>(); + values.add(Collections.singletonMap("foo", "1")); + values.add(Collections.singletonMap("baz", "2")); + map.put("bar", values); + + include = new CharacterRunAutomaton(Automatons.patterns("bar.baz")); + filtered = FieldSubsetReader.filter(map, include, 0); + + expected = new HashMap<>(); + expected.put("bar", Arrays.asList(new HashMap<>(), Collections.singletonMap("baz", "2"))); + assertEquals(expected, filtered); } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index bdefabdb294e5..a1714a8e3f5db 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -316,12 +316,8 @@ public Settings additionalSettings() { } private void addMlNodeAttribute(Settings.Builder additionalSettings, String attrName, String value) { - // Unfortunately we cannot simply disallow any value, because the internal cluster integration - // test framework will restart nodes with settings copied from the node immediately before it - // was stopped. The best we can do is reject inconsistencies, and report this in a way that - // makes clear that setting the node attribute directly is not allowed. 
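Two related hunks reserve node attributes for plugin use: `XPackPlugin.additionalSettings()` above rejects any user-supplied `node.attr.xpack.installed`, and the `MachineLearning` hunk just below tightens its check so that even a matching pre-set `node.attr.ml.*` value now counts as a clash (the removed comment explains the old tolerance existed only for the internal test framework's node restarts). A rough sketch of the reservation idiom, with a made-up attribute name:

--------------------------------------------
// Illustration only; "node.attr.my.marker" is a made-up attribute. The real code
// reserves node.attr.xpack.installed and the node.attr.ml.* attributes this way.
@Override
public Settings additionalSettings() {
    final String attr = "node.attr.my.marker";
    if (settings.get(attr) != null) {
        // refuse user-supplied values so the attribute always reflects reality
        throw new IllegalArgumentException("Directly setting [" + attr + "] is not permitted");
    }
    // assumes the plugin kept the node Settings it was constructed with
    return Settings.builder().put(attr, "true").build();
}
--------------------------------------------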
String oldValue = settings.get(attrName); - if (oldValue == null || oldValue.equals(value)) { + if (oldValue == null) { additionalSettings.put(attrName, value); } else { reportClashingNodeAttribute(attrName); @@ -487,7 +483,7 @@ public List getRestHandlers(Settings settings, RestController restC new RestStartDatafeedAction(settings, restController), new RestStopDatafeedAction(settings, restController), new RestDeleteModelSnapshotAction(settings, restController), - new RestDeleteExpiredDataAction(settings, restController), + new RestDeleteExpiredDataAction(settings, restController), new RestForecastJobAction(settings, restController), new RestGetCalendarsAction(settings, restController), new RestPutCalendarAction(settings, restController), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java index 220d97e89ba14..8c9eabe6de19a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; @@ -120,6 +121,7 @@ protected DeleteDatafeedAction.Response newResponse(boolean acknowledged) { @Override public ClusterState execute(ClusterState currentState) { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); MlMetadata currentMetadata = MlMetadata.getMlMetadata(currentState); PersistentTasksCustomMetaData persistentTasks = currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java index e939c6ef31a2f..8e7a0fef41e89 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -57,6 +58,7 @@ protected void masterOperation(FinalizeJobExecutionAction.Request request, Clust clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState); MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(mlMetadata); Date finishedTime = new Date(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java index 2b4304a205b13..08a9dfb09c1d9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -141,6 +142,7 @@ public ClusterState execute(ClusterState currentState) { } private ClusterState putDatafeed(PutDatafeedAction.Request request, ClusterState clusterState) { + XPackPlugin.checkReadyForXPackCustomMetadata(clusterState); MlMetadata currentMetadata = MlMetadata.getMlMetadata(clusterState); MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata) .putDatafeed(request.getDatafeed(), threadPool.getThreadContext()).build(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java index 0524cb28a0c11..12e6fb62fd727 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java @@ -28,7 +28,6 @@ public class TransportUpdateCalendarJobAction extends HandledTransportAction { - private final ClusterService clusterService; private final JobProvider jobProvider; private final JobManager jobManager; @@ -36,27 +35,21 @@ public class TransportUpdateCalendarJobAction extends HandledTransportAction listener) { - ClusterState clusterState = clusterService.state(); - final MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); - Set jobIdsToAdd = Strings.tokenizeByCommaToSet(request.getJobIdsToAddExpression()); Set jobIdsToRemove = Strings.tokenizeByCommaToSet(request.getJobIdsToRemoveExpression()); - jobProvider.updateCalendar(request.getCalendarId(), jobIdsToAdd, jobIdsToRemove, mlMetadata, + jobProvider.updateCalendar(request.getCalendarId(), jobIdsToAdd, jobIdsToRemove, c -> { - List existingJobsOrGroups = - c.getJobIds().stream().filter(mlMetadata::isGroupOrJob).collect(Collectors.toList()); - jobManager.updateProcessOnCalendarChanged(existingJobsOrGroups); + jobManager.updateProcessOnCalendarChanged(c.getJobIds()); listener.onResponse(new PutCalendarAction.Response(c)); }, listener::onFailure); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 2d67e64ec60e7..2042d917f6057 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -30,6 +30,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MachineLearningField; import 
org.elasticsearch.xpack.core.ml.MlMetadata; @@ -67,6 +68,7 @@ import java.util.function.Consumer; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; /** * Allows interactions with jobs. The managed interactions include: @@ -183,6 +185,9 @@ public void putJob(PutJobAction.Request request, AnalysisRegistry analysisRegist DEPRECATION_LOGGER.deprecated("Creating jobs with delimited data format is deprecated. Please use xcontent instead."); } + // pre-flight check, not necessarily required, but avoids figuring this out while on the CS update thread + XPackPlugin.checkReadyForXPackCustomMetadata(state); + MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state); if (currentMlMetadata.getJobs().containsKey(job.getId())) { actionListener.onFailure(ExceptionsHelper.jobAlreadyExists(job.getId())); @@ -420,8 +425,13 @@ public void updateProcessOnFilterChanged(MlFilter filter) { public void updateProcessOnCalendarChanged(List calendarJobIds) { ClusterState clusterState = clusterService.state(); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); + + List existingJobsOrGroups = + calendarJobIds.stream().filter(mlMetadata::isGroupOrJob).collect(Collectors.toList()); + Set expandedJobIds = new HashSet<>(); - calendarJobIds.forEach(jobId -> expandedJobIds.addAll(expandJobIds(jobId, true, clusterState))); + existingJobsOrGroups.forEach(jobId -> expandedJobIds.addAll(expandJobIds(jobId, true, clusterState))); for (String jobId : expandedJobIds) { if (isJobOpen(clusterState, jobId)) { updateJobProcessNotifier.submitJobUpdate(UpdateParams.scheduledEventsUpdate(jobId), ActionListener.wrap( @@ -559,6 +569,7 @@ private static MlMetadata.Builder createMlMetadataBuilder(ClusterState currentSt } private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); ClusterState.Builder newState = ClusterState.builder(currentState); newState.metaData(MetaData.builder(currentState.getMetaData()).putCustom(MLMetadataField.TYPE, builder.build()).build()); return newState.build(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java index d7b10fb622bdf..8014bacf1e0f9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java @@ -1114,7 +1114,7 @@ public void getForecastRequestStats(String jobId, String forecastId, Consumer handler.accept(result.result), errorHandler, () -> null); } - public void updateCalendar(String calendarId, Set jobIdsToAdd, Set jobIdsToRemove, MlMetadata mlMetadata, + public void updateCalendar(String calendarId, Set jobIdsToAdd, Set jobIdsToRemove, Consumer handler, Consumer errorHandler) { ActionListener getCalendarListener = ActionListener.wrap( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java index 9b141380c65eb..2f1aa29d919d9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ml; import org.elasticsearch.common.settings.Settings; -import 
org.elasticsearch.env.Environment; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.monitor.os.OsStats; import org.elasticsearch.test.ESTestCase; @@ -37,13 +36,11 @@ public void testNoAttributes_givenNoClash() { public void testNoAttributes_givenSameAndMlEnabled() { Settings.Builder builder = Settings.builder(); if (randomBoolean()) { - builder.put("xpack.ml.enabled", true); - builder.put("node.attr.ml.enabled", true); + builder.put("xpack.ml.enabled", randomBoolean()); } if (randomBoolean()) { int maxOpenJobs = randomIntBetween(5, 15); builder.put("xpack.ml.max_open_jobs", maxOpenJobs); - builder.put("node.attr.ml.max_open_jobs", maxOpenJobs); } MachineLearning machineLearning = createMachineLearning(builder.put("path.home", createTempDir()).build()); assertNotNull(machineLearning.additionalSettings()); @@ -51,16 +48,8 @@ public void testNoAttributes_givenSameAndMlEnabled() { public void testNoAttributes_givenClash() { Settings.Builder builder = Settings.builder(); - boolean enabled = true; - if (randomBoolean()) { - enabled = randomBoolean(); - builder.put("xpack.ml.enabled", enabled); - } - if (randomBoolean()) { - builder.put("xpack.ml.max_open_jobs", randomIntBetween(9, 12)); - } if (randomBoolean()) { - builder.put("node.attr.ml.enabled", !enabled); + builder.put("node.attr.ml.enabled", randomBoolean()); } else { builder.put("node.attr.ml.max_open_jobs", randomIntBetween(13, 15)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobProviderIT.java index 11f714ae449a0..120f04e95e70b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobProviderIT.java @@ -244,7 +244,7 @@ private void updateCalendar(String calendarId, Set idsToAdd, Set throws Exception { CountDownLatch latch = new CountDownLatch(1); AtomicReference exceptionHolder = new AtomicReference<>(); - jobProvider.updateCalendar(calendarId, idsToAdd, idsToRemove, mlMetadata, + jobProvider.updateCalendar(calendarId, idsToAdd, idsToRemove, r -> latch.countDown(), e -> { exceptionHolder.set(e); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java index 437a965dcf02a..61fc73d1641e1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java @@ -197,6 +197,7 @@ public MockClientBuilder prepareSearchExecuteListener(String index, SearchRespon when(builder.setFetchSource(anyBoolean())).thenReturn(builder); when(builder.setScroll(anyString())).thenReturn(builder); when(builder.addDocValueField(any(String.class))).thenReturn(builder); + when(builder.addDocValueField(any(String.class), any(String.class))).thenReturn(builder); when(builder.addSort(any(String.class), any(SortOrder.class))).thenReturn(builder); when(builder.setQuery(any())).thenReturn(builder); when(builder.setSize(anyInt())).thenReturn(builder); @@ -246,6 +247,7 @@ public MockClientBuilder prepareSearch(String index, String type, int from, int when(builder.setSize(eq(size))).thenReturn(builder); when(builder.setFetchSource(eq(true))).thenReturn(builder); 
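// note: the added two-argument stubs below mirror the new
// addDocValueField(field, format) overload used elsewhere in this change;
// an unstubbed method on a Mockito mock returns null, which would break the
// fluent builder chain for callers that pass an explicit doc-value format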
when(builder.addDocValueField(any(String.class))).thenReturn(builder); + when(builder.addDocValueField(any(String.class), any(String.class))).thenReturn(builder); when(builder.addSort(any(String.class), any(SortOrder.class))).thenReturn(builder); when(builder.get()).thenReturn(response); when(client.prepareSearch(eq(index))).thenReturn(builder); @@ -262,6 +264,7 @@ public MockClientBuilder prepareSearchAnySize(String index, String type, SearchR when(builder.setSize(any(Integer.class))).thenReturn(builder); when(builder.setFetchSource(eq(true))).thenReturn(builder); when(builder.addDocValueField(any(String.class))).thenReturn(builder); + when(builder.addDocValueField(any(String.class), any(String.class))).thenReturn(builder); when(builder.addSort(any(String.class), any(SortOrder.class))).thenReturn(builder); when(builder.get()).thenReturn(response); when(client.prepareSearch(eq(index))).thenReturn(builder); diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat index d44ca227c07fd..8c8a0c69f5626 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.core.ssl.CertificateGenerateTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat index 4426fb87d3ba6..f898f885ce0a3 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.core.ssl.CertificateTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat index 79090b6490790..f9486979e6bc3 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. 
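From here on, the x-pack Windows CLI scripts are all rewritten the same way: instead of sourcing the env scripts and assembling the `%JAVA%` command line themselves, they export `ES_ADDITIONAL_SOURCES` and delegate to the shared `elasticsearch-cli.bat` launcher, which supplies the JVM options, the `es.path.*`/`es.distribution.*` system properties, and the classpath. Reassembled from the hunks, each script reduces to this shape, with only the main class varying:

--------------------------------------------
setlocal enabledelayedexpansion
setlocal enableextensions

set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env
call "%~dp0elasticsearch-cli.bat" ^
  org.elasticsearch.xpack.core.ssl.CertificateGenerateTool ^
  %* ^
  || exit /b 1

endlocal
endlocal
--------------------------------------------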
setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat index 9e5625d0b912e..4ddb8da3ff143 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat index b449ca09a6c30..f380e5f55271f 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat index 3ee9dcb3ba9cb..1eff4aad8251e 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. 
setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat index b32b9398f9971..7f7347d706ff5 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.file.tool.UsersTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat b/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat index 035f1c965ffb6..d003412fc08d9 100644 --- a/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat +++ b/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat @@ -2,6 +2,4 @@ rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one rem or more contributor license agreements. Licensed under the Elastic License; rem you may not use this file except in compliance with the Elastic License. 
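The hunk below is the flip side of that delegation: `x-pack-security-env.bat` drops its own `call "%~dp0x-pack-env.bat"`, because the launcher now sources each entry of `ES_ADDITIONAL_SOURCES` in order (`x-pack-env` before `x-pack-security-env`), so the old chained call would source `x-pack-env` twice. Something along these lines, assuming a loop in the launcher, which is not part of this diff:

--------------------------------------------
rem sketch of how the launcher could walk ES_ADDITIONAL_SOURCES; the real
rem implementation lives in elasticsearch-cli.bat and is not shown in this diff
if defined ES_ADDITIONAL_SOURCES (
  for %%a in ("%ES_ADDITIONAL_SOURCES:;=","%") do (
    call "%~dp0%%~a.bat" || exit /b 1
  )
)
--------------------------------------------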
-call "%~dp0x-pack-env.bat" || exit /b 1 - set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-security/* diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 44f5afd4bdb07..133093df33a13 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -317,7 +317,7 @@ public Collection createGuiceModules() { } modules.add(b -> XPackPlugin.bindFeatureSet(b, SecurityFeatureSet.class)); - + if (enabled == false) { modules.add(b -> { b.bind(Realms.class).toProvider(Providers.of(null)); // for SecurityFeatureSet @@ -903,15 +903,6 @@ public UnaryOperator> getIndexTemplateMetaDat }; } - @Override - public Map> getInitialClusterStateCustomSupplier() { - if (enabled) { - return Collections.singletonMap(TokenMetaData.TYPE, () -> tokenService.get().getTokenMetaData()); - } else { - return Collections.emptyMap(); - } - } - @Override public Function> getFieldFilter() { if (enabled) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index d23415f87dfcc..2934fb8062de4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -8,6 +8,9 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Priority; import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.UnicodeUtil; import org.elasticsearch.ElasticsearchSecurityException; @@ -63,6 +66,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.security.ScrollHelper; @@ -107,6 +111,7 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; @@ -1327,6 +1332,8 @@ public TimeValue masterNodeTimeout() { @Override public ClusterState execute(ClusterState currentState) throws Exception { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); + if (tokenMetaData.equals(currentState.custom(TokenMetaData.TYPE))) { return currentState; } @@ -1347,6 +1354,15 @@ private void initialize(ClusterService clusterService) { return; } + if (state.nodes().isLocalNodeElectedMaster()) { + if (XPackPlugin.isReadyForXPackCustomMetadata(state)) { + installTokenMetadata(state.metaData()); + } else { + logger.debug("cannot add token metadata to cluster as the following nodes might not understand the metadata: {}", + () -> XPackPlugin.nodesNotReadyForXPackCustomMetadata(state)); + } + } + TokenMetaData custom = event.state().custom(TokenMetaData.TYPE); if (custom != 
null && custom.equals(getTokenMetaData()) == false) { logger.info("refresh keys"); @@ -1360,6 +1376,39 @@ private void initialize(ClusterService clusterService) { }); } + // to prevent too many cluster state update tasks to be queued for doing the same update + private final AtomicBoolean installTokenMetadataInProgress = new AtomicBoolean(false); + + private void installTokenMetadata(MetaData metaData) { + if (metaData.custom(TokenMetaData.TYPE) == null) { + if (installTokenMetadataInProgress.compareAndSet(false, true)) { + clusterService.submitStateUpdateTask("install-token-metadata", new ClusterStateUpdateTask(Priority.URGENT) { + @Override + public ClusterState execute(ClusterState currentState) { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); + + if (currentState.custom(TokenMetaData.TYPE) == null) { + return ClusterState.builder(currentState).putCustom(TokenMetaData.TYPE, getTokenMetaData()).build(); + } else { + return currentState; + } + } + + @Override + public void onFailure(String source, Exception e) { + installTokenMetadataInProgress.set(false); + logger.error("unable to install token metadata", e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + installTokenMetadataInProgress.set(false); + } + }); + } + } + } + /** * For testing */ diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java index 0f511af6b57d8..1c9d779c2cc37 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.transport.nio; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; @@ -137,8 +138,10 @@ public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel @Override public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel); - ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, SecurityNioTransport.this::acceptChannel, - (e) -> {}); + Consumer exceptionHandler = (e) -> logger.error(() -> + new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e); + Consumer acceptor = SecurityNioTransport.this::acceptChannel; + ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, acceptor, exceptionHandler); nioChannel.setContext(context); return nioChannel; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index 00b46b332cb7c..815f26942767a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -47,6 +47,7 @@ import org.junit.AfterClass; import org.junit.Before; import 
org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.rules.ExternalResource; @@ -163,24 +164,6 @@ public static void initDefaultSettings() { public static void destroyDefaultSettings() { SECURITY_DEFAULT_SETTINGS = null; customSecuritySettingsSource = null; - // Wait for the network threads to finish otherwise there is the possibility that one of - // the threads lingers and trips the thread leak detector - try { - GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (IllegalStateException e) { - if (e.getMessage().equals("thread was not started") == false) { - throw e; - } - // ignore since the thread was never started - } - - try { - ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } } @Rule @@ -204,6 +187,35 @@ protected void before() throws Throwable { } }; + /** + * A JUnit class level rule that runs after the AfterClass method in {@link ESIntegTestCase}, + * which stops the cluster. After the cluster is stopped, there are a few netty threads that + * can linger, so we wait for them to finish otherwise these lingering threads can intermittently + * trigger the thread leak detector + */ + @ClassRule + public static final ExternalResource STOP_NETTY_RESOURCE = new ExternalResource() { + @Override + protected void after() { + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (IllegalStateException e) { + if (e.getMessage().equals("thread was not started") == false) { + throw e; + } + // ignore since the thread was never started + } + + try { + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + }; + @Before //before methods from the superclass are run before this, which means that the current cluster is ready to go public void assertXPackIsInstalled() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java index 1ee654c0baffc..cda627806e7b5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java @@ -26,6 +26,7 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.rules.ExternalResource; @@ -97,25 +98,6 @@ private static void tearDownRestClient() { IOUtils.closeWhileHandlingException(restClient); restClient = null; } - - // Wait for the network threads to finish otherwise there is the possibility that one of - // the threads lingers and trips the thread leak detector - try { - GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (IllegalStateException e) { - if (e.getMessage().equals("thread was not started") == false) { - throw e; - } - // ignore since the thread was never started - } - - try { - ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } } @Rule @@ -130,6 +112,35 @@ protected void before() { } }; 
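Both security test base classes get the same treatment here: the netty thread-drain logic moves out of the static teardown methods and into a `@ClassRule`. The reason is JUnit 4 ordering: a class rule wraps the whole class, so its `after()` runs only once every `@AfterClass` method has finished, including the one in `ESIntegTestCase` that stops the cluster, which is the earliest point at which waiting for `GlobalEventExecutor` and `ThreadDeathWatcher` to go idle can actually succeed. A self-contained toy example of that ordering (names invented):

--------------------------------------------
import org.junit.AfterClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.ExternalResource;

public class ClassRuleOrderingExample {
    @ClassRule
    public static final ExternalResource OUTERMOST = new ExternalResource() {
        @Override
        protected void after() {
            System.out.println("runs third: class rule after()");
        }
    };

    @AfterClass
    public static void afterClass() {
        System.out.println("runs second: @AfterClass (cluster teardown lives here)");
    }

    @Test
    public void test() {
        System.out.println("runs first: test body");
    }
}
--------------------------------------------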
+ /** + * A JUnit class level rule that runs after the AfterClass method in {@link ESIntegTestCase}, + * which stops the cluster. After the cluster is stopped, there are a few netty threads that + * can linger, so we wait for them to finish otherwise these lingering threads can intermittently + * trigger the thread leak detector + */ + @ClassRule + public static final ExternalResource STOP_NETTY_RESOURCE = new ExternalResource() { + @Override + protected void after() { + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (IllegalStateException e) { + if (e.getMessage().equals("thread was not started") == false) { + throw e; + } + // ignore since the thread was never started + } + + try { + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + }; + @Before //before methods from the superclass are run before this, which means that the current cluster is ready to go public void assertXPackIsInstalled() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java index ec448f14e9160..dab3d023f65d3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java @@ -214,7 +214,7 @@ protected void addDefaultSecurityTransportType(Settings.Builder builder, Setting mockPlugins.add(getTestTransportPlugin()); } remoteCluster = new InternalTestCluster(randomLong(), createTempDir(), false, true, numNodes, numNodes, cluster2Name, - cluster2SettingsSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, mockPlugins, + cluster2SettingsSource, 0, SECOND_CLUSTER_NODE_PREFIX, mockPlugins, useSecurity ? 
                useSecurity ? getClientWrapper() : Function.identity());
         remoteCluster.beforeTest(random(), 0.5);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java
index 7002803a3d49c..96bba962237fe 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java
@@ -117,7 +117,7 @@ public Settings nodeSettings(int nodeOrdinal) {
             }
         };
         remoteCluster = new InternalTestCluster(randomLong(), createTempDir(), false, true, numNodes, numNodes,
-                cluster2Name, cluster2SettingsSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, getMockPlugins(), getClientWrapper());
+                cluster2Name, cluster2SettingsSource, 0, SECOND_CLUSTER_NODE_PREFIX, getMockPlugins(), getClientWrapper());
         remoteCluster.beforeTest(random(), 0.0);
         assertNoTimeout(remoteCluster.client().admin().cluster().prepareHealth().setWaitForGreenStatus().get());
     }
diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1
deleted file mode 100644
index 50392f59374a8..0000000000000
--- a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-bf8f9e8284a54af18545574cb4a530da0deb968a
\ No newline at end of file
diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1
new file mode 100644
index 0000000000000..4aecfc6a550d3
--- /dev/null
+++ b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1
@@ -0,0 +1 @@
+e118e4d05070378516b9055184b74498ba528dee
\ No newline at end of file
diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1
deleted file mode 100644
index 50392f59374a8..0000000000000
--- a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-bf8f9e8284a54af18545574cb4a530da0deb968a
\ No newline at end of file
diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1
new file mode 100644
index 0000000000000..4aecfc6a550d3
--- /dev/null
+++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1
@@ -0,0 +1 @@
+e118e4d05070378516b9055184b74498ba528dee
\ No newline at end of file
diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java
index 061b70a55d975..76f73fada0663 100644
--- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java
+++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java
@@ -6,6 +6,7 @@
 package org.elasticsearch.xpack.sql.plugin;

 import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
 import org.elasticsearch.test.AbstractStreamableTestCase;

 import java.io.IOException;
@@ -18,7 +19,7 @@ protected SqlTranslateResponse createTestInstance() {
         if (randomBoolean()) {
             long docValues = iterations(5, 10);
             for (int i = 0; i < docValues; i++) {
-                s.docValueField(randomAlphaOfLength(10));
+                s.docValueField(randomAlphaOfLength(10), DocValueFieldsContext.USE_DEFAULT_FORMAT);
             }
         }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java
index 62941a5b14f07..055e34758cc75 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java
@@ -341,12 +341,12 @@ protected void handleResponse(SearchResponse response, ActionListener<SchemaRowSet> listener) {
         private HitExtractor createExtractor(FieldExtraction ref) {
             if (ref instanceof SearchHitFieldRef) {
                 SearchHitFieldRef f = (SearchHitFieldRef) ref;
-                return new FieldHitExtractor(f.name(), f.useDocValue(), f.hitName());
+                return new FieldHitExtractor(f.name(), f.getDataType(), f.useDocValue(), f.hitName());
             }

             if (ref instanceof ScriptFieldRef) {
                 ScriptFieldRef f = (ScriptFieldRef) ref;
-                return new FieldHitExtractor(f.name(), true);
+                return new FieldHitExtractor(f.name(), null, true);
             }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java
@@ -33,7 +35,7 @@ public class SqlSourceBuilder {
     final Set<String> sourceFields = new LinkedHashSet<>();
-    final Set<String> docFields = new LinkedHashSet<>();
+    final Set<FieldAndFormat> docFields = new LinkedHashSet<>();
     final Map<String, Script> scriptFields = new LinkedHashMap<>();

     boolean trackScores = false;
@@ -47,8 +49,8 @@ public void addSourceField(String field) {
     /**
      * Retrieve the requested field from doc values (or fielddata) of the document
      */
-    public void addDocField(String field) {
-        docFields.add(field);
+    public void addDocField(String field, String format) {
+        docFields.add(new FieldAndFormat(field, format));
     }

     /**
@@ -67,7 +69,8 @@ public void build(SearchSourceBuilder sourceBuilder) {
         if (!sourceFields.isEmpty()) {
             sourceBuilder.fetchSource(sourceFields.toArray(Strings.EMPTY_ARRAY), null);
         }
-        docFields.forEach(sourceBuilder::docValueField);
+        docFields.forEach(field -> sourceBuilder.docValueField(field.field,
+                field.format == null ? DocValueFieldsContext.USE_DEFAULT_FORMAT : field.format));
         scriptFields.forEach(sourceBuilder::scriptField);
     }
 }
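The new addDocField(String, String) shown above threads an optional per-field format through to SearchSourceBuilder.docValueField, falling back to the use_field_mapping default when none is given. A short usage sketch; the release_date field name is invented and the behavior assumes the methods exactly as they appear in this patch:

--------------------------------------------
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder;

public class DocFieldFormatSketch {
    public static void main(String[] args) {
        SqlSourceBuilder ssb = new SqlSourceBuilder();
        ssb.addDocField("count", null);                  // null -> USE_DEFAULT_FORMAT ("use_field_mapping")
        ssb.addDocField("release_date", "epoch_millis"); // explicit format, e.g. for a date column

        SearchSourceBuilder source = new SearchSourceBuilder();
        ssb.build(source);
        // source.docValueFields() now holds {count, use_field_mapping}
        // and {release_date, epoch_millis}
        System.out.println(source);
    }
}
--------------------------------------------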
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java
index 159127fb24cb2..66e177530547f 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java
@@ -5,12 +5,16 @@
  */
 package org.elasticsearch.xpack.sql.execution.search.extractor;

+import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
+import org.elasticsearch.xpack.sql.type.DataType;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
 import org.joda.time.ReadableDateTime;

 import java.io.IOException;
@@ -41,15 +45,17 @@ private static String[] sourcePath(String name, boolean useDocValue, String hitN
     }

     private final String fieldName, hitName;
+    private final DataType dataType;
     private final boolean useDocValue;
     private final String[] path;

-    public FieldHitExtractor(String name, boolean useDocValue) {
-        this(name, useDocValue, null);
+    public FieldHitExtractor(String name, DataType dataType, boolean useDocValue) {
+        this(name, dataType, useDocValue, null);
     }

-    public FieldHitExtractor(String name, boolean useDocValue, String hitName) {
+    public FieldHitExtractor(String name, DataType dataType, boolean useDocValue, String hitName) {
         this.fieldName = name;
+        this.dataType = dataType;
         this.useDocValue = useDocValue;
         this.hitName = hitName;
@@ -64,6 +70,16 @@ public FieldHitExtractor(String name, boolean useDocValue, String hitName) {
     FieldHitExtractor(StreamInput in) throws IOException {
         fieldName = in.readString();
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
+            String esType = in.readOptionalString();
+            if (esType != null) {
+                dataType = DataType.fromEsType(esType);
+            } else {
+                dataType = null;
+            }
+        } else {
+            dataType = null;
+        }
         useDocValue = in.readBoolean();
         hitName = in.readOptionalString();
         path = sourcePath(fieldName, useDocValue, hitName);
@@ -77,6 +93,9 @@ public String getWriteableName() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(fieldName);
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
+            out.writeOptionalString(dataType == null ? null : dataType.esType);
+        }
         out.writeBoolean(useDocValue);
         out.writeOptionalString(hitName);
     }
@@ -117,6 +136,9 @@ private Object unwrapMultiValue(Object values) {
         if (values instanceof Map) {
             throw new SqlIllegalArgumentException("Objects (returned by [{}]) are not supported", fieldName);
         }
+        if (values instanceof String && dataType == DataType.DATE) {
+            return new DateTime(Long.parseLong(values.toString()), DateTimeZone.UTC);
+        }
         if (values instanceof Long || values instanceof Double || values instanceof String || values instanceof Boolean
                 || values instanceof ReadableDateTime) {
             return values;
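The read and write sides above gate the new optional dataType string on the stream version, the standard pattern for mixed-version clusters during rolling upgrades: a pre-6.4 peer neither writes nor expects the extra field. A generic sketch of the pattern with an invented Writeable (not part of this change):

--------------------------------------------
import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Invented example class illustrating version-gated serialization
public class VersionGatedField implements Writeable {
    private final String required;  // present on every wire version
    private final String addedIn64; // only understood by 6.4.0+ nodes

    public VersionGatedField(String required, String addedIn64) {
        this.required = required;
        this.addedIn64 = addedIn64;
    }

    public VersionGatedField(StreamInput in) throws IOException {
        required = in.readString();
        // read the optional field only when the sender was new enough to write it
        addedIn64 = in.getVersion().onOrAfter(Version.V_6_4_0) ? in.readOptionalString() : null;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(required);
        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
            out.writeOptionalString(addedIn64);
        }
    }
}
--------------------------------------------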
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java
index 6aa6b6a50e9bc..d135b8a086566 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java
@@ -9,9 +9,11 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
 import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor;
+import org.joda.time.DateTime;
 import org.joda.time.DateTimeFieldType;
 import org.joda.time.DateTimeZone;
 import org.joda.time.ReadableDateTime;
+import org.joda.time.ReadableInstant;

 import java.io.IOException;
 import java.util.Objects;
@@ -78,15 +80,21 @@ public Object process(Object l) {
             return null;
         }

-        if (!(l instanceof ReadableDateTime)) {
-            throw new SqlIllegalArgumentException("A date/time is required; received {}", l);
+        ReadableDateTime dt;
+        if (l instanceof String) {
+            // 6.4+
+            final long millis = Long.parseLong(l.toString());
+            dt = new DateTime(millis, DateTimeZone.forTimeZone(timeZone));
+        } else if (l instanceof ReadableInstant) {
+            // 6.3-
+            dt = (ReadableDateTime) l;
+            if (!TimeZone.getTimeZone("UTC").equals(timeZone)) {
+                dt = dt.toDateTime().withZone(DateTimeZone.forTimeZone(timeZone));
+            }
+        } else {
+            throw new SqlIllegalArgumentException("A string or a date is required; received {}", l);
         }

-        ReadableDateTime dt = (ReadableDateTime) l;
-
-        if (!TimeZone.getTimeZone("UTC").equals(timeZone)) {
-            dt = dt.toDateTime().withZone(DateTimeZone.forTimeZone(timeZone));
-        }
         return extractor.extract(dt);
     }
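The new branching accepts both input shapes: pre-6.4 nodes deliver a Joda ReadableDateTime extracted from _source, while 6.4+ nodes deliver the epoch-millis string produced by the epoch_millis doc-value format requested elsewhere in this change. A tiny sketch of the 6.4+ conversion (the sample value is arbitrary):

--------------------------------------------
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.ReadableDateTime;

public class EpochMillisSketch {
    public static void main(String[] args) {
        // what a 6.4+ node sends for a date doc value formatted as epoch_millis
        Object fromWire = "1526467911780";
        ReadableDateTime dt = new DateTime(Long.parseLong((String) fromWire), DateTimeZone.UTC);
        System.out.println(dt); // 2018-05-16T10:51:51.780Z
    }
}
--------------------------------------------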
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java
index bca180315d9e5..9f9c1bb21bb31 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java
@@ -173,7 +173,7 @@ private String aliasName(Attribute attr) {
     // reference methods
     //
     private FieldExtraction topHitFieldRef(FieldAttribute fieldAttr) {
-        return new SearchHitFieldRef(aliasName(fieldAttr), fieldAttr.field().hasDocValues());
+        return new SearchHitFieldRef(aliasName(fieldAttr), fieldAttr.field().getDataType(), fieldAttr.field().hasDocValues());
     }

     private Tuple<QueryContainer, FieldExtraction> nestedHitFieldRef(FieldAttribute attr) {
@@ -184,7 +184,8 @@ private Tuple<QueryContainer, FieldExtraction> nestedHitFieldRef(FieldAttribute
         Query q = rewriteToContainNestedField(query, attr.location(),
                 attr.nestedParent().name(), name, attr.field().hasDocValues());
-        SearchHitFieldRef nestedFieldRef = new SearchHitFieldRef(name, attr.field().hasDocValues(), attr.parent().name());
+        SearchHitFieldRef nestedFieldRef = new SearchHitFieldRef(name, attr.field().getDataType(),
+                attr.field().hasDocValues(), attr.parent().name());
         nestedRefs.add(nestedFieldRef);

         return new Tuple<>(new QueryContainer(q, aggs, columns, aliases, pseudoFunctions, scalarFunctions, sort, limit), nestedFieldRef);
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java
index 6a7f24b447e55..7f799108d28ec 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java
@@ -6,18 +6,21 @@
 package org.elasticsearch.xpack.sql.querydsl.container;

 import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder;
+import org.elasticsearch.xpack.sql.type.DataType;

 public class SearchHitFieldRef extends FieldReference {
     private final String name;
+    private final DataType dataType;
     private final boolean docValue;
     private final String hitName;

-    public SearchHitFieldRef(String name, boolean useDocValueInsteadOfSource) {
-        this(name, useDocValueInsteadOfSource, null);
+    public SearchHitFieldRef(String name, DataType dataType, boolean useDocValueInsteadOfSource) {
+        this(name, dataType, useDocValueInsteadOfSource, null);
     }

-    public SearchHitFieldRef(String name, boolean useDocValueInsteadOfSource, String hitName) {
+    public SearchHitFieldRef(String name, DataType dataType, boolean useDocValueInsteadOfSource, String hitName) {
         this.name = name;
+        this.dataType = dataType;
         this.docValue = useDocValueInsteadOfSource;
         this.hitName = hitName;
     }
@@ -31,6 +34,10 @@ public String name() {
         return name;
     }

+    public DataType getDataType() {
+        return dataType;
+    }
+
     public boolean useDocValue() {
         return docValue;
     }
@@ -42,7 +49,8 @@ public void collectFields(SqlSourceBuilder sourceBuilder) {
             return;
         }
         if (docValue) {
-            sourceBuilder.addDocField(name);
+            String format = dataType == DataType.DATE ? "epoch_millis" : null;
+            sourceBuilder.addDocField(name, format);
         } else {
             sourceBuilder.addSourceField(name);
         }
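Combined with SqlSourceBuilder above, the effect of collectFields for a DATE column can be sketched as follows (field name invented; assumes the constructors and methods exactly as defined in this patch):

--------------------------------------------
import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder;
import org.elasticsearch.xpack.sql.querydsl.container.SearchHitFieldRef;
import org.elasticsearch.xpack.sql.type.DataType;

public class DateDocValueSketch {
    public static void main(String[] args) {
        SearchHitFieldRef ref = new SearchHitFieldRef("release_date", DataType.DATE, true);
        SqlSourceBuilder ssb = new SqlSourceBuilder();
        // DATE + doc values -> addDocField("release_date", "epoch_millis");
        // any other data type would pass a null format instead
        ref.collectFields(ssb);
    }
}
--------------------------------------------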
"epoch_millis" : null; + sourceBuilder.addDocField(name, format); } else { sourceBuilder.addSourceField(name); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java index a97f66763a9b6..8988f70672ac6 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java @@ -16,6 +16,7 @@ import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.transport.Netty4Plugin; @@ -150,7 +151,8 @@ public void testSqlTranslateActionLicense() throws Exception { SqlTranslateResponse response = client().prepareExecute(SqlTranslateAction.INSTANCE).query("SELECT * FROM test").get(); SearchSourceBuilder source = response.source(); - assertThat(source.docValueFields(), Matchers.contains("count")); + assertThat(source.docValueFields(), Matchers.contains( + new DocValueFieldsContext.FieldAndFormat("count", DocValueFieldsContext.USE_DEFAULT_FORMAT))); FetchSourceContext fetchSource = source.fetchSource(); assertThat(fetchSource.includes(), Matchers.arrayContaining("data")); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java index 5de9cfca97a36..a4c440eb9dfad 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction; @@ -35,7 +36,9 @@ public void testSqlTranslateAction() throws Exception { FetchSourceContext fetch = source.fetchSource(); assertEquals(true, fetch.fetchSource()); assertArrayEquals(new String[] { "data" }, fetch.includes()); - assertEquals(singletonList("count"), source.docValueFields()); + assertEquals( + singletonList(new DocValueFieldsContext.FieldAndFormat("count", DocValueFieldsContext.USE_DEFAULT_FORMAT)), + source.docValueFields()); assertEquals(singletonList(SortBuilders.fieldSort("count")), source.sorts()); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java index 0d57ad97c9831..6ee843c2c6371 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java @@ -24,8 +24,8 @@ public void testSqlSourceBuilder() { ssb.trackScores(); ssb.addSourceField("foo"); 
ssb.addSourceField("foo2"); - ssb.addDocField("bar"); - ssb.addDocField("bar2"); + ssb.addDocField("bar", null); + ssb.addDocField("bar2", null); final Script s = new Script("eggplant"); ssb.addScriptField("baz", s); final Script s2 = new Script("potato"); @@ -35,7 +35,7 @@ public void testSqlSourceBuilder() { assertTrue(source.trackScores()); FetchSourceContext fsc = source.fetchSource(); assertThat(Arrays.asList(fsc.includes()), contains("foo", "foo2")); - assertThat(source.docValueFields(), contains("bar", "bar2")); + assertThat(source.docValueFields().stream().map(ff -> ff.field).collect(Collectors.toList()), contains("bar", "bar2")); Map scriptFields = source.scriptFields() .stream() .collect(Collectors.toMap(SearchSourceBuilder.ScriptField::fieldName, SearchSourceBuilder.ScriptField::script)); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java index 74721eca22af1..375de112fe878 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java @@ -70,7 +70,7 @@ protected ComputingExtractor mutateInstance(ComputingExtractor instance) throws public void testGet() { String fieldName = randomAlphaOfLength(5); ChainingProcessor extractor = new ChainingProcessor( - new HitExtractorProcessor(new FieldHitExtractor(fieldName, true)), + new HitExtractorProcessor(new FieldHitExtractor(fieldName, null, true)), new MathProcessor(MathOperation.LOG)); int times = between(1, 1000); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index de36969898c20..a9e1349e8316b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -14,6 +14,9 @@ import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; +import org.elasticsearch.xpack.sql.type.DataType; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.ArrayList; @@ -31,7 +34,7 @@ public class FieldHitExtractorTests extends AbstractWireSerializingTestCase instanceReader() { @Override protected FieldHitExtractor mutateInstance(FieldHitExtractor instance) throws IOException { - return new FieldHitExtractor(instance.fieldName() + "mutated", true, instance.hitName()); + return new FieldHitExtractor(instance.fieldName() + "mutated", null, true, instance.hitName()); } @AwaitsFix(bugUrl = "implement after we're sure of the InnerHitExtractor's implementation") @@ -60,7 +63,7 @@ public void testGetDottedValueWithDocValues() { String child = randomAlphaOfLength(5); String fieldName = grandparent + "." + parent + "." 
-        FieldHitExtractor extractor = new FieldHitExtractor(fieldName, true);
+        FieldHitExtractor extractor = new FieldHitExtractor(fieldName, null, true);
         int times = between(1, 1000);
         for (int i = 0; i < times; i++) {
@@ -84,7 +87,7 @@ public void testGetDottedValueWithSource() throws Exception {
         String child = randomAlphaOfLength(5);
         String fieldName = grandparent + "." + parent + "." + child;
-        FieldHitExtractor extractor = new FieldHitExtractor(fieldName, false);
+        FieldHitExtractor extractor = new FieldHitExtractor(fieldName, null, false);
         int times = between(1, 1000);
         for (int i = 0; i < times; i++) {
@@ -123,7 +126,7 @@ public void testGetDottedValueWithSource() throws Exception {

     public void testGetDocValue() {
         String fieldName = randomAlphaOfLength(5);
-        FieldHitExtractor extractor = new FieldHitExtractor(fieldName, true);
+        FieldHitExtractor extractor = new FieldHitExtractor(fieldName, null, true);
         int times = between(1, 1000);
         for (int i = 0; i < times; i++) {
@@ -139,9 +142,19 @@ public void testGetDocValue() {
         }
     }

+    public void testGetDate() {
+        long millis = 1526467911780L;
+        List<Object> documentFieldValues = Collections.singletonList(Long.toString(millis));
+        SearchHit hit = new SearchHit(1);
+        DocumentField field = new DocumentField("my_date_field", documentFieldValues);
+        hit.fields(singletonMap("my_date_field", field));
+        FieldHitExtractor extractor = new FieldHitExtractor("my_date_field", DataType.DATE, true);
+        assertEquals(new DateTime(millis, DateTimeZone.UTC), extractor.extract(hit));
+    }
+
     public void testGetSource() throws IOException {
         String fieldName = randomAlphaOfLength(5);
-        FieldHitExtractor extractor = new FieldHitExtractor(fieldName, false);
+        FieldHitExtractor extractor = new FieldHitExtractor(fieldName, null, false);
         int times = between(1, 1000);
         for (int i = 0; i < times; i++) {
@@ -164,12 +177,12 @@ public void testGetSource() throws IOException {
     }

     public void testToString() {
-        assertEquals("hit.field@hit", new FieldHitExtractor("hit.field", true, "hit").toString());
+        assertEquals("hit.field@hit", new FieldHitExtractor("hit.field", null, true, "hit").toString());
     }

     public void testMultiValuedDocValue() {
         String fieldName = randomAlphaOfLength(5);
-        FieldHitExtractor fe = new FieldHitExtractor(fieldName, true);
+        FieldHitExtractor fe = new FieldHitExtractor(fieldName, null, true);
         SearchHit hit = new SearchHit(1);
         DocumentField field = new DocumentField(fieldName, asList("a", "b"));
         hit.fields(singletonMap(fieldName, field));
@@ -179,7 +192,7 @@ public void testMultiValuedDocValue() {

     public void testMultiValuedSourceValue() throws IOException {
         String fieldName = randomAlphaOfLength(5);
-        FieldHitExtractor fe = new FieldHitExtractor(fieldName, false);
+        FieldHitExtractor fe = new FieldHitExtractor(fieldName, null, false);
         SearchHit hit = new SearchHit(1);
         XContentBuilder source = JsonXContent.contentBuilder();
         source.startObject(); {
@@ -194,7 +207,7 @@ public void testMultiValuedSourceValue() throws IOException {

     public void testSingleValueArrayInSource() throws IOException {
         String fieldName = randomAlphaOfLength(5);
-        FieldHitExtractor fe = new FieldHitExtractor(fieldName, false);
+        FieldHitExtractor fe = new FieldHitExtractor(fieldName, null, false);
         SearchHit hit = new SearchHit(1);
         XContentBuilder source = JsonXContent.contentBuilder();
         Object value = randomValue();
@@ -208,14 +221,14 @@ public void testSingleValueArrayInSource() throws IOException {
     }

     public void testExtractSourcePath() {
-        FieldHitExtractor fe = new FieldHitExtractor("a.b.c", false);
+        FieldHitExtractor fe = new FieldHitExtractor("a.b.c", null, false);
         Object value = randomValue();
         Map<String, Object> map = singletonMap("a", singletonMap("b", singletonMap("c", value)));
         assertThat(fe.extractFromSource(map), is(value));
     }

     public void testExtractSourceIncorrectPath() {
-        FieldHitExtractor fe = new FieldHitExtractor("a.b.c.d", false);
+        FieldHitExtractor fe = new FieldHitExtractor("a.b.c.d", null, false);
         Object value = randomNonNullValue();
         Map<String, Object> map = singletonMap("a", singletonMap("b", singletonMap("c", value)));
         SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map));
@@ -223,7 +236,7 @@ public void testExtractSourceIncorrectPath() {
     }

     public void testMultiValuedSource() {
-        FieldHitExtractor fe = new FieldHitExtractor("a", false);
+        FieldHitExtractor fe = new FieldHitExtractor("a", null, false);
         Object value = randomValue();
         Map<String, Object> map = singletonMap("a", asList(value, value));
         SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map));
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml
index 9b3572739cd8c..1406e04c8da2d 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml
@@ -86,23 +86,6 @@
       xpack.ml.get_calendars:
         calendar_id: "dogs_of_the_year"

-  - do:
-      xpack.ml.put_calendar:
-        calendar_id: "new_cal_with_unknown_job_group"
-        body: >
-          {
-            "job_ids": ["cal-job", "unknown-job-group"]
-          }
-
-  - do:
-      xpack.ml.get_calendars:
-        calendar_id: "new_cal_with_unknown_job_group"
-  - match: { count: 1 }
-  - match:
-      calendars.0:
-        calendar_id: "new_cal_with_unknown_job_group"
-        job_ids: ["cal-job", "unknown-job-group"]
-
 ---
 "Test get calendar given missing":
   - do:
@@ -714,3 +697,106 @@
   - match: { calendar_id: "expression" }
   - length: { job_ids: 1 }
   - match: { job_ids.0: "bar-a" }
+
+---
+"Test calendar actions with new job group":
+  - do:
+      xpack.ml.put_job:
+        job_id: calendar-job
+        body: >
+          {
+            "analysis_config" : {
+              "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}]
+            },
+            "data_description" : {
+            }
+          }
+
+  - do:
+      xpack.ml.put_calendar:
+        calendar_id: "cal_with_new_job_group"
+        body: >
+          {
+            "job_ids": ["calendar-job", "new-job-group"]
+          }
+
+  - do:
+      xpack.ml.get_calendars:
+        calendar_id: "cal_with_new_job_group"
+  - match: { count: 1 }
+  - match:
+      calendars.0:
+        calendar_id: "cal_with_new_job_group"
+        job_ids: ["calendar-job", "new-job-group"]
+
+  - do:
+      xpack.ml.post_calendar_events:
+        calendar_id: "cal_with_new_job_group"
+        body: >
+          {
+            "events" : [{ "description": "beach", "start_time": "2018-05-01T00:00:00Z", "end_time": "2018-05-06T00:00:00Z" }]
+          }
+
+  - do:
+      xpack.ml.get_calendar_events:
+        calendar_id: cal_with_new_job_group
+  - length: { events: 1 }
+  - match: { events.0.description: beach }
+
+  - do:
+      xpack.ml.delete_calendar:
+        calendar_id: "cal_with_new_job_group"
+
+  - do:
+      xpack.ml.put_calendar:
+        calendar_id: "started_empty_calendar"
+
+  - do:
+      xpack.ml.put_calendar_job:
+        calendar_id: "started_empty_calendar"
+        job_id: "new-group"
+  - match: { calendar_id: "started_empty_calendar" }
+  - length: { job_ids: 1 }
+
+  - do:
+      xpack.ml.get_calendars:
+        calendar_id: "started_empty_calendar"
+  - match: { count: 1 }
+  - match:
+      calendars.0:
+        calendar_id: "started_empty_calendar"
+        job_ids: ["new-group"]
+
+  - do:
+      xpack.ml.post_calendar_events:
calendar_id: "started_empty_calendar" + body: > + { + "events" : [{ "description": "beach", "start_time": "2018-05-01T00:00:00Z", "end_time": "2018-05-06T00:00:00Z" }] + } + + - do: + xpack.ml.get_calendar_events: + calendar_id: "started_empty_calendar" + - length: { events: 1 } + - match: { events.0.description: beach } + - set: { events.0.event_id: beach_event_id } + + - do: + xpack.ml.delete_calendar_event: + calendar_id: "started_empty_calendar" + event_id: $beach_event_id + + - do: + xpack.ml.get_calendar_events: + calendar_id: "started_empty_calendar" + - length: { events: 0 } + + - do: + xpack.ml.delete_calendar: + calendar_id: "started_empty_calendar" + + - do: + catch: missing + xpack.ml.get_calendars: + calendar_id: "started_empty_calendar" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml index b3d93e5298810..f47ea2d4d7e15 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml @@ -1,5 +1,10 @@ --- "Translate SQL": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + features: warnings + - do: bulk: refresh: true @@ -23,7 +28,8 @@ - str excludes: [] docvalue_fields: - - int + - field: int + format: use_field_mapping sort: - int: order: asc diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat index 7fd983c9ba5fe..37ca14dd094cc 100644 --- a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat +++ b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-watcher-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-watcher-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat index 010c154eb5a39..4c7f762dca26c 100644 --- a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat +++ b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat @@ -2,6 +2,4 @@ rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one rem or more contributor license agreements. Licensed under the Elastic License; rem you may not use this file except in compliance with the Elastic License. 
-call "%~dp0x-pack-env.bat" || exit /b 1
-
 set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-watcher/*
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java
index fa78208494f94..6b2bb26ef45f0 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java
@@ -23,6 +23,7 @@
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.XPackPlugin;
 import org.elasticsearch.xpack.core.watcher.WatcherMetaData;
 import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction;
 import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceRequest;
@@ -86,6 +87,8 @@ protected WatcherServiceResponse newResponse(boolean acknowledged) {
             @Override
             public ClusterState execute(ClusterState clusterState) {
+                XPackPlugin.checkReadyForXPackCustomMetadata(clusterState);
+
                 WatcherMetaData newWatcherMetaData = new WatcherMetaData(manuallyStopped);
                 WatcherMetaData currentMetaData = clusterState.metaData().custom(WatcherMetaData.TYPE);
diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java
index f913395759c47..63831c2d4dec0 100644
--- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java
+++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java
@@ -103,7 +103,10 @@ public void testExplainWithWhere() throws IOException {
         assertThat(readLine(), startsWith("        \"excludes\" : [ ]"));
         assertThat(readLine(), startsWith("      },"));
         assertThat(readLine(), startsWith("      \"docvalue_fields\" : ["));
-        assertThat(readLine(), startsWith("        \"i\""));
+        assertThat(readLine(), startsWith("        {"));
+        assertThat(readLine(), startsWith("          \"field\" : \"i\""));
+        assertThat(readLine(), startsWith("          \"format\" : \"use_field_mapping\""));
+        assertThat(readLine(), startsWith("        }"));
         assertThat(readLine(), startsWith("      ],"));
         assertThat(readLine(), startsWith("      \"sort\" : ["));
         assertThat(readLine(), startsWith("        {"));
diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java
index f7abb6f64f63c..bdbb75491ca87 100644
--- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java
+++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java
@@ -5,20 +5,16 @@
  */
 package org.elasticsearch.xpack.qa.sql.security;

-import org.apache.http.Header;
 import org.apache.http.HttpEntity;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
-import org.apache.http.message.BasicHeader;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.test.NotEqualMessageBuilder;
-import org.elasticsearch.xpack.qa.sql.security.SqlSecurityTestCase.AuditLogAsserter;
 import org.hamcrest.Matcher;
 import org.hamcrest.Matchers;

@@ -30,7 +26,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.TreeMap;
 import java.util.stream.Collectors;

 import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.columnInfo;
@@ -182,7 +177,7 @@ private static Map<String, Object> runSql(@Nullable String asUser, String mode,
             request.addParameter("mode", mode);
         }
         if (asUser != null) {
-            request.setHeaders(new BasicHeader("es-security-runas-user", asUser));
+            request.addHeader("es-security-runas-user", asUser);
         }
         request.setEntity(entity);
         return toMap(client().performRequest(request));
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java
index e0cf0efac472e..80dd09d3c47a8 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java
@@ -6,12 +6,9 @@
 package org.elasticsearch.xpack.qa.sql.rest;

 import com.fasterxml.jackson.core.io.JsonStringEncoder;
-
-import org.apache.http.Header;
 import org.apache.http.HttpEntity;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
-import org.apache.http.message.BasicHeader;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
@@ -321,10 +318,9 @@ private Map<String, Object> runSql(String mode, HttpEntity sql, String suffix) throws IOException {
         if (false == mode.isEmpty()) {
             request.addParameter("mode", mode);    // JDBC or PLAIN mode
         }
-        request.setHeaders(randomFrom(
-            new Header[] {},
-            new Header[] {new BasicHeader("Accept", "*/*")},
-            new Header[] {new BasicHeader("Accpet", "application/json")}));
+        if (randomBoolean()) {
+            request.addHeader("Accept", randomFrom("*/*", "application/json"));
+        }
         request.setEntity(sql);
         Response response = client().performRequest(request);
         try (InputStream content = response.getEntity().getContent()) {
@@ -540,7 +536,7 @@ private Tuple<String, String> runSqlAsText(String suffix, HttpEntity entity, String accept) throws IOException {
         Request request = new Request("POST", "/_xpack/sql" + suffix);
         request.addParameter("error_trace", "true");
         request.setEntity(entity);
-        request.setHeaders(new BasicHeader("Accept", accept));
+        request.addHeader("Accept", accept);
         Response response = client().performRequest(request);
         return new Tuple<>(
             Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)),